diff --git a/.gitignore b/.gitignore
index 94f862885..b724864ef 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,6 +30,7 @@ tags
 # Files created by Sphinx build
 doc/build
 doc/source/_static/cinder.conf.sample
+doc/source/drivers.rst

 #Files created for API reference
 api-ref/build
diff --git a/.gitreview b/.gitreview
index eecf93944..568456146 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,5 @@
 [gerrit]
 host=review.openstack.org
 port=29418
-project=openstack/cinder.git
+project=openstack/deb-cinder.git
+defaultbranch=debian/newton
diff --git a/api-ref/v1/source/conf.py b/api-ref/source/conf.py
similarity index 96%
rename from api-ref/v1/source/conf.py
rename to api-ref/source/conf.py
index c013c7830..0663723f2 100644
--- a/api-ref/v1/source/conf.py
+++ b/api-ref/source/conf.py
@@ -28,6 +28,8 @@ import os
 import subprocess
 import sys

+import openstackdocstheme  # noqa
+
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -41,8 +43,7 @@ sys.path.insert(0, os.path.abspath('./'))
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
-    'os_api_ref',
-    'oslosphinx',
+    'os_api_ref'
 ]

 # The suffix of source filenames.
@@ -56,7 +57,7 @@ source_suffix = '.rst'
 master_doc = 'index'

 # General information about the project.
-project = u'Cinder API Reference'
+project = u'Block Storage API Reference'
 copyright = u'OpenStack Foundation'

 # The version info for the project you're documenting, acts as replacement for
@@ -111,6 +112,13 @@ pygments_style = 'sphinx'
 # html_theme_path = ["."]
 # html_theme = '_theme'

+html_theme = 'openstackdocs'
+html_theme_path = [openstackdocstheme.get_html_theme_path()]
+html_theme_options = {
+    "sidebar_mode": "toc",
+}
+
+
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst
new file mode 100644
index 000000000..1678a2a29
--- /dev/null
+++ b/api-ref/source/index.rst
@@ -0,0 +1,18 @@
+=================
+Block Storage API
+=================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 1
+
+   v1/index
+   v2/index
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
diff --git a/api-ref/v1/source/index.rst b/api-ref/source/v1/index.rst
similarity index 77%
rename from api-ref/v1/source/index.rst
rename to api-ref/source/v1/index.rst
index 5b9ad98b2..fa991bf21 100644
--- a/api-ref/v1/source/index.rst
+++ b/api-ref/source/v1/index.rst
@@ -1,8 +1,8 @@
 :tocdepth: 2

-===============
- Cinder API V1
-===============
+====================
+Block Storage API V1
+====================

 .. rest_expand_all::
diff --git a/api-ref/v1/source/os-quota-sets-v1.inc b/api-ref/source/v1/os-quota-sets-v1.inc
similarity index 96%
rename from api-ref/v1/source/os-quota-sets-v1.inc
rename to api-ref/source/v1/os-quota-sets-v1.inc
index 177307617..732ccef75 100644
--- a/api-ref/v1/source/os-quota-sets-v1.inc
+++ b/api-ref/source/v1/os-quota-sets-v1.inc
@@ -9,8 +9,8 @@ Administrators only, depending on policy settings.

 Shows, updates, and deletes quotas for a tenant.

-Show quota details for user
-===========================
+Show quota details for user (v1)
+================================

 .. rest_method:: GET /v1/{tenant_id}/os-quota-sets/{tenant_id}/detail/{user_id}
@@ -106,8 +106,8 @@ Response Example
    :language: javascript

-Show quotas
-===========
+Show quotas (v1)
+================

 .. rest_method:: GET /v1/{tenant_id}/os-quota-sets/{tenant_id}
@@ -157,8 +157,8 @@ Response Example
    :language: javascript

-Update quotas
-=============
+Update quotas (v1)
+==================

 .. rest_method:: PUT /v1/{tenant_id}/os-quota-sets/{tenant_id}
@@ -227,8 +227,8 @@ Response Example
    :language: javascript

-Delete quotas
-=============
+Delete quotas (v1)
+==================

 .. rest_method:: DELETE /v1/{tenant_id}/os-quota-sets/{tenant_id}
@@ -250,8 +250,8 @@ Response Example
 .. literalinclude:: ./samples/user-quotas-delete-response.json
    :language: javascript

-Show quotas for user
-====================
+Show quotas for user (v1)
+=========================

 .. rest_method:: GET /v1/{tenant_id}/os-quota-sets/{tenant_id}/{user_id}
@@ -304,8 +304,8 @@ Response Example

-Update quotas for user
-======================
+Update quotas for user (v1)
+===========================

 .. rest_method:: POST /v1/{tenant_id}/os-quota-sets/{tenant_id}/{user_id}
@@ -375,8 +375,8 @@ Response Example
    :language: javascript

-Delete quotas for user
-======================
+Delete quotas for user (v1)
+===========================

 .. rest_method:: DELETE /v1/{tenant_id}/os-quota-sets/{tenant_id}/{user_id}
diff --git a/api-ref/v1/source/parameters.yaml b/api-ref/source/v1/parameters.yaml
similarity index 100%
rename from api-ref/v1/source/parameters.yaml
rename to api-ref/source/v1/parameters.yaml
index f3f5b96c6..f30f2be81 100644
--- a/api-ref/v1/source/parameters.yaml
+++ b/api-ref/source/v1/parameters.yaml
@@ -430,6 +430,12 @@ name_2:
   in: body
   required: true
   type: string
+OS-SCH-HNT:scheduler_hints:
+  description: |
+    The dictionary of data to send to the scheduler.
+  in: body
+  required: false
+  type: object
 quota_set:
   description: |
     A ``quota_set`` object.
@@ -481,12 +487,6 @@ reserved_1:
   in: body
   required: true
   type: integer
-OS-SCH-HNT:scheduler_hints:
-  description: |
-    The dictionary of data to send to the scheduler.
-  in: body
-  required: false
-  type: object
 security_group_rules:
   description: |
     The number of rules that are allowed for each
diff --git a/api-ref/v1/source/samples/quotas-defaults-show-response.json b/api-ref/source/v1/samples/quotas-defaults-show-response.json
similarity index 100%
rename from api-ref/v1/source/samples/quotas-defaults-show-response.json
rename to api-ref/source/v1/samples/quotas-defaults-show-response.json
diff --git a/api-ref/v1/source/samples/quotas-defaults-show-response.xml b/api-ref/source/v1/samples/quotas-defaults-show-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/quotas-defaults-show-response.xml
rename to api-ref/source/v1/samples/quotas-defaults-show-response.xml
diff --git a/api-ref/v1/source/samples/quotas-show-response.json b/api-ref/source/v1/samples/quotas-show-response.json
similarity index 100%
rename from api-ref/v1/source/samples/quotas-show-response.json
rename to api-ref/source/v1/samples/quotas-show-response.json
diff --git a/api-ref/v1/source/samples/quotas-show-response.xml b/api-ref/source/v1/samples/quotas-show-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/quotas-show-response.xml
rename to api-ref/source/v1/samples/quotas-show-response.xml
diff --git a/api-ref/v1/source/samples/quotas-update-request.json b/api-ref/source/v1/samples/quotas-update-request.json
similarity index 100%
rename from api-ref/v1/source/samples/quotas-update-request.json
rename to api-ref/source/v1/samples/quotas-update-request.json
diff --git a/api-ref/v1/source/samples/quotas-update-request.xml b/api-ref/source/v1/samples/quotas-update-request.xml
similarity index 100%
rename from api-ref/v1/source/samples/quotas-update-request.xml
rename to api-ref/source/v1/samples/quotas-update-request.xml
diff --git a/api-ref/v1/source/samples/quotas-update-response.json b/api-ref/source/v1/samples/quotas-update-response.json
similarity index 100%
rename from api-ref/v1/source/samples/quotas-update-response.json
rename to api-ref/source/v1/samples/quotas-update-response.json
diff --git a/api-ref/v1/source/samples/quotas-update-response.xml b/api-ref/source/v1/samples/quotas-update-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/quotas-update-response.xml
rename to api-ref/source/v1/samples/quotas-update-response.xml
diff --git a/api-ref/v1/source/samples/snapshot-create-request.json b/api-ref/source/v1/samples/snapshot-create-request.json
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-create-request.json
rename to api-ref/source/v1/samples/snapshot-create-request.json
diff --git a/api-ref/v1/source/samples/snapshot-create-request.xml b/api-ref/source/v1/samples/snapshot-create-request.xml
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-create-request.xml
rename to api-ref/source/v1/samples/snapshot-create-request.xml
diff --git a/api-ref/v1/source/samples/snapshot-metadata-show-response.json b/api-ref/source/v1/samples/snapshot-metadata-show-response.json
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-metadata-show-response.json
rename to api-ref/source/v1/samples/snapshot-metadata-show-response.json
diff --git a/api-ref/v1/source/samples/snapshot-metadata-show-response.xml b/api-ref/source/v1/samples/snapshot-metadata-show-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-metadata-show-response.xml
rename to api-ref/source/v1/samples/snapshot-metadata-show-response.xml
diff --git a/api-ref/v1/source/samples/snapshot-metadata-update-request.json b/api-ref/source/v1/samples/snapshot-metadata-update-request.json
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-metadata-update-request.json
rename to api-ref/source/v1/samples/snapshot-metadata-update-request.json
diff --git a/api-ref/v1/source/samples/snapshot-metadata-update-request.xml b/api-ref/source/v1/samples/snapshot-metadata-update-request.xml
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-metadata-update-request.xml
rename to api-ref/source/v1/samples/snapshot-metadata-update-request.xml
diff --git a/api-ref/v1/source/samples/snapshot-metadata-update-response.json b/api-ref/source/v1/samples/snapshot-metadata-update-response.json
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-metadata-update-response.json
rename to api-ref/source/v1/samples/snapshot-metadata-update-response.json
diff --git a/api-ref/v1/source/samples/snapshot-metadata-update-response.xml b/api-ref/source/v1/samples/snapshot-metadata-update-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-metadata-update-response.xml
rename to api-ref/source/v1/samples/snapshot-metadata-update-response.xml
diff --git a/api-ref/v1/source/samples/snapshot-show-response.json b/api-ref/source/v1/samples/snapshot-show-response.json
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-show-response.json
rename to api-ref/source/v1/samples/snapshot-show-response.json
diff --git a/api-ref/v1/source/samples/snapshot-show-response.xml b/api-ref/source/v1/samples/snapshot-show-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/snapshot-show-response.xml
rename to api-ref/source/v1/samples/snapshot-show-response.xml
diff --git a/api-ref/v1/source/samples/snapshots-list-response.json b/api-ref/source/v1/samples/snapshots-list-response.json
similarity index 100%
rename from api-ref/v1/source/samples/snapshots-list-response.json
rename to api-ref/source/v1/samples/snapshots-list-response.json
diff --git a/api-ref/v1/source/samples/snapshots-list-response.xml b/api-ref/source/v1/samples/snapshots-list-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/snapshots-list-response.xml
rename to api-ref/source/v1/samples/snapshots-list-response.xml
diff --git a/api-ref/v1/source/samples/user-quotas-delete-response.json b/api-ref/source/v1/samples/user-quotas-delete-response.json
similarity index 100%
rename from api-ref/v1/source/samples/user-quotas-delete-response.json
rename to api-ref/source/v1/samples/user-quotas-delete-response.json
diff --git a/api-ref/v1/source/samples/user-quotas-show-detail-response.json b/api-ref/source/v1/samples/user-quotas-show-detail-response.json
similarity index 100%
rename from api-ref/v1/source/samples/user-quotas-show-detail-response.json
rename to api-ref/source/v1/samples/user-quotas-show-detail-response.json
diff --git a/api-ref/v1/source/samples/user-quotas-show-response.json b/api-ref/source/v1/samples/user-quotas-show-response.json
similarity index 100%
rename from api-ref/v1/source/samples/user-quotas-show-response.json
rename to api-ref/source/v1/samples/user-quotas-show-response.json
diff --git a/api-ref/v1/source/samples/user-quotas-show-response.xml b/api-ref/source/v1/samples/user-quotas-show-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/user-quotas-show-response.xml
rename to api-ref/source/v1/samples/user-quotas-show-response.xml
diff --git a/api-ref/v1/source/samples/user-quotas-update-request.json b/api-ref/source/v1/samples/user-quotas-update-request.json
similarity index 100%
rename from api-ref/v1/source/samples/user-quotas-update-request.json
rename to api-ref/source/v1/samples/user-quotas-update-request.json
diff --git a/api-ref/v1/source/samples/user-quotas-update-request.xml b/api-ref/source/v1/samples/user-quotas-update-request.xml
similarity index 100%
rename from api-ref/v1/source/samples/user-quotas-update-request.xml
rename to api-ref/source/v1/samples/user-quotas-update-request.xml
diff --git a/api-ref/v1/source/samples/user-quotas-update-response.json b/api-ref/source/v1/samples/user-quotas-update-response.json
similarity index 100%
rename from api-ref/v1/source/samples/user-quotas-update-response.json
rename to api-ref/source/v1/samples/user-quotas-update-response.json
diff --git a/api-ref/v1/source/samples/user-quotas-update-response.xml b/api-ref/source/v1/samples/user-quotas-update-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/user-quotas-update-response.xml
rename to api-ref/source/v1/samples/user-quotas-update-response.xml
diff --git a/api-ref/v1/source/samples/version-show-response.json b/api-ref/source/v1/samples/version-show-response.json
similarity index 100%
rename from api-ref/v1/source/samples/version-show-response.json
rename to api-ref/source/v1/samples/version-show-response.json
diff --git a/api-ref/v1/source/samples/versions-list-response.json b/api-ref/source/v1/samples/versions-list-response.json
similarity index 100%
rename from api-ref/v1/source/samples/versions-list-response.json
rename to api-ref/source/v1/samples/versions-list-response.json
diff --git a/api-ref/v1/source/samples/volume-create-request.json b/api-ref/source/v1/samples/volume-create-request.json
similarity index 100%
rename from api-ref/v1/source/samples/volume-create-request.json
rename to api-ref/source/v1/samples/volume-create-request.json
diff --git a/api-ref/v1/source/samples/volume-create-request.xml b/api-ref/source/v1/samples/volume-create-request.xml
similarity index 100%
rename from api-ref/v1/source/samples/volume-create-request.xml
rename to api-ref/source/v1/samples/volume-create-request.xml
diff --git a/api-ref/v1/source/samples/volume-show-response.json b/api-ref/source/v1/samples/volume-show-response.json
similarity index 100%
rename from api-ref/v1/source/samples/volume-show-response.json
rename to api-ref/source/v1/samples/volume-show-response.json
diff --git a/api-ref/v1/source/samples/volume-show-response.xml b/api-ref/source/v1/samples/volume-show-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/volume-show-response.xml
rename to api-ref/source/v1/samples/volume-show-response.xml
diff --git a/api-ref/v1/source/samples/volume-type-create-request.json b/api-ref/source/v1/samples/volume-type-create-request.json
similarity index 100%
rename from api-ref/v1/source/samples/volume-type-create-request.json
rename to api-ref/source/v1/samples/volume-type-create-request.json
diff --git a/api-ref/v1/source/samples/volume-type-create-request.xml b/api-ref/source/v1/samples/volume-type-create-request.xml
similarity index 100%
rename from api-ref/v1/source/samples/volume-type-create-request.xml
rename to api-ref/source/v1/samples/volume-type-create-request.xml
diff --git a/api-ref/v1/source/samples/volume-type-show-response.json b/api-ref/source/v1/samples/volume-type-show-response.json
similarity index 100%
rename from api-ref/v1/source/samples/volume-type-show-response.json
rename to api-ref/source/v1/samples/volume-type-show-response.json
diff --git a/api-ref/v1/source/samples/volume-type-show-response.xml b/api-ref/source/v1/samples/volume-type-show-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/volume-type-show-response.xml
rename to api-ref/source/v1/samples/volume-type-show-response.xml
diff --git a/api-ref/v1/source/samples/volume-types-list-response.json b/api-ref/source/v1/samples/volume-types-list-response.json
similarity index 100%
rename from api-ref/v1/source/samples/volume-types-list-response.json
rename to api-ref/source/v1/samples/volume-types-list-response.json
diff --git a/api-ref/v1/source/samples/volume-types-list-response.xml b/api-ref/source/v1/samples/volume-types-list-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/volume-types-list-response.xml
rename to api-ref/source/v1/samples/volume-types-list-response.xml
diff --git a/api-ref/v1/source/samples/volumes-list-response.json b/api-ref/source/v1/samples/volumes-list-response.json
similarity index 100%
rename from api-ref/v1/source/samples/volumes-list-response.json
rename to api-ref/source/v1/samples/volumes-list-response.json
diff --git a/api-ref/v1/source/samples/volumes-list-response.xml b/api-ref/source/v1/samples/volumes-list-response.xml
similarity index 100%
rename from api-ref/v1/source/samples/volumes-list-response.xml
rename to api-ref/source/v1/samples/volumes-list-response.xml
diff --git a/api-ref/v1/source/volumes-v1-snapshots.inc b/api-ref/source/v1/volumes-v1-snapshots.inc
similarity index 89%
rename from api-ref/v1/source/volumes-v1-snapshots.inc
rename to api-ref/source/v1/volumes-v1-snapshots.inc
index 2866b2790..d8c45bbcc 100644
--- a/api-ref/v1/source/volumes-v1-snapshots.inc
+++ b/api-ref/source/v1/volumes-v1-snapshots.inc
@@ -8,8 +8,8 @@ Creates, lists, shows information for, and deletes snapshots.
 Shows and updates snapshot metadata.

-Show snapshot details
-=====================
+Show snapshot details (v1)
+==========================

 .. rest_method:: GET /v1/{tenant_id}/snapshots/{snapshot_id}
@@ -34,8 +34,8 @@ Response Example
    :language: javascript

-Delete snapshot
-===============
+Delete snapshot (v1)
+====================

 .. rest_method:: DELETE /v1/{tenant_id}/snapshots/{snapshot_id}
@@ -53,8 +53,8 @@ Request
    - snapshot_id: snapshot_id

-List snapshots with details
-===========================
+List snapshots with details (v1)
+================================

 .. rest_method:: GET /v1/{tenant_id}/snapshots/detail
@@ -78,8 +78,8 @@ Response Example
    :language: javascript

-Create snapshot
-===============
+Create snapshot (v1)
+====================

 .. rest_method:: POST /v1/{tenant_id}/snapshots
@@ -101,8 +101,8 @@ Request Example
 .. literalinclude:: ./samples/snapshot-create-request.json
    :language: javascript

-List snapshots
-==============
+List snapshots (v1)
+===================

 .. rest_method:: GET /v1/{tenant_id}/snapshots
@@ -128,8 +128,8 @@ Response Example
    :language: javascript

-Show snapshot metadata
-======================
+Show snapshot metadata (v1)
+===========================

 .. rest_method:: GET /v1/{tenant_id}/snapshots/{snapshot_id}/metadata
@@ -156,8 +156,8 @@ Response Example
    :language: javascript

-Update snapshot metadata
-========================
+Update snapshot metadata (v1)
+=============================

 .. rest_method:: PUT /v1/{tenant_id}/snapshots/{snapshot_id}/metadata
diff --git a/api-ref/v1/source/volumes-v1-types.inc b/api-ref/source/v1/volumes-v1-types.inc
similarity index 90%
rename from api-ref/v1/source/volumes-v1-types.inc
rename to api-ref/source/v1/volumes-v1-types.inc
index 3f1abe20a..df426cd2a 100644
--- a/api-ref/v1/source/volumes-v1-types.inc
+++ b/api-ref/source/v1/volumes-v1-types.inc
@@ -8,8 +8,8 @@ Lists, creates, updates, shows information for, and deletes volume types.

-List volume types
-=================
+List volume types (v1)
+======================

 .. rest_method:: GET /v1/{tenant_id}/types
@@ -34,8 +34,8 @@ Response Example
    :language: javascript

-Create volume type
-==================
+Create volume type (v1)
+=======================

 .. rest_method:: POST /v1/{tenant_id}/types
@@ -78,8 +78,8 @@ Response Example
    :language: javascript

-Update volume type
-==================
+Update volume type (v1)
+=======================

 .. rest_method:: PUT /v1/{tenant_id}/types/{volume_type_id}
@@ -125,8 +125,8 @@ Response Example
    :language: javascript

-Update extra specs for a volume type
-====================================
+Update extra specs for a volume type (v1)
+=========================================

 .. rest_method:: PUT /v1/{tenant_id}/types/{volume_type_id}
@@ -171,8 +171,8 @@ Response Example
    :language: javascript

-Show volume type details
-========================
+Show volume type details (v1)
+=============================

 .. rest_method:: GET /v1/{tenant_id}/types/{volume_type_id}
@@ -199,8 +199,8 @@ Response Example
    :language: javascript

-Delete volume type
-==================
+Delete volume type (v1)
+=======================

 .. rest_method:: DELETE /v1/{tenant_id}/types/{volume_type_id}
diff --git a/api-ref/v1/source/volumes-v1-versions.inc b/api-ref/source/v1/volumes-v1-versions.inc
similarity index 93%
rename from api-ref/v1/source/volumes-v1-versions.inc
rename to api-ref/source/v1/volumes-v1-versions.inc
index 693a4f3e1..e460c48e4 100644
--- a/api-ref/v1/source/volumes-v1-versions.inc
+++ b/api-ref/source/v1/volumes-v1-versions.inc
@@ -30,8 +30,8 @@ Response Example
    :language: javascript

-List API versions
-=================
+List API versions (v1)
+======================

 .. rest_method:: GET /
diff --git a/api-ref/v1/source/volumes-v1-volumes.inc b/api-ref/source/v1/volumes-v1-volumes.inc
similarity index 94%
rename from api-ref/v1/source/volumes-v1-volumes.inc
rename to api-ref/source/v1/volumes-v1-volumes.inc
index 38d4fdb61..d1d870ca4 100644
--- a/api-ref/v1/source/volumes-v1-volumes.inc
+++ b/api-ref/source/v1/volumes-v1-volumes.inc
@@ -10,8 +10,8 @@ volume was not created from a snapshot or source volume, these values are null.

-List volumes, with details
-==========================
+List volumes, with details (v1)
+===============================

 .. rest_method:: GET /v1/{tenant_id}/volumes/detail
@@ -65,8 +65,8 @@ Response Example
    :language: javascript

-Create volume
-=============
+Create volume (v1)
+==================

 .. rest_method:: POST /v1/{tenant_id}/volumes
@@ -122,8 +122,8 @@ Response Parameters
    - metadata: metadata

-List volumes
-============
+List volumes (v1)
+=================

 .. rest_method:: GET /v1/{tenant_id}/volumes
@@ -160,8 +160,8 @@ Response Example
    :language: javascript

-Show volume details
-===================
+Show volume details (v1)
+========================

 .. rest_method:: GET /v1/{tenant_id}/volumes/{volume_id}
@@ -215,8 +215,8 @@ Response Example
 .. literalinclude:: ./samples/volume-show-response.json
    :language: javascript

-Delete volume
-=============
+Delete volume (v1)
+==================

 .. rest_method:: DELETE /v1/{tenant_id}/volumes/{volume_id}
diff --git a/api-ref/v2/source/api-versions.inc b/api-ref/source/v2/api-versions.inc
similarity index 100%
rename from api-ref/v2/source/api-versions.inc
rename to api-ref/source/v2/api-versions.inc
diff --git a/api-ref/v2/source/capabilities-v2.inc b/api-ref/source/v2/capabilities-v2.inc
similarity index 100%
rename from api-ref/v2/source/capabilities-v2.inc
rename to api-ref/source/v2/capabilities-v2.inc
diff --git a/api-ref/v2/source/consistencygroups-v2.inc b/api-ref/source/v2/consistencygroups-v2.inc
similarity index 100%
rename from api-ref/v2/source/consistencygroups-v2.inc
rename to api-ref/source/v2/consistencygroups-v2.inc
diff --git a/api-ref/v2/source/ext-backups-actions-v2.inc b/api-ref/source/v2/ext-backups-actions-v2.inc
similarity index 100%
rename from api-ref/v2/source/ext-backups-actions-v2.inc
rename to api-ref/source/v2/ext-backups-actions-v2.inc
diff --git a/api-ref/v2/source/ext-backups.inc b/api-ref/source/v2/ext-backups.inc
similarity index 100%
rename from api-ref/v2/source/ext-backups.inc
rename to api-ref/source/v2/ext-backups.inc
diff --git a/api-ref/v2/source/index.rst b/api-ref/source/v2/index.rst
similarity index 92%
rename from api-ref/v2/source/index.rst
rename to api-ref/source/v2/index.rst
index e46983c8b..33f17dd81 100644
--- a/api-ref/v2/source/index.rst
+++ b/api-ref/source/v2/index.rst
@@ -1,8 +1,8 @@
 :tocdepth: 2

-==============
- Volume API V2
-==============
+====================
+Block Storage API V2
+====================

 .. rest_expand_all::
diff --git a/api-ref/v2/source/limits.inc b/api-ref/source/v2/limits.inc
similarity index 100%
rename from api-ref/v2/source/limits.inc
rename to api-ref/source/v2/limits.inc
diff --git a/api-ref/v2/source/os-cgsnapshots-v2.inc b/api-ref/source/v2/os-cgsnapshots-v2.inc
similarity index 100%
rename from api-ref/v2/source/os-cgsnapshots-v2.inc
rename to api-ref/source/v2/os-cgsnapshots-v2.inc
diff --git a/api-ref/v2/source/os-vol-image-meta-v2.inc b/api-ref/source/v2/os-vol-image-meta-v2.inc
similarity index 100%
rename from api-ref/v2/source/os-vol-image-meta-v2.inc
rename to api-ref/source/v2/os-vol-image-meta-v2.inc
diff --git a/api-ref/v2/source/os-vol-pool-v2.inc b/api-ref/source/v2/os-vol-pool-v2.inc
similarity index 100%
rename from api-ref/v2/source/os-vol-pool-v2.inc
rename to api-ref/source/v2/os-vol-pool-v2.inc
diff --git a/api-ref/v2/source/os-vol-transfer-v2.inc b/api-ref/source/v2/os-vol-transfer-v2.inc
similarity index 100%
rename from api-ref/v2/source/os-vol-transfer-v2.inc
rename to api-ref/source/v2/os-vol-transfer-v2.inc
diff --git a/api-ref/v2/source/parameters.yaml b/api-ref/source/v2/parameters.yaml
similarity index 100%
rename from api-ref/v2/source/parameters.yaml
rename to api-ref/source/v2/parameters.yaml
index 02a7d153c..241507fee 100644
--- a/api-ref/v2/source/parameters.yaml
+++ b/api-ref/source/v2/parameters.yaml
@@ -1055,6 +1055,12 @@ os-reset_status:
   in: body
   required: true
   type: object
+OS-SCH-HNT:scheduler_hints:
+  description: |
+    The dictionary of data to send to the scheduler.
+  in: body
+  required: false
+  type: object
 os-set_image_metadata:
   description: |
     The ``os-set_image_metadata`` action.
@@ -1236,12 +1242,6 @@ restore:
   in: body
   required: true
   type: object
-OS-SCH-HNT:scheduler_hints:
-  description: |
-    The dictionary of data to send to the scheduler.
-  in: body
-  required: false
-  type: object
 security_group_rules:
   description: |
     The number of rules that are allowed for each
diff --git a/api-ref/v2/source/qos-specs-v2-qos-specs.inc b/api-ref/source/v2/qos-specs-v2-qos-specs.inc
similarity index 100%
rename from api-ref/v2/source/qos-specs-v2-qos-specs.inc
rename to api-ref/source/v2/qos-specs-v2-qos-specs.inc
diff --git a/api-ref/v2/source/quota-sets.inc b/api-ref/source/v2/quota-sets.inc
similarity index 100%
rename from api-ref/v2/source/quota-sets.inc
rename to api-ref/source/v2/quota-sets.inc
diff --git a/api-ref/v2/source/samples/backend-capabilities-response.json b/api-ref/source/v2/samples/backend-capabilities-response.json
similarity index 100%
rename from api-ref/v2/source/samples/backend-capabilities-response.json
rename to api-ref/source/v2/samples/backend-capabilities-response.json
diff --git a/api-ref/v2/source/samples/backup-create-request.json b/api-ref/source/v2/samples/backup-create-request.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-create-request.json
rename to api-ref/source/v2/samples/backup-create-request.json
diff --git a/api-ref/v2/source/samples/backup-create-response.json b/api-ref/source/v2/samples/backup-create-response.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-create-response.json
rename to api-ref/source/v2/samples/backup-create-response.json
diff --git a/api-ref/v2/source/samples/backup-force-delete-request.json b/api-ref/source/v2/samples/backup-force-delete-request.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-force-delete-request.json
rename to api-ref/source/v2/samples/backup-force-delete-request.json
diff --git a/api-ref/v2/source/samples/backup-record-export-response.json b/api-ref/source/v2/samples/backup-record-export-response.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-record-export-response.json
rename to api-ref/source/v2/samples/backup-record-export-response.json
diff --git a/api-ref/v2/source/samples/backup-record-import-request.json b/api-ref/source/v2/samples/backup-record-import-request.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-record-import-request.json
rename to api-ref/source/v2/samples/backup-record-import-request.json
diff --git a/api-ref/v2/source/samples/backup-record-import-response.json b/api-ref/source/v2/samples/backup-record-import-response.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-record-import-response.json
rename to api-ref/source/v2/samples/backup-record-import-response.json
diff --git a/api-ref/v2/source/samples/backup-restore-request.json b/api-ref/source/v2/samples/backup-restore-request.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-restore-request.json
rename to api-ref/source/v2/samples/backup-restore-request.json
diff --git a/api-ref/v2/source/samples/backup-restore-response.json b/api-ref/source/v2/samples/backup-restore-response.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-restore-response.json
rename to api-ref/source/v2/samples/backup-restore-response.json
diff --git a/api-ref/v2/source/samples/backup-show-response.json b/api-ref/source/v2/samples/backup-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/backup-show-response.json
rename to api-ref/source/v2/samples/backup-show-response.json
diff --git a/api-ref/v2/source/samples/backups-list-detailed-response.json b/api-ref/source/v2/samples/backups-list-detailed-response.json
similarity index 100%
rename from api-ref/v2/source/samples/backups-list-detailed-response.json
rename to api-ref/source/v2/samples/backups-list-detailed-response.json
diff --git a/api-ref/v2/source/samples/backups-list-response.json b/api-ref/source/v2/samples/backups-list-response.json
similarity index 100%
rename from api-ref/v2/source/samples/backups-list-response.json
rename to api-ref/source/v2/samples/backups-list-response.json
diff --git a/api-ref/v2/source/samples/cgsnapshots-create-request.json b/api-ref/source/v2/samples/cgsnapshots-create-request.json
similarity index 100%
rename from api-ref/v2/source/samples/cgsnapshots-create-request.json
rename to api-ref/source/v2/samples/cgsnapshots-create-request.json
diff --git a/api-ref/v2/source/samples/cgsnapshots-create-response.json b/api-ref/source/v2/samples/cgsnapshots-create-response.json
similarity index 100%
rename from api-ref/v2/source/samples/cgsnapshots-create-response.json
rename to api-ref/source/v2/samples/cgsnapshots-create-response.json
diff --git a/api-ref/v2/source/samples/cgsnapshots-list-detailed-response.json b/api-ref/source/v2/samples/cgsnapshots-list-detailed-response.json
similarity index 100%
rename from api-ref/v2/source/samples/cgsnapshots-list-detailed-response.json
rename to api-ref/source/v2/samples/cgsnapshots-list-detailed-response.json
diff --git a/api-ref/v2/source/samples/cgsnapshots-list-response.json b/api-ref/source/v2/samples/cgsnapshots-list-response.json
similarity index 100%
rename from api-ref/v2/source/samples/cgsnapshots-list-response.json
rename to api-ref/source/v2/samples/cgsnapshots-list-response.json
diff --git a/api-ref/v2/source/samples/cgsnapshots-show-response.json b/api-ref/source/v2/samples/cgsnapshots-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/cgsnapshots-show-response.json
rename to api-ref/source/v2/samples/cgsnapshots-show-response.json
diff --git a/api-ref/v2/source/samples/consistency-group-create-from-src-request.json b/api-ref/source/v2/samples/consistency-group-create-from-src-request.json
similarity index 100%
rename from api-ref/v2/source/samples/consistency-group-create-from-src-request.json
rename to api-ref/source/v2/samples/consistency-group-create-from-src-request.json
diff --git a/api-ref/v2/source/samples/consistency-group-create-request.json b/api-ref/source/v2/samples/consistency-group-create-request.json
similarity index 100%
rename from api-ref/v2/source/samples/consistency-group-create-request.json
rename to api-ref/source/v2/samples/consistency-group-create-request.json
diff --git a/api-ref/v2/source/samples/consistency-group-create-response.json b/api-ref/source/v2/samples/consistency-group-create-response.json
similarity index 100%
rename from api-ref/v2/source/samples/consistency-group-create-response.json
rename to api-ref/source/v2/samples/consistency-group-create-response.json
diff --git a/api-ref/v2/source/samples/consistency-group-delete-request.json b/api-ref/source/v2/samples/consistency-group-delete-request.json
similarity index 100%
rename from api-ref/v2/source/samples/consistency-group-delete-request.json
rename to api-ref/source/v2/samples/consistency-group-delete-request.json
diff --git a/api-ref/v2/source/samples/consistency-group-show-response.json b/api-ref/source/v2/samples/consistency-group-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/consistency-group-show-response.json
rename to api-ref/source/v2/samples/consistency-group-show-response.json
diff --git a/api-ref/v2/source/samples/consistency-group-show-response.xml b/api-ref/source/v2/samples/consistency-group-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/consistency-group-show-response.xml
rename to api-ref/source/v2/samples/consistency-group-show-response.xml
diff --git a/api-ref/v2/source/samples/consistency-group-update-request.json b/api-ref/source/v2/samples/consistency-group-update-request.json
similarity index 100%
rename from api-ref/v2/source/samples/consistency-group-update-request.json
rename to api-ref/source/v2/samples/consistency-group-update-request.json
diff --git a/api-ref/v2/source/samples/consistency-groups-list-detailed-response.json b/api-ref/source/v2/samples/consistency-groups-list-detailed-response.json
similarity index 100%
rename from api-ref/v2/source/samples/consistency-groups-list-detailed-response.json
rename to api-ref/source/v2/samples/consistency-groups-list-detailed-response.json
diff --git a/api-ref/v2/source/samples/consistency-groups-list-detailed-response.xml b/api-ref/source/v2/samples/consistency-groups-list-detailed-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/consistency-groups-list-detailed-response.xml
rename to api-ref/source/v2/samples/consistency-groups-list-detailed-response.xml
diff --git a/api-ref/v2/source/samples/consistency-groups-list-response.json b/api-ref/source/v2/samples/consistency-groups-list-response.json
similarity index 100%
rename from api-ref/v2/source/samples/consistency-groups-list-response.json
rename to api-ref/source/v2/samples/consistency-groups-list-response.json
diff --git a/api-ref/v2/source/samples/extensions-list-response.json b/api-ref/source/v2/samples/extensions-list-response.json
similarity index 100%
rename from api-ref/v2/source/samples/extensions-list-response.json
rename to api-ref/source/v2/samples/extensions-list-response.json
diff --git a/api-ref/v2/source/samples/extensions-list-response.xml b/api-ref/source/v2/samples/extensions-list-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/extensions-list-response.xml
rename to api-ref/source/v2/samples/extensions-list-response.xml
diff --git a/api-ref/v2/source/samples/host-attach-request.json b/api-ref/source/v2/samples/host-attach-request.json
similarity index 100%
rename from api-ref/v2/source/samples/host-attach-request.json
rename to api-ref/source/v2/samples/host-attach-request.json
diff --git a/api-ref/v2/source/samples/image-metadata-show-request.json b/api-ref/source/v2/samples/image-metadata-show-request.json
similarity index 100%
rename from api-ref/v2/source/samples/image-metadata-show-request.json
rename to api-ref/source/v2/samples/image-metadata-show-request.json
diff --git a/api-ref/v2/source/samples/image-metadata-show-response.json b/api-ref/source/v2/samples/image-metadata-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/image-metadata-show-response.json
rename to api-ref/source/v2/samples/image-metadata-show-response.json
diff --git a/api-ref/v2/source/samples/limits-show-response.json b/api-ref/source/v2/samples/limits-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/limits-show-response.json
rename to api-ref/source/v2/samples/limits-show-response.json
diff --git a/api-ref/v2/source/samples/limits-show-response.xml b/api-ref/source/v2/samples/limits-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/limits-show-response.xml
rename to api-ref/source/v2/samples/limits-show-response.xml
diff --git a/api-ref/v2/source/samples/pools-list-detailed-response.json b/api-ref/source/v2/samples/pools-list-detailed-response.json
similarity index 100%
rename from api-ref/v2/source/samples/pools-list-detailed-response.json
rename to api-ref/source/v2/samples/pools-list-detailed-response.json
diff --git a/api-ref/v2/source/samples/qos-create-request.json b/api-ref/source/v2/samples/qos-create-request.json
similarity index 100%
rename from api-ref/v2/source/samples/qos-create-request.json
rename to api-ref/source/v2/samples/qos-create-request.json
diff --git a/api-ref/v2/source/samples/qos-create-request.xml b/api-ref/source/v2/samples/qos-create-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/qos-create-request.xml
rename to api-ref/source/v2/samples/qos-create-request.xml
diff --git a/api-ref/v2/source/samples/qos-create-response.json b/api-ref/source/v2/samples/qos-create-response.json
similarity index 100%
rename from api-ref/v2/source/samples/qos-create-response.json
rename to api-ref/source/v2/samples/qos-create-response.json
diff --git a/api-ref/v2/source/samples/qos-create-response.xml b/api-ref/source/v2/samples/qos-create-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/qos-create-response.xml
rename to api-ref/source/v2/samples/qos-create-response.xml
diff --git a/api-ref/v2/source/samples/qos-list-response.json b/api-ref/source/v2/samples/qos-list-response.json
similarity index 100%
rename from api-ref/v2/source/samples/qos-list-response.json
rename to api-ref/source/v2/samples/qos-list-response.json
diff --git a/api-ref/v2/source/samples/qos-list-response.xml b/api-ref/source/v2/samples/qos-list-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/qos-list-response.xml
rename to api-ref/source/v2/samples/qos-list-response.xml
diff --git a/api-ref/v2/source/samples/qos-show-response.json b/api-ref/source/v2/samples/qos-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/qos-show-response.json
rename to api-ref/source/v2/samples/qos-show-response.json
diff --git a/api-ref/v2/source/samples/qos-show-response.xml b/api-ref/source/v2/samples/qos-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/qos-show-response.xml
rename to api-ref/source/v2/samples/qos-show-response.xml
diff --git a/api-ref/v2/source/samples/qos-unset-request.json b/api-ref/source/v2/samples/qos-unset-request.json
similarity index 100%
rename from api-ref/v2/source/samples/qos-unset-request.json
rename to api-ref/source/v2/samples/qos-unset-request.json
diff --git a/api-ref/v2/source/samples/qos-unset-request.xml b/api-ref/source/v2/samples/qos-unset-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/qos-unset-request.xml
rename to api-ref/source/v2/samples/qos-unset-request.xml
diff --git a/api-ref/v2/source/samples/qos-unset-response.json b/api-ref/source/v2/samples/qos-unset-response.json
similarity index 100%
rename from api-ref/v2/source/samples/qos-unset-response.json
rename to api-ref/source/v2/samples/qos-unset-response.json
diff --git a/api-ref/v2/source/samples/qos-update-request.json b/api-ref/source/v2/samples/qos-update-request.json
similarity index 100%
rename from api-ref/v2/source/samples/qos-update-request.json
rename to api-ref/source/v2/samples/qos-update-request.json
diff --git a/api-ref/v2/source/samples/qos-update-request.xml b/api-ref/source/v2/samples/qos-update-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/qos-update-request.xml
rename to api-ref/source/v2/samples/qos-update-request.xml
diff --git a/api-ref/v2/source/samples/qos-update-response.json b/api-ref/source/v2/samples/qos-update-response.json
similarity index 100%
rename from api-ref/v2/source/samples/qos-update-response.json
rename to api-ref/source/v2/samples/qos-update-response.json
diff --git a/api-ref/v2/source/samples/qos-update-response.xml b/api-ref/source/v2/samples/qos-update-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/qos-update-response.xml
rename to api-ref/source/v2/samples/qos-update-response.xml
diff --git a/api-ref/v2/source/samples/qos_show_response.json b/api-ref/source/v2/samples/qos_show_response.json
similarity index 100%
rename from api-ref/v2/source/samples/qos_show_response.json
rename to api-ref/source/v2/samples/qos_show_response.json
diff --git a/api-ref/v2/source/samples/qos_show_response.xml b/api-ref/source/v2/samples/qos_show_response.xml
similarity index 100%
rename from api-ref/v2/source/samples/qos_show_response.xml
rename to api-ref/source/v2/samples/qos_show_response.xml
diff --git a/api-ref/v2/source/samples/quotas-defaults-show-response.xml b/api-ref/source/v2/samples/quotas-defaults-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/quotas-defaults-show-response.xml
rename to api-ref/source/v2/samples/quotas-defaults-show-response.xml
diff --git a/api-ref/v2/source/samples/quotas-delete-response.json b/api-ref/source/v2/samples/quotas-delete-response.json
similarity index 100%
rename from api-ref/v2/source/samples/quotas-delete-response.json
rename to api-ref/source/v2/samples/quotas-delete-response.json
diff --git a/api-ref/v2/source/samples/quotas-show-defaults-response.json b/api-ref/source/v2/samples/quotas-show-defaults-response.json
similarity index 100%
rename from api-ref/v2/source/samples/quotas-show-defaults-response.json
rename to api-ref/source/v2/samples/quotas-show-defaults-response.json
diff --git a/api-ref/v2/source/samples/quotas-show-defaults-response.xml b/api-ref/source/v2/samples/quotas-show-defaults-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/quotas-show-defaults-response.xml
rename to api-ref/source/v2/samples/quotas-show-defaults-response.xml
diff --git a/api-ref/v2/source/samples/quotas-show-response.json b/api-ref/source/v2/samples/quotas-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/quotas-show-response.json
rename to api-ref/source/v2/samples/quotas-show-response.json
diff --git a/api-ref/v2/source/samples/quotas-show-response.xml b/api-ref/source/v2/samples/quotas-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/quotas-show-response.xml
rename to api-ref/source/v2/samples/quotas-show-response.xml
diff --git a/api-ref/v2/source/samples/quotas-update-request.json b/api-ref/source/v2/samples/quotas-update-request.json
similarity index 100%
rename from api-ref/v2/source/samples/quotas-update-request.json
rename to api-ref/source/v2/samples/quotas-update-request.json
diff --git a/api-ref/v2/source/samples/quotas-update-request.xml b/api-ref/source/v2/samples/quotas-update-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/quotas-update-request.xml
rename to api-ref/source/v2/samples/quotas-update-request.xml
diff --git a/api-ref/v2/source/samples/quotas-update-response.json b/api-ref/source/v2/samples/quotas-update-response.json
similarity index 100%
rename from api-ref/v2/source/samples/quotas-update-response.json
rename to api-ref/source/v2/samples/quotas-update-response.json
diff --git a/api-ref/v2/source/samples/quotas-update-response.xml b/api-ref/source/v2/samples/quotas-update-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/quotas-update-response.xml
rename to api-ref/source/v2/samples/quotas-update-response.xml
diff --git a/api-ref/v2/source/samples/quotas-user-show-detailed-response.json b/api-ref/source/v2/samples/quotas-user-show-detailed-response.json
similarity index 100%
rename from api-ref/v2/source/samples/quotas-user-show-detailed-response.json
rename to api-ref/source/v2/samples/quotas-user-show-detailed-response.json
diff --git a/api-ref/v2/source/samples/quotas-user-show-response.json b/api-ref/source/v2/samples/quotas-user-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/quotas-user-show-response.json
rename to api-ref/source/v2/samples/quotas-user-show-response.json
diff --git a/api-ref/v2/source/samples/quotas-user-show-response.xml b/api-ref/source/v2/samples/quotas-user-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/quotas-user-show-response.xml
rename to api-ref/source/v2/samples/quotas-user-show-response.xml
diff --git a/api-ref/v2/source/samples/snapshot-create-request.json b/api-ref/source/v2/samples/snapshot-create-request.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-create-request.json
rename to api-ref/source/v2/samples/snapshot-create-request.json
diff --git a/api-ref/v2/source/samples/snapshot-create-request.xml b/api-ref/source/v2/samples/snapshot-create-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-create-request.xml
rename to api-ref/source/v2/samples/snapshot-create-request.xml
diff --git a/api-ref/v2/source/samples/snapshot-create-response.json b/api-ref/source/v2/samples/snapshot-create-response.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-create-response.json
rename to api-ref/source/v2/samples/snapshot-create-response.json
diff --git a/api-ref/v2/source/samples/snapshot-create-response.xml b/api-ref/source/v2/samples/snapshot-create-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-create-response.xml
rename to api-ref/source/v2/samples/snapshot-create-response.xml
diff --git a/api-ref/v2/source/samples/snapshot-metadata-show-response.json b/api-ref/source/v2/samples/snapshot-metadata-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-metadata-show-response.json
rename to api-ref/source/v2/samples/snapshot-metadata-show-response.json
diff --git a/api-ref/v2/source/samples/snapshot-metadata-show-response.xml b/api-ref/source/v2/samples/snapshot-metadata-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-metadata-show-response.xml
rename to api-ref/source/v2/samples/snapshot-metadata-show-response.xml
diff --git a/api-ref/v2/source/samples/snapshot-metadata-update-request.json b/api-ref/source/v2/samples/snapshot-metadata-update-request.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-metadata-update-request.json
rename to api-ref/source/v2/samples/snapshot-metadata-update-request.json
diff --git a/api-ref/v2/source/samples/snapshot-metadata-update-request.xml b/api-ref/source/v2/samples/snapshot-metadata-update-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-metadata-update-request.xml
rename to api-ref/source/v2/samples/snapshot-metadata-update-request.xml
diff --git a/api-ref/v2/source/samples/snapshot-metadata-update-response.json b/api-ref/source/v2/samples/snapshot-metadata-update-response.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-metadata-update-response.json
rename to api-ref/source/v2/samples/snapshot-metadata-update-response.json
diff --git a/api-ref/v2/source/samples/snapshot-metadata-update-response.xml b/api-ref/source/v2/samples/snapshot-metadata-update-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-metadata-update-response.xml
rename to api-ref/source/v2/samples/snapshot-metadata-update-response.xml
diff --git a/api-ref/v2/source/samples/snapshot-show-response.json b/api-ref/source/v2/samples/snapshot-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-show-response.json
rename to api-ref/source/v2/samples/snapshot-show-response.json
diff --git a/api-ref/v2/source/samples/snapshot-show-response.xml b/api-ref/source/v2/samples/snapshot-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-show-response.xml
rename to api-ref/source/v2/samples/snapshot-show-response.xml
diff --git a/api-ref/v2/source/samples/snapshot-update-request.json b/api-ref/source/v2/samples/snapshot-update-request.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-update-request.json
rename to api-ref/source/v2/samples/snapshot-update-request.json
diff --git a/api-ref/v2/source/samples/snapshot-update-request.xml b/api-ref/source/v2/samples/snapshot-update-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-update-request.xml
rename to api-ref/source/v2/samples/snapshot-update-request.xml
diff --git a/api-ref/v2/source/samples/snapshot-update-response.json b/api-ref/source/v2/samples/snapshot-update-response.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-update-response.json
rename to api-ref/source/v2/samples/snapshot-update-response.json
diff --git a/api-ref/v2/source/samples/snapshot-update-response.xml b/api-ref/source/v2/samples/snapshot-update-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshot-update-response.xml
rename to api-ref/source/v2/samples/snapshot-update-response.xml
diff --git a/api-ref/v2/source/samples/snapshots-list-detailed-response.json b/api-ref/source/v2/samples/snapshots-list-detailed-response.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshots-list-detailed-response.json
rename to api-ref/source/v2/samples/snapshots-list-detailed-response.json
diff --git a/api-ref/v2/source/samples/snapshots-list-detailed-response.xml b/api-ref/source/v2/samples/snapshots-list-detailed-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshots-list-detailed-response.xml
rename to api-ref/source/v2/samples/snapshots-list-detailed-response.xml
diff --git a/api-ref/v2/source/samples/snapshots-list-response.json b/api-ref/source/v2/samples/snapshots-list-response.json
similarity index 100%
rename from api-ref/v2/source/samples/snapshots-list-response.json
rename to api-ref/source/v2/samples/snapshots-list-response.json
diff --git a/api-ref/v2/source/samples/snapshots-list-response.xml b/api-ref/source/v2/samples/snapshots-list-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/snapshots-list-response.xml
rename to api-ref/source/v2/samples/snapshots-list-response.xml
diff --git a/api-ref/v2/source/samples/user-quotas-show-response.json b/api-ref/source/v2/samples/user-quotas-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/user-quotas-show-response.json
rename to api-ref/source/v2/samples/user-quotas-show-response.json
diff --git a/api-ref/v2/source/samples/user-quotas-show-response.xml b/api-ref/source/v2/samples/user-quotas-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/user-quotas-show-response.xml
rename to api-ref/source/v2/samples/user-quotas-show-response.xml
diff --git a/api-ref/v2/source/samples/user-quotas-update-request.json b/api-ref/source/v2/samples/user-quotas-update-request.json
similarity index 100%
rename from api-ref/v2/source/samples/user-quotas-update-request.json
rename to api-ref/source/v2/samples/user-quotas-update-request.json
diff --git a/api-ref/v2/source/samples/user-quotas-update-request.xml b/api-ref/source/v2/samples/user-quotas-update-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/user-quotas-update-request.xml
rename to api-ref/source/v2/samples/user-quotas-update-request.xml
diff --git a/api-ref/v2/source/samples/user-quotas-update-response.json b/api-ref/source/v2/samples/user-quotas-update-response.json
similarity index 100%
rename from api-ref/v2/source/samples/user-quotas-update-response.json
rename to api-ref/source/v2/samples/user-quotas-update-response.json
diff --git a/api-ref/v2/source/samples/user-quotas-update-response.xml b/api-ref/source/v2/samples/user-quotas-update-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/user-quotas-update-response.xml
rename to api-ref/source/v2/samples/user-quotas-update-response.xml
diff --git a/api-ref/v2/source/samples/version-show-response.json b/api-ref/source/v2/samples/version-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/version-show-response.json
rename to api-ref/source/v2/samples/version-show-response.json
diff --git a/api-ref/v2/source/samples/version-show-response.xml b/api-ref/source/v2/samples/version-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/version-show-response.xml
rename to api-ref/source/v2/samples/version-show-response.xml
diff --git a/api-ref/v2/source/samples/version-v2-show-response.json b/api-ref/source/v2/samples/version-v2-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/version-v2-show-response.json
rename to api-ref/source/v2/samples/version-v2-show-response.json
diff --git a/api-ref/v2/source/samples/versions-resp.json b/api-ref/source/v2/samples/versions-resp.json
similarity index 100%
rename from api-ref/v2/source/samples/versions-resp.json
rename to api-ref/source/v2/samples/versions-resp.json
diff --git a/api-ref/v2/source/samples/versions-response.json b/api-ref/source/v2/samples/versions-response.json
similarity index 100%
rename from api-ref/v2/source/samples/versions-response.json
rename to api-ref/source/v2/samples/versions-response.json
diff --git a/api-ref/v2/source/samples/versions-response.xml b/api-ref/source/v2/samples/versions-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/versions-response.xml
rename to api-ref/source/v2/samples/versions-response.xml
diff --git a/api-ref/v2/source/samples/volume-attach-request.json b/api-ref/source/v2/samples/volume-attach-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-attach-request.json
rename to api-ref/source/v2/samples/volume-attach-request.json
diff --git a/api-ref/v2/source/samples/volume-create-request.json b/api-ref/source/v2/samples/volume-create-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-create-request.json
rename to api-ref/source/v2/samples/volume-create-request.json
diff --git a/api-ref/v2/source/samples/volume-create-request.xml b/api-ref/source/v2/samples/volume-create-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/volume-create-request.xml
rename to api-ref/source/v2/samples/volume-create-request.xml
diff --git a/api-ref/v2/source/samples/volume-create-response.json b/api-ref/source/v2/samples/volume-create-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-create-response.json
rename to api-ref/source/v2/samples/volume-create-response.json
diff --git a/api-ref/v2/source/samples/volume-create-response.xml b/api-ref/source/v2/samples/volume-create-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/volume-create-response.xml
rename to api-ref/source/v2/samples/volume-create-response.xml
diff --git a/api-ref/v2/source/samples/volume-extend-request.json b/api-ref/source/v2/samples/volume-extend-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-extend-request.json
rename to api-ref/source/v2/samples/volume-extend-request.json
diff --git a/api-ref/v2/source/samples/volume-force-detach-request.json b/api-ref/source/v2/samples/volume-force-detach-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-force-detach-request.json
rename to api-ref/source/v2/samples/volume-force-detach-request.json
diff --git a/api-ref/v2/source/samples/volume-image-metadata-set-request.json b/api-ref/source/v2/samples/volume-image-metadata-set-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-image-metadata-set-request.json
rename to api-ref/source/v2/samples/volume-image-metadata-set-request.json
diff --git a/api-ref/v2/source/samples/volume-image-metadata-unset-request.json b/api-ref/source/v2/samples/volume-image-metadata-unset-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-image-metadata-unset-request.json
rename to api-ref/source/v2/samples/volume-image-metadata-unset-request.json
diff --git a/api-ref/v2/source/samples/volume-manage-request.json b/api-ref/source/v2/samples/volume-manage-request.json
similarity index 80%
rename from api-ref/v2/source/samples/volume-manage-request.json
rename to api-ref/source/v2/samples/volume-manage-request.json
index f84e8261d..363214784 100644
--- a/api-ref/v2/source/samples/volume-manage-request.json
+++ b/api-ref/source/v2/samples/volume-manage-request.json
@@ -2,8 +2,8 @@
     "volume": {
         "host": "geraint-VirtualBox",
         "ref": {
-            "source-volume-name": "existingLV",
-            "source-volume-id": "1234"
+            "source-name": "existingLV",
+            "source-id": "1234"
         },
         "name": "New Volume",
         "availability_zone": "az2",
diff --git a/api-ref/v2/source/samples/volume-manage-response.json b/api-ref/source/v2/samples/volume-manage-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-manage-response.json
rename to api-ref/source/v2/samples/volume-manage-response.json
diff --git a/api-ref/v2/source/samples/volume-metadata-create-request.json b/api-ref/source/v2/samples/volume-metadata-create-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-metadata-create-request.json
rename to api-ref/source/v2/samples/volume-metadata-create-request.json
diff --git a/api-ref/v2/source/samples/volume-metadata-create-response.json b/api-ref/source/v2/samples/volume-metadata-create-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-metadata-create-response.json
rename to api-ref/source/v2/samples/volume-metadata-create-response.json
diff --git a/api-ref/v2/source/samples/volume-metadata-show-response.json b/api-ref/source/v2/samples/volume-metadata-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-metadata-show-response.json
rename to api-ref/source/v2/samples/volume-metadata-show-response.json
diff --git a/api-ref/v2/source/samples/volume-metadata-show-response.xml b/api-ref/source/v2/samples/volume-metadata-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/volume-metadata-show-response.xml
rename to api-ref/source/v2/samples/volume-metadata-show-response.xml
diff --git a/api-ref/v2/source/samples/volume-metadata-update-request.json b/api-ref/source/v2/samples/volume-metadata-update-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-metadata-update-request.json
rename to api-ref/source/v2/samples/volume-metadata-update-request.json
diff --git a/api-ref/v2/source/samples/volume-metadata-update-request.xml b/api-ref/source/v2/samples/volume-metadata-update-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/volume-metadata-update-request.xml
rename to api-ref/source/v2/samples/volume-metadata-update-request.xml
diff --git a/api-ref/v2/source/samples/volume-metadata-update-response.json b/api-ref/source/v2/samples/volume-metadata-update-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-metadata-update-response.json
rename to api-ref/source/v2/samples/volume-metadata-update-response.json
diff --git a/api-ref/v2/source/samples/volume-metadata-update-response.xml b/api-ref/source/v2/samples/volume-metadata-update-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/volume-metadata-update-response.xml
rename to api-ref/source/v2/samples/volume-metadata-update-response.xml
diff --git a/api-ref/v2/source/samples/volume-replica-promote-request.json b/api-ref/source/v2/samples/volume-replica-promote-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-replica-promote-request.json
rename to api-ref/source/v2/samples/volume-replica-promote-request.json
diff --git a/api-ref/v2/source/samples/volume-replica-reenable-request.json b/api-ref/source/v2/samples/volume-replica-reenable-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-replica-reenable-request.json
rename to api-ref/source/v2/samples/volume-replica-reenable-request.json
diff --git a/api-ref/v2/source/samples/volume-show-response.json b/api-ref/source/v2/samples/volume-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-show-response.json
rename to api-ref/source/v2/samples/volume-show-response.json
diff --git a/api-ref/v2/source/samples/volume-show-response.xml b/api-ref/source/v2/samples/volume-show-response.xml
similarity index 100%
rename from api-ref/v2/source/samples/volume-show-response.xml
rename to api-ref/source/v2/samples/volume-show-response.xml
diff --git a/api-ref/v2/source/samples/volume-status-reset-request.json b/api-ref/source/v2/samples/volume-status-reset-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-status-reset-request.json
rename to api-ref/source/v2/samples/volume-status-reset-request.json
diff --git a/api-ref/v2/source/samples/volume-transfer-accept-request.json b/api-ref/source/v2/samples/volume-transfer-accept-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-transfer-accept-request.json
rename to api-ref/source/v2/samples/volume-transfer-accept-request.json
diff --git a/api-ref/v2/source/samples/volume-transfer-accept-response.json b/api-ref/source/v2/samples/volume-transfer-accept-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-transfer-accept-response.json
rename to api-ref/source/v2/samples/volume-transfer-accept-response.json
diff --git a/api-ref/v2/source/samples/volume-transfer-create-request.json b/api-ref/source/v2/samples/volume-transfer-create-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-transfer-create-request.json
rename to api-ref/source/v2/samples/volume-transfer-create-request.json
diff --git a/api-ref/v2/source/samples/volume-transfer-create-response.json b/api-ref/source/v2/samples/volume-transfer-create-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-transfer-create-response.json
rename to api-ref/source/v2/samples/volume-transfer-create-response.json
diff --git a/api-ref/v2/source/samples/volume-transfer-show-response.json b/api-ref/source/v2/samples/volume-transfer-show-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-transfer-show-response.json
rename to api-ref/source/v2/samples/volume-transfer-show-response.json
diff --git a/api-ref/v2/source/samples/volume-transfers-list-detailed-response.json b/api-ref/source/v2/samples/volume-transfers-list-detailed-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-transfers-list-detailed-response.json
rename to api-ref/source/v2/samples/volume-transfers-list-detailed-response.json
diff --git a/api-ref/v2/source/samples/volume-transfers-list-response.json b/api-ref/source/v2/samples/volume-transfers-list-response.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-transfers-list-response.json
rename to api-ref/source/v2/samples/volume-transfers-list-response.json
diff --git a/api-ref/v2/source/samples/volume-type-access-add-request.json b/api-ref/source/v2/samples/volume-type-access-add-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-type-access-add-request.json
rename to api-ref/source/v2/samples/volume-type-access-add-request.json
diff --git a/api-ref/v2/source/samples/volume-type-access-add-request.xml b/api-ref/source/v2/samples/volume-type-access-add-request.xml
similarity index 100%
rename from api-ref/v2/source/samples/volume-type-access-add-request.xml
rename to api-ref/source/v2/samples/volume-type-access-add-request.xml
diff --git a/api-ref/v2/source/samples/volume-type-access-delete-request.json b/api-ref/source/v2/samples/volume-type-access-delete-request.json
similarity index 100%
rename from api-ref/v2/source/samples/volume-type-access-delete-request.json
rename to api-ref/source/v2/samples/volume-type-access-delete-request.json
diff --git a/api-ref/v2/source/samples/volume-type-access-delete-request.xml
b/api-ref/source/v2/samples/volume-type-access-delete-request.xml similarity index 100% rename from api-ref/v2/source/samples/volume-type-access-delete-request.xml rename to api-ref/source/v2/samples/volume-type-access-delete-request.xml diff --git a/api-ref/v2/source/samples/volume-type-access-list-response.json b/api-ref/source/v2/samples/volume-type-access-list-response.json similarity index 100% rename from api-ref/v2/source/samples/volume-type-access-list-response.json rename to api-ref/source/v2/samples/volume-type-access-list-response.json diff --git a/api-ref/v2/source/samples/volume-type-create-request.json b/api-ref/source/v2/samples/volume-type-create-request.json similarity index 100% rename from api-ref/v2/source/samples/volume-type-create-request.json rename to api-ref/source/v2/samples/volume-type-create-request.json diff --git a/api-ref/v2/source/samples/volume-type-create-request.xml b/api-ref/source/v2/samples/volume-type-create-request.xml similarity index 100% rename from api-ref/v2/source/samples/volume-type-create-request.xml rename to api-ref/source/v2/samples/volume-type-create-request.xml diff --git a/api-ref/v2/source/samples/volume-type-show-request.json b/api-ref/source/v2/samples/volume-type-show-request.json similarity index 100% rename from api-ref/v2/source/samples/volume-type-show-request.json rename to api-ref/source/v2/samples/volume-type-show-request.json diff --git a/api-ref/v2/source/samples/volume-type-show-request.xml b/api-ref/source/v2/samples/volume-type-show-request.xml similarity index 100% rename from api-ref/v2/source/samples/volume-type-show-request.xml rename to api-ref/source/v2/samples/volume-type-show-request.xml diff --git a/api-ref/v2/source/samples/volume-type-show-response.json b/api-ref/source/v2/samples/volume-type-show-response.json similarity index 100% rename from api-ref/v2/source/samples/volume-type-show-response.json rename to api-ref/source/v2/samples/volume-type-show-response.json diff --git a/api-ref/v2/source/samples/volume-type-show-response.xml b/api-ref/source/v2/samples/volume-type-show-response.xml similarity index 100% rename from api-ref/v2/source/samples/volume-type-show-response.xml rename to api-ref/source/v2/samples/volume-type-show-response.xml diff --git a/api-ref/v2/source/samples/volume-type-update-request.json b/api-ref/source/v2/samples/volume-type-update-request.json similarity index 100% rename from api-ref/v2/source/samples/volume-type-update-request.json rename to api-ref/source/v2/samples/volume-type-update-request.json diff --git a/api-ref/v2/source/samples/volume-type-update-request.xml b/api-ref/source/v2/samples/volume-type-update-request.xml similarity index 100% rename from api-ref/v2/source/samples/volume-type-update-request.xml rename to api-ref/source/v2/samples/volume-type-update-request.xml diff --git a/api-ref/v2/source/samples/volume-types-list-response.json b/api-ref/source/v2/samples/volume-types-list-response.json similarity index 100% rename from api-ref/v2/source/samples/volume-types-list-response.json rename to api-ref/source/v2/samples/volume-types-list-response.json diff --git a/api-ref/v2/source/samples/volume-types-list-response.xml b/api-ref/source/v2/samples/volume-types-list-response.xml similarity index 100% rename from api-ref/v2/source/samples/volume-types-list-response.xml rename to api-ref/source/v2/samples/volume-types-list-response.xml diff --git a/api-ref/v2/source/samples/volume-unmanage-request.json b/api-ref/source/v2/samples/volume-unmanage-request.json similarity 
index 100% rename from api-ref/v2/source/samples/volume-unmanage-request.json rename to api-ref/source/v2/samples/volume-unmanage-request.json diff --git a/api-ref/v2/source/samples/volume-update-request.json b/api-ref/source/v2/samples/volume-update-request.json similarity index 100% rename from api-ref/v2/source/samples/volume-update-request.json rename to api-ref/source/v2/samples/volume-update-request.json diff --git a/api-ref/v2/source/samples/volume-update-request.xml b/api-ref/source/v2/samples/volume-update-request.xml similarity index 100% rename from api-ref/v2/source/samples/volume-update-request.xml rename to api-ref/source/v2/samples/volume-update-request.xml diff --git a/api-ref/v2/source/samples/volume-update-response.json b/api-ref/source/v2/samples/volume-update-response.json similarity index 100% rename from api-ref/v2/source/samples/volume-update-response.json rename to api-ref/source/v2/samples/volume-update-response.json diff --git a/api-ref/v2/source/samples/volume-update-response.xml b/api-ref/source/v2/samples/volume-update-response.xml similarity index 100% rename from api-ref/v2/source/samples/volume-update-response.xml rename to api-ref/source/v2/samples/volume-update-response.xml diff --git a/api-ref/v2/source/samples/volumes-list-detailed-response.json b/api-ref/source/v2/samples/volumes-list-detailed-response.json similarity index 100% rename from api-ref/v2/source/samples/volumes-list-detailed-response.json rename to api-ref/source/v2/samples/volumes-list-detailed-response.json diff --git a/api-ref/v2/source/samples/volumes-list-detailed-response.xml b/api-ref/source/v2/samples/volumes-list-detailed-response.xml similarity index 100% rename from api-ref/v2/source/samples/volumes-list-detailed-response.xml rename to api-ref/source/v2/samples/volumes-list-detailed-response.xml diff --git a/api-ref/v2/source/samples/volumes-list-response.json b/api-ref/source/v2/samples/volumes-list-response.json similarity index 100% rename from api-ref/v2/source/samples/volumes-list-response.json rename to api-ref/source/v2/samples/volumes-list-response.json diff --git a/api-ref/v2/source/samples/volumes-list-response.xml b/api-ref/source/v2/samples/volumes-list-response.xml similarity index 100% rename from api-ref/v2/source/samples/volumes-list-response.xml rename to api-ref/source/v2/samples/volumes-list-response.xml diff --git a/api-ref/v2/source/volume-manage.inc b/api-ref/source/v2/volume-manage.inc similarity index 91% rename from api-ref/v2/source/volume-manage.inc rename to api-ref/source/v2/volume-manage.inc index 42151651b..5802dd3b3 100644 --- a/api-ref/v2/source/volume-manage.inc +++ b/api-ref/source/v2/volume-manage.inc @@ -18,8 +18,8 @@ Creates a Block Storage volume by using existing storage rather than allocating The caller must specify a reference to an existing storage volume in the ref parameter in the request. Although each storage driver might interpret this reference differently, the driver should -accept a reference structure that contains either a source-volume- -id or source-volume-name element, if possible. +accept a reference structure that contains either a source-id +or source-name element, if possible. The API chooses the size of the volume by rounding up the size of the existing storage volume to the next gibibyte (GiB). 
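The volume-manage hunks above rename the untyped reference keys from source-volume-id/source-volume-name to source-id/source-name. As a rough illustration of what a backend is expected to do with that ref dict, here is a minimal Python sketch; the helper name and the fallback to the legacy keys are assumptions for illustration, not code from this patch.

    def _resolve_existing_ref(existing_ref):
        """Return (key, value) naming the backend volume to manage.

        Prefers the new 'source-id'/'source-name' keys; the legacy
        'source-volume-*' spellings are checked only as an assumed
        backward-compatibility fallback.
        """
        for key in ('source-id', 'source-name',
                    'source-volume-id', 'source-volume-name'):
            if existing_ref.get(key):
                return key, existing_ref[key]
        raise ValueError("ref must contain 'source-id' or 'source-name'")

    # The request sample above would yield ('source-id', '1234'),
    # since 'source-id' is checked first.
    print(_resolve_existing_ref({"source-name": "existingLV",
                                 "source-id": "1234"}))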
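A pattern that recurs throughout the controller hunks further down (admin_actions.py, backups.py, cgsnapshots.py, and others) is the removal of per-action except-NotFound blocks in favor of the comment "Not found exception will be handled at the wsgi level". The sketch below shows the general shape of such a wsgi-level fault handler; the filter class and the NotFound stand-in are simplified assumptions, not Cinder's actual middleware.

    import webob.dec
    import webob.exc


    class NotFound(Exception):
        """Stand-in for cinder.exception.NotFound and its subclasses."""


    class FaultFilter(object):
        """Wrap a WSGI app and convert any NotFound raised by a
        controller into one consistent HTTP 404 response."""

        def __init__(self, app):
            self.app = app

        @webob.dec.wsgify
        def __call__(self, req):
            try:
                return req.get_response(self.app)
            except NotFound as exc:
                return webob.exc.HTTPNotFound(explanation=str(exc))

Moving the try/except out of every controller method is why the hunks below can simply let calls such as self.volume_api.get() raise.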
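The services.py hunk below also adds a microversion-gated response field ('cluster', introduced at API version 3.7). The following self-contained sketch shows that gating technique; the APIVersionRequest class here is a simplified stand-in for the real version object, whose matches() also accepts an optional maximum version.

    class APIVersionRequest(object):
        """Simplified stand-in for the request's microversion object."""

        def __init__(self, ver_str):
            self.ver = tuple(int(p) for p in ver_str.split('.'))

        def matches(self, min_ver):
            # Tuple comparison handles multi-digit parts, e.g. '3.10'.
            return self.ver >= tuple(int(p) for p in min_ver.split('.'))


    def service_view(svc, api_version_request):
        ret_fields = {'binary': svc['binary'], 'host': svc['host']}
        if api_version_request.matches('3.7'):
            # Cluster support was added in v3.7.
            ret_fields['cluster'] = svc['cluster_name']
        return ret_fields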
diff --git a/api-ref/v2/source/volume-type-access.inc b/api-ref/source/v2/volume-type-access.inc similarity index 100% rename from api-ref/v2/source/volume-type-access.inc rename to api-ref/source/v2/volume-type-access.inc diff --git a/api-ref/v2/source/volumes-v2-extensions.inc b/api-ref/source/v2/volumes-v2-extensions.inc similarity index 100% rename from api-ref/v2/source/volumes-v2-extensions.inc rename to api-ref/source/v2/volumes-v2-extensions.inc diff --git a/api-ref/v2/source/volumes-v2-snapshots.inc b/api-ref/source/v2/volumes-v2-snapshots.inc similarity index 100% rename from api-ref/v2/source/volumes-v2-snapshots.inc rename to api-ref/source/v2/volumes-v2-snapshots.inc diff --git a/api-ref/v2/source/volumes-v2-types.inc b/api-ref/source/v2/volumes-v2-types.inc similarity index 100% rename from api-ref/v2/source/volumes-v2-types.inc rename to api-ref/source/v2/volumes-v2-types.inc diff --git a/api-ref/v2/source/volumes-v2-versions.inc b/api-ref/source/v2/volumes-v2-versions.inc similarity index 100% rename from api-ref/v2/source/volumes-v2-versions.inc rename to api-ref/source/v2/volumes-v2-versions.inc diff --git a/api-ref/v2/source/volumes-v2-volumes-actions.inc b/api-ref/source/v2/volumes-v2-volumes-actions.inc similarity index 100% rename from api-ref/v2/source/volumes-v2-volumes-actions.inc rename to api-ref/source/v2/volumes-v2-volumes-actions.inc diff --git a/api-ref/v2/source/volumes-v2-volumes.inc b/api-ref/source/v2/volumes-v2-volumes.inc similarity index 100% rename from api-ref/v2/source/volumes-v2-volumes.inc rename to api-ref/source/v2/volumes-v2-volumes.inc diff --git a/api-ref/v2/source/conf.py b/api-ref/v2/source/conf.py deleted file mode 100644 index c013c7830..000000000 --- a/api-ref/v2/source/conf.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# cinder documentation build configuration file, created by -# sphinx-quickstart on Sat May 1 15:17:47 2010. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import subprocess -import sys - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -extensions = [ - 'os_api_ref', - 'oslosphinx', -] - -# The suffix of source filenames. 
-source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Cinder API Reference' -copyright = u'OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -from cinder.version import version_info -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. -version = version_info.version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. 
-# html_last_updated_fmt = '%b %d, %Y' -git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", - "-n1"] -html_last_updated_fmt = subprocess.Popen( - git_cmd, stdout=subprocess.PIPE).communicate()[0] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'cinderdoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Cinder.tex', u'OpenStack Block Storage API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/cinder/api/common.py b/cinder/api/common.py index ff5a4ae5a..c39770cf9 100644 --- a/cinder/api/common.py +++ b/cinder/api/common.py @@ -42,13 +42,13 @@ api_common_opts = [ cfg.ListOpt('query_volume_filters', default=['name', 'status', 'metadata', 'availability_zone', - 'bootable'], + 'bootable', 'group_id'], help="Volume filter options which " "non-admin user could use to " "query volumes. 
Default values " "are: ['name', 'status', " "'metadata', 'availability_zone' ," - "'bootable']") + "'bootable', 'group_id']") ] CONF = cfg.CONF diff --git a/cinder/api/contrib/admin_actions.py b/cinder/api/contrib/admin_actions.py index 1f68476db..a558bda71 100644 --- a/cinder/api/contrib/admin_actions.py +++ b/cinder/api/contrib/admin_actions.py @@ -102,12 +102,10 @@ class AdminController(wsgi.Controller): notifier.info(context, self.collection + '.reset_status.start', notifier_info) - try: - self._update(context, id, update) - if update.get('attach_status') == 'detached': - _clean_volume_attachment(context, id) - except exception.VolumeNotFound as e: - raise exc.HTTPNotFound(explanation=e.msg) + # Not found exception will be handled at the wsgi level + self._update(context, id, update) + if update.get('attach_status') == 'detached': + _clean_volume_attachment(context, id) notifier.info(context, self.collection + '.reset_status.end', notifier_info) @@ -119,10 +117,8 @@ class AdminController(wsgi.Controller): """Delete a resource, bypassing the check that it must be available.""" context = req.environ['cinder.context'] self.authorize(context, 'force_delete') - try: - resource = self._get(context, id) - except exception.VolumeNotFound as e: - raise exc.HTTPNotFound(explanation=e.msg) + # Not found exception will be handled at the wsgi level + resource = self._get(context, id) self._delete(context, resource, force=True) return webob.Response(status_int=202) @@ -193,10 +189,8 @@ class VolumeAdminController(AdminController): """Roll back a bad detach after the volume been disconnected.""" context = req.environ['cinder.context'] self.authorize(context, 'force_detach') - try: - volume = self._get(context, id) - except exception.VolumeNotFound as e: - raise exc.HTTPNotFound(explanation=e.msg) + # Not found exception will be handled at the wsgi level + volume = self._get(context, id) try: connector = body['os-force_detach'].get('connector', None) except KeyError: @@ -232,10 +226,8 @@ class VolumeAdminController(AdminController): """Migrate a volume to the specified host.""" context = req.environ['cinder.context'] self.authorize(context, 'migrate_volume') - try: - volume = self._get(context, id) - except exception.VolumeNotFound as e: - raise exc.HTTPNotFound(explanation=e.msg) + # Not found exception will be handled at the wsgi level + volume = self._get(context, id) params = body['os-migrate_volume'] try: host = params['host'] @@ -252,20 +244,16 @@ class VolumeAdminController(AdminController): """Complete an in-progress migration.""" context = req.environ['cinder.context'] self.authorize(context, 'migrate_volume_completion') - try: - volume = self._get(context, id) - except exception.VolumeNotFound as e: - raise exc.HTTPNotFound(explanation=e.msg) + # Not found exception will be handled at the wsgi level + volume = self._get(context, id) params = body['os-migrate_volume_completion'] try: new_volume_id = params['new_volume'] except KeyError: raise exc.HTTPBadRequest( explanation=_("Must specify 'new_volume'")) - try: - new_volume = self._get(context, new_volume_id) - except exception.VolumeNotFound as e: - raise exc.HTTPNotFound(explanation=e.msg) + # Not found exception will be handled at the wsgi level + new_volume = self._get(context, new_volume_id) error = params.get('error', False) ret = self.volume_api.migrate_volume_completion(context, volume, new_volume, error) @@ -323,11 +311,9 @@ class BackupAdminController(AdminController): notifier.info(context, self.collection + '.reset_status.start', 
notifier_info) - try: - self.backup_api.reset_status(context=context, backup_id=id, - status=update['status']) - except exception.BackupNotFound as e: - raise exc.HTTPNotFound(explanation=e.msg) + # Not found exception will be handled at the wsgi level + self.backup_api.reset_status(context=context, backup_id=id, + status=update['status']) return webob.Response(status_int=202) diff --git a/cinder/api/contrib/backups.py b/cinder/api/contrib/backups.py index 652f38f85..2f120a2be 100644 --- a/cinder/api/contrib/backups.py +++ b/cinder/api/contrib/backups.py @@ -47,11 +47,9 @@ class BackupsController(wsgi.Controller): LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] - try: - backup = self.backup_api.get(context, backup_id=id) - req.cache_db_backup(backup) - except exception.BackupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + backup = self.backup_api.get(context, backup_id=id) + req.cache_db_backup(backup) return self._view_builder.detail(req, backup) @@ -60,13 +58,12 @@ class BackupsController(wsgi.Controller): LOG.debug('Delete called for member %s.', id) context = req.environ['cinder.context'] - LOG.info(_LI('Delete backup with id: %s'), id, context=context) + LOG.info(_LI('Delete backup with id: %s'), id) try: backup = self.backup_api.get(context, id) self.backup_api.delete(context, backup) - except exception.BackupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) @@ -97,8 +94,7 @@ class BackupsController(wsgi.Controller): self._get_backup_filter_options()) if 'name' in filters: - filters['display_name'] = filters['name'] - del filters['name'] + filters['display_name'] = filters.pop('name') backups = self.backup_api.get_all(context, search_opts=filters, marker=marker, @@ -157,9 +153,7 @@ class BackupsController(wsgi.Controller): except (exception.InvalidVolume, exception.InvalidSnapshot) as error: raise exc.HTTPBadRequest(explanation=error.msg) - except (exception.VolumeNotFound, - exception.SnapshotNotFound) as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Other not found exceptions will be handled at the wsgi level except exception.ServiceNotFound as error: raise exc.HTTPInternalServerError(explanation=error.msg) @@ -187,16 +181,13 @@ class BackupsController(wsgi.Controller): backup_id=id, volume_id=volume_id, name=name) + # Not found exception will be handled at the wsgi level except exception.InvalidInput as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) - except exception.BackupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) except exception.VolumeSizeExceedsAvailableQuota as error: raise exc.HTTPRequestEntityTooLarge( explanation=error.msg, headers={'Retry-After': '0'}) @@ -216,8 +207,7 @@ class BackupsController(wsgi.Controller): try: backup_info = self.backup_api.export_record(context, id) - except exception.BackupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level except exception.InvalidBackup as error: raise 
exc.HTTPBadRequest(explanation=error.msg) @@ -247,10 +237,9 @@ class BackupsController(wsgi.Controller): new_backup = self.backup_api.import_record(context, backup_service, backup_url) - except exception.BackupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) + # Other Not found exceptions will be handled at the wsgi level except exception.ServiceNotFound as error: raise exc.HTTPInternalServerError(explanation=error.msg) diff --git a/cinder/api/contrib/cgsnapshots.py b/cinder/api/contrib/cgsnapshots.py index 433940f83..082d996e0 100644 --- a/cinder/api/contrib/cgsnapshots.py +++ b/cinder/api/contrib/cgsnapshots.py @@ -45,12 +45,10 @@ class CgsnapshotsController(wsgi.Controller): LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] - try: - cgsnapshot = self.cgsnapshot_api.get_cgsnapshot( - context, - cgsnapshot_id=id) - except exception.CgSnapshotNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + cgsnapshot = self.cgsnapshot_api.get_cgsnapshot( + context, + cgsnapshot_id=id) return self._view_builder.detail(req, cgsnapshot) @@ -59,15 +57,16 @@ class CgsnapshotsController(wsgi.Controller): LOG.debug('delete called for member %s', id) context = req.environ['cinder.context'] - LOG.info(_LI('Delete cgsnapshot with id: %s'), id, context=context) + LOG.info(_LI('Delete cgsnapshot with id: %s'), id) try: cgsnapshot = self.cgsnapshot_api.get_cgsnapshot( context, cgsnapshot_id=id) self.cgsnapshot_api.delete_cgsnapshot(context, cgsnapshot) - except exception.CgSnapshotNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + except exception.CgSnapshotNotFound: + # Not found exception will be handled at the wsgi level + raise except exception.InvalidCgSnapshot as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) except Exception: @@ -112,10 +111,8 @@ class CgsnapshotsController(wsgi.Controller): msg = _("'consistencygroup_id' must be specified") raise exc.HTTPBadRequest(explanation=msg) - try: - group = self.cgsnapshot_api.get(context, group_id) - except exception.ConsistencyGroupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + group = self.cgsnapshot_api.get(context, group_id) name = cgsnapshot.get('name', None) description = cgsnapshot.get('description', None) @@ -127,10 +124,9 @@ class CgsnapshotsController(wsgi.Controller): try: new_cgsnapshot = self.cgsnapshot_api.create_cgsnapshot( context, group, name, description) + # Not found exception will be handled at the wsgi level except exception.InvalidCgSnapshot as error: raise exc.HTTPBadRequest(explanation=error.msg) - except exception.CgSnapshotNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) retval = self._view_builder.summary(req, new_cgsnapshot) diff --git a/cinder/api/contrib/consistencygroups.py b/cinder/api/contrib/consistencygroups.py index a9abc3aef..ec0bc2b53 100644 --- a/cinder/api/contrib/consistencygroups.py +++ b/cinder/api/contrib/consistencygroups.py @@ -45,12 +45,10 @@ class ConsistencyGroupsController(wsgi.Controller): LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] - try: - consistencygroup = self.consistencygroup_api.get( - context, - group_id=id) - except exception.ConsistencyGroupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception 
will be handled at the wsgi level + consistencygroup = self.consistencygroup_api.get( + context, + group_id=id) return self._view_builder.detail(req, consistencygroup) @@ -73,14 +71,12 @@ class ConsistencyGroupsController(wsgi.Controller): msg = _("Invalid value '%s' for force.") % force raise exc.HTTPBadRequest(explanation=msg) - LOG.info(_LI('Delete consistency group with id: %s'), id, - context=context) + LOG.info(_LI('Delete consistency group with id: %s'), id) try: group = self.consistencygroup_api.get(context, id) self.consistencygroup_api.delete(context, group, force) - except exception.ConsistencyGroupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level except exception.InvalidConsistencyGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) @@ -132,19 +128,17 @@ class ConsistencyGroupsController(wsgi.Controller): availability_zone = consistencygroup.get('availability_zone', None) LOG.info(_LI("Creating consistency group %(name)s."), - {'name': name}, - context=context) + {'name': name}) try: new_consistencygroup = self.consistencygroup_api.create( context, name, description, volume_types, availability_zone=availability_zone) + # Not found exception will be handled at the wsgi level except exception.InvalidConsistencyGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeType as error: raise exc.HTTPBadRequest(explanation=error.msg) - except exception.ConsistencyGroupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) retval = self._view_builder.summary(req, new_consistencygroup) return retval @@ -182,23 +176,20 @@ class ConsistencyGroupsController(wsgi.Controller): if cgsnapshot_id: LOG.info(_LI("Creating consistency group %(name)s from " "cgsnapshot %(snap)s."), - {'name': name, 'snap': cgsnapshot_id}, - context=context) + {'name': name, 'snap': cgsnapshot_id}) elif source_cgid: LOG.info(_LI("Creating consistency group %(name)s from " "source consistency group %(source_cgid)s."), - {'name': name, 'source_cgid': source_cgid}, - context=context) + {'name': name, 'source_cgid': source_cgid}) try: new_consistencygroup = self.consistencygroup_api.create_from_src( context, name, description, cgsnapshot_id, source_cgid) except exception.InvalidConsistencyGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) - except exception.CgSnapshotNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except exception.ConsistencyGroupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + except exception.NotFound: + # Not found exception will be handled at the wsgi level + raise except exception.CinderException as error: raise exc.HTTPBadRequest(explanation=error.msg) @@ -222,18 +213,13 @@ class ConsistencyGroupsController(wsgi.Controller): 'name': name, 'description': description, 'add_volumes': add_volumes, - 'remove_volumes': remove_volumes}, - context=context) + 'remove_volumes': remove_volumes}) - try: - group = self.consistencygroup_api.get(context, id) - self.consistencygroup_api.update( - context, group, name, description, - add_volumes, remove_volumes, allow_empty) - except exception.ConsistencyGroupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except exception.InvalidConsistencyGroup as error: - raise exc.HTTPBadRequest(explanation=error.msg) + # Handle relevant exceptions at wsgi level + group = self.consistencygroup_api.get(context, id) + self.consistencygroup_api.update(context, group, name, 
description, + add_volumes, remove_volumes, + allow_empty) def update(self, req, id, body): """Update the consistency group. diff --git a/cinder/api/contrib/hosts.py b/cinder/api/contrib/hosts.py index 9281d5966..31c2ed479 100644 --- a/cinder/api/contrib/hosts.py +++ b/cinder/api/contrib/hosts.py @@ -22,6 +22,7 @@ import webob.exc from cinder.api import extensions from cinder.api.openstack import wsgi +from cinder.common import constants from cinder import db from cinder import exception from cinder.i18n import _, _LI @@ -79,9 +80,7 @@ def check_host(fn): hosts = [h["host_name"] for h in listed_hosts] if id in hosts: return fn(self, req, id, *args, **kwargs) - else: - message = _("Host '%s' could not be found.") % id - raise webob.exc.HTTPNotFound(explanation=message) + raise exception.HostNotFound(host=id) return wrapped @@ -149,11 +148,9 @@ class HostController(wsgi.Controller): msg = _("Describe-resource is admin only functionality") raise webob.exc.HTTPForbidden(explanation=msg) - try: - host_ref = objects.Service.get_by_host_and_topic( - context, host, CONF.volume_topic) - except exception.ServiceNotFound: - raise webob.exc.HTTPNotFound(explanation=_("Host not found")) + # Not found exception will be handled at the wsgi level + host_ref = objects.Service.get_by_host_and_topic( + context, host, constants.VOLUME_TOPIC) # Getting total available/used resource # TODO(jdg): Add summary info for Snapshots diff --git a/cinder/api/contrib/qos_specs_manage.py b/cinder/api/contrib/qos_specs_manage.py index 558418990..b24d220f3 100644 --- a/cinder/api/contrib/qos_specs_manage.py +++ b/cinder/api/contrib/qos_specs_manage.py @@ -36,10 +36,8 @@ authorize = extensions.extension_authorizer('volume', 'qos_specs_manage') def _check_specs(context, specs_id): - try: - qos_specs.get_qos_specs(context, specs_id) - except exception.QoSSpecsNotFound as ex: - raise webob.exc.HTTPNotFound(explanation=six.text_type(ex)) + # Not found exception will be handled at the wsgi level + qos_specs.get_qos_specs(context, specs_id) class QoSSpecsController(wsgi.Controller): @@ -88,10 +86,14 @@ class QoSSpecsController(wsgi.Controller): self.validate_string_length(name, 'name', min_length=1, max_length=255, remove_whitespaces=True) name = name.strip() + # Remove name from 'specs' since passing it in as separate param + del specs['name'] + + # Validate the key-value pairs in the qos spec. 
+ utils.validate_dictionary_string_length(specs) try: - qos_specs.create(context, name, specs) - spec = qos_specs.get_qos_specs_by_name(context, name) + spec = qos_specs.create(context, name, specs) notifier_info = dict(name=name, specs=specs) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.create', @@ -130,18 +132,13 @@ class QoSSpecsController(wsgi.Controller): rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.update', notifier_info) - except exception.QoSSpecsNotFound as err: + except (exception.QoSSpecsNotFound, exception.InvalidQoSSpecs) as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.update', notifier_err) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) - except exception.InvalidQoSSpecs as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.update', - notifier_err) - raise webob.exc.HTTPBadRequest(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + raise except exception.QoSSpecsUpdateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, @@ -157,10 +154,8 @@ class QoSSpecsController(wsgi.Controller): context = req.environ['cinder.context'] authorize(context) - try: - spec = qos_specs.get_qos_specs(context, id) - except exception.QoSSpecsNotFound as err: - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + spec = qos_specs.get_qos_specs(context, id) return self._view_builder.detail(req, spec) @@ -185,7 +180,8 @@ class QoSSpecsController(wsgi.Controller): self._notify_qos_specs_error(context, 'qos_specs.delete', notifier_err) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + raise except exception.QoSSpecsInUse as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, @@ -217,18 +213,13 @@ class QoSSpecsController(wsgi.Controller): notifier_info = dict(id=id) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.delete_keys', notifier_info) - except exception.QoSSpecsNotFound as err: + except exception.NotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.delete_keys', notifier_err) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) - except exception.QoSSpecsKeyNotFound as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.delete_keys', - notifier_err) - raise webob.exc.HTTPBadRequest(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + raise return webob.Response(status_int=202) @@ -250,7 +241,8 @@ class QoSSpecsController(wsgi.Controller): self._notify_qos_specs_error(context, 'qos_specs.associations', notifier_err) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + raise except exception.CinderException as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, @@ -284,18 +276,13 @@ class QoSSpecsController(wsgi.Controller): rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.associate', notifier_info) - except exception.VolumeTypeNotFound as err: + except exception.NotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) - raise 
webob.exc.HTTPNotFound(explanation=six.text_type(err)) - except exception.QoSSpecsNotFound as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.associate', - notifier_err) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + raise except exception.InvalidVolumeType as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, @@ -338,18 +325,13 @@ class QoSSpecsController(wsgi.Controller): rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.disassociate', notifier_info) - except exception.VolumeTypeNotFound as err: + except exception.NotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate', notifier_err) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) - except exception.QoSSpecsNotFound as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.disassociate', - notifier_err) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + raise except exception.QoSSpecsDisassociateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, @@ -378,7 +360,8 @@ class QoSSpecsController(wsgi.Controller): self._notify_qos_specs_error(context, 'qos_specs.disassociate_all', notifier_err) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + raise except exception.QoSSpecsDisassociateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, diff --git a/cinder/api/contrib/quotas.py b/cinder/api/contrib/quotas.py index 0af8496ea..a03384ff1 100644 --- a/cinder/api/contrib/quotas.py +++ b/cinder/api/contrib/quotas.py @@ -15,6 +15,8 @@ import webob +from oslo_utils import strutils + from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import db @@ -25,11 +27,6 @@ from cinder import quota from cinder import quota_utils from cinder import utils -from oslo_config import cfg -from oslo_utils import strutils - - -CONF = cfg.CONF QUOTAS = quota.QUOTAS NON_QUOTA_KEYS = ['tenant_id', 'id'] @@ -85,6 +82,10 @@ class QuotaSetsController(wsgi.Controller): :param parent_id: The parent id of the project in which the user want to perform an update or delete operation. """ + if context_project.is_admin_project: + # The calling project has admin privileges and should be able + # to operate on all quotas. + return if context_project.parent_id and parent_id != context_project.id: msg = _("Update and delete quota operations can only be made " "by an admin of immediate parent or by the CLOUD admin.") @@ -105,15 +106,20 @@ class QuotaSetsController(wsgi.Controller): def _authorize_show(self, context_project, target_project): """Checks if show is allowed in the current hierarchy. - With hierarchical projects, are allowed to perform quota show operation - users with admin role in, at least, one of the following projects: the - current project; the immediate parent project; or the root project. + With hierarchical projects, users are allowed to perform a quota show + operation if they have the cloud admin role or if they belong to at + least one of the following projects: the target project, its immediate + parent project, or the root project of its hierarchy. 
:param context_project: The project in which the user is scoped to. :param target_project: The project in which the user wants to perform a show operation. """ + if context_project.is_admin_project: + # The calling project has admin privileges and should be able + # to view all quotas. + return if target_project.parent_id: if target_project.id != context_project.id: if not self._is_descendant(target_project.id, @@ -170,7 +176,8 @@ class QuotaSetsController(wsgi.Controller): target_project = quota_utils.get_project_hierarchy( context, target_project_id) context_project = quota_utils.get_project_hierarchy( - context, context.project_id, subtree_as_ids=True) + context, context.project_id, subtree_as_ids=True, + is_admin_project=context.is_admin) self._authorize_show(context_project, target_project) @@ -238,7 +245,8 @@ class QuotaSetsController(wsgi.Controller): # Get the children of the project which the token is scoped to # in order to know if the target_project is in its hierarchy. context_project = quota_utils.get_project_hierarchy( - context, context.project_id, subtree_as_ids=True) + context, context.project_id, subtree_as_ids=True, + is_admin_project=context.is_admin) self._authorize_update_or_delete(context_project, target_project.id, parent_id) diff --git a/cinder/api/contrib/services.py b/cinder/api/contrib/services.py index cddb6b8fa..c6cbc5a90 100644 --- a/cinder/api/contrib/services.py +++ b/cinder/api/contrib/services.py @@ -85,6 +85,11 @@ class ServiceController(wsgi.Controller): 'zone': svc.availability_zone, 'status': active, 'state': art, 'updated_at': updated_at} + + # On V3.7 we added cluster support + if req.api_version_request.matches('3.7'): + ret_fields['cluster'] = svc.cluster_name + if detailed: ret_fields['disabled_reason'] = svc.disabled_reason if svc.binary == "cinder-volume": @@ -148,13 +153,12 @@ class ServiceController(wsgi.Controller): ) return webob.Response(status_int=202) else: - raise webob.exc.HTTPNotFound(explanation=_("Unknown action")) + raise exception.InvalidInput(reason=_("Unknown action")) try: host = body['host'] except (TypeError, KeyError): - msg = _("Missing required element 'host' in request body.") - raise webob.exc.HTTPBadRequest(explanation=msg) + raise exception.MissingRequired(element='host') ret_val['disabled'] = disabled if id == "disable-log-reason" and ext_loaded: @@ -173,17 +177,13 @@ class ServiceController(wsgi.Controller): if not binary_key: raise webob.exc.HTTPBadRequest() - try: - svc = objects.Service.get_by_args(context, host, binary_key) - if not svc: - raise webob.exc.HTTPNotFound(explanation=_('Unknown service')) + # Not found exception will be handled at the wsgi level + svc = objects.Service.get_by_args(context, host, binary_key) - svc.disabled = ret_val['disabled'] - if 'disabled_reason' in ret_val: - svc.disabled_reason = ret_val['disabled_reason'] - svc.save() - except exception.ServiceNotFound: - raise webob.exc.HTTPNotFound(explanation=_("service not found")) + svc.disabled = ret_val['disabled'] + if 'disabled_reason' in ret_val: + svc.disabled_reason = ret_val['disabled_reason'] + svc.save() ret_val.update({'host': host, 'service': service, 'binary': binary, 'status': status}) diff --git a/cinder/api/contrib/snapshot_manage.py b/cinder/api/contrib/snapshot_manage.py index 3de07542c..159669fa9 100644 --- a/cinder/api/contrib/snapshot_manage.py +++ b/cinder/api/contrib/snapshot_manage.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from oslo_config import cfg from oslo_log import log as logging from webob import exc @@ -21,12 +20,10 @@ from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import manageable_snapshots as list_manageable_view from cinder.api.views import snapshots as snapshot_views -from cinder import exception from cinder.i18n import _ from cinder import volume as cinder_volume LOG = logging.getLogger(__name__) -CONF = cfg.CONF authorize_manage = extensions.extension_authorizer('snapshot', 'snapshot_manage') authorize_list_manageable = extensions.extension_authorizer('snapshot', @@ -107,11 +104,8 @@ class SnapshotManageController(wsgi.Controller): # Check whether volume exists volume_id = snapshot['volume_id'] - try: - volume = self.volume_api.get(context, volume_id) - except exception.VolumeNotFound: - msg = _("Volume: %s could not be found.") % volume_id - raise exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, volume_id) LOG.debug('Manage snapshot request body: %s', body) @@ -121,15 +115,12 @@ class SnapshotManageController(wsgi.Controller): snapshot_parameters['description'] = snapshot.get('description', None) snapshot_parameters['name'] = snapshot.get('name') - try: - new_snapshot = self.volume_api.manage_existing_snapshot( - context, - snapshot['ref'], - volume, - **snapshot_parameters) - except exception.ServiceNotFound: - msg = _("Service %s not found.") % CONF.volume_topic - raise exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + new_snapshot = self.volume_api.manage_existing_snapshot( + context, + snapshot['ref'], + volume, + **snapshot_parameters) return self._view_builder.detail(req, new_snapshot) diff --git a/cinder/api/contrib/snapshot_unmanage.py b/cinder/api/contrib/snapshot_unmanage.py index c9e2259df..f817746e0 100644 --- a/cinder/api/contrib/snapshot_unmanage.py +++ b/cinder/api/contrib/snapshot_unmanage.py @@ -48,14 +48,13 @@ class SnapshotUnmanageController(wsgi.Controller): context = req.environ['cinder.context'] authorize(context) - LOG.info(_LI("Unmanage snapshot with id: %s"), id, context=context) + LOG.info(_LI("Unmanage snapshot with id: %s"), id) try: snapshot = self.volume_api.get_snapshot(context, id) self.volume_api.delete_snapshot(context, snapshot, unmanage_only=True) - except exception.SnapshotNotFound as ex: - raise exc.HTTPNotFound(explanation=ex.msg) + # Not found exception will be handled at the wsgi level except exception.InvalidSnapshot as ex: raise exc.HTTPBadRequest(explanation=ex.msg) return webob.Response(status_int=202) diff --git a/cinder/api/contrib/types_extra_specs.py b/cinder/api/contrib/types_extra_specs.py index 5f9d3c25f..b6b2a63fc 100644 --- a/cinder/api/contrib/types_extra_specs.py +++ b/cinder/api/contrib/types_extra_specs.py @@ -41,10 +41,8 @@ class VolumeTypeExtraSpecsController(wsgi.Controller): return dict(extra_specs=specs_dict) def _check_type(self, context, type_id): - try: - volume_types.get_volume_type(context, type_id) - except exception.VolumeTypeNotFound as ex: - raise webob.exc.HTTPNotFound(explanation=ex.msg) + # Not found exception will be handled at the wsgi level + volume_types.get_volume_type(context, type_id) def index(self, req, type_id): """Returns the list of extra specs for a given volume type.""" @@ -62,7 +60,7 @@ class VolumeTypeExtraSpecsController(wsgi.Controller): self._check_type(context, type_id) specs = body['extra_specs'] 
self._check_key_names(specs.keys()) - utils.validate_extra_specs(specs) + utils.validate_dictionary_string_length(specs) db.volume_type_extra_specs_update_or_create(context, type_id, @@ -87,7 +85,7 @@ class VolumeTypeExtraSpecsController(wsgi.Controller): expl = _('Request body contains too many items') raise webob.exc.HTTPBadRequest(explanation=expl) self._check_key_names(body.keys()) - utils.validate_extra_specs(body) + utils.validate_dictionary_string_length(body) db.volume_type_extra_specs_update_or_create(context, type_id, @@ -108,9 +106,8 @@ class VolumeTypeExtraSpecsController(wsgi.Controller): if id in specs['extra_specs']: return {id: specs['extra_specs'][id]} else: - msg = _("Volume Type %(type_id)s has no extra spec with key " - "%(id)s.") % ({'type_id': type_id, 'id': id}) - raise webob.exc.HTTPNotFound(explanation=msg) + raise exception.VolumeTypeExtraSpecsNotFound( + volume_type_id=type_id, extra_specs_key=id) def delete(self, req, type_id, id): """Deletes an existing extra spec.""" @@ -118,10 +115,8 @@ class VolumeTypeExtraSpecsController(wsgi.Controller): self._check_type(context, type_id) authorize(context) - try: - db.volume_type_extra_specs_delete(context, type_id, id) - except exception.VolumeTypeExtraSpecsNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + db.volume_type_extra_specs_delete(context, type_id, id) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('volumeTypeExtraSpecs') diff --git a/cinder/api/contrib/types_manage.py b/cinder/api/contrib/types_manage.py index 9998da443..f9d55a7d7 100644 --- a/cinder/api/contrib/types_manage.py +++ b/cinder/api/contrib/types_manage.py @@ -58,7 +58,7 @@ class VolumeTypesManageController(wsgi.Controller): name = vol_type.get('name', None) description = vol_type.get('description') specs = vol_type.get('extra_specs', {}) - utils.validate_extra_specs(specs) + utils.validate_dictionary_string_length(specs) is_public = vol_type.get('os-volume-type-access:is_public', True) if name is None or len(name.strip()) == 0: @@ -95,7 +95,8 @@ class VolumeTypesManageController(wsgi.Controller): except exception.VolumeTypeNotFoundByName as err: self._notify_volume_type_error( context, 'volume_type.create', err, name=name) - raise webob.exc.HTTPNotFound(explanation=err.msg) + # Not found exception will be handled at the wsgi level + raise return self._view_builder.show(req, vol_type) @@ -148,7 +149,8 @@ class VolumeTypesManageController(wsgi.Controller): except exception.VolumeTypeNotFound as err: self._notify_volume_type_error( context, 'volume_type.update', err, id=id) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + raise except exception.VolumeTypeExists as err: self._notify_volume_type_error( context, 'volume_type.update', err, volume_type=vol_type) @@ -180,7 +182,8 @@ class VolumeTypesManageController(wsgi.Controller): except exception.VolumeTypeNotFound as err: self._notify_volume_type_error( context, 'volume_type.delete', err, id=id) - raise webob.exc.HTTPNotFound(explanation=err.msg) + # Not found exception will be handled at the wsgi level + raise return webob.Response(status_int=202) diff --git a/cinder/api/contrib/volume_actions.py b/cinder/api/contrib/volume_actions.py index c14598cf6..b05e65c4f 100644 --- a/cinder/api/contrib/volume_actions.py +++ b/cinder/api/contrib/volume_actions.py @@ -47,10 +47,8 @@ class VolumeActionsController(wsgi.Controller): def 
_attach(self, req, id, body): """Add attachment metadata.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) # instance uuid is an option now instance_uuid = None @@ -95,10 +93,8 @@ class VolumeActionsController(wsgi.Controller): def _detach(self, req, id, body): """Clear attachment metadata.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) attachment_id = None if body['os-detach']: @@ -123,10 +119,8 @@ class VolumeActionsController(wsgi.Controller): def _reserve(self, req, id, body): """Mark volume as reserved.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) self.volume_api.reserve_volume(context, volume) return webob.Response(status_int=202) @@ -135,10 +129,8 @@ class VolumeActionsController(wsgi.Controller): def _unreserve(self, req, id, body): """Unmark volume as reserved.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) self.volume_api.unreserve_volume(context, volume) return webob.Response(status_int=202) @@ -147,10 +139,8 @@ class VolumeActionsController(wsgi.Controller): def _begin_detaching(self, req, id, body): """Update volume status to 'detaching'.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) self.volume_api.begin_detaching(context, volume) return webob.Response(status_int=202) @@ -159,10 +149,8 @@ class VolumeActionsController(wsgi.Controller): def _roll_detaching(self, req, id, body): """Roll back volume status to 'in-use'.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) self.volume_api.roll_detaching(context, volume) return webob.Response(status_int=202) @@ -171,10 +159,8 @@ class VolumeActionsController(wsgi.Controller): def _initialize_connection(self, req, id, body): """Initialize volume attachment.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) try: connector = body['os-initialize_connection']['connector'] except KeyError: @@ -187,7 +173,7 @@ class 
VolumeActionsController(wsgi.Controller): except exception.InvalidInput as err: raise webob.exc.HTTPBadRequest( explanation=err) - except exception.VolumeBackendAPIException as error: + except exception.VolumeBackendAPIException: msg = _("Unable to fetch connection information from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) @@ -197,10 +183,8 @@ class VolumeActionsController(wsgi.Controller): def _terminate_connection(self, req, id, body): """Terminate volume attachment.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) try: connector = body['os-terminate_connection']['connector'] except KeyError: @@ -208,7 +192,7 @@ class VolumeActionsController(wsgi.Controller): explanation=_("Must specify 'connector'")) try: self.volume_api.terminate_connection(context, volume, connector) - except exception.VolumeBackendAPIException as error: + except exception.VolumeBackendAPIException: msg = _("Unable to terminate volume connection from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) return webob.Response(status_int=202) @@ -232,10 +216,8 @@ class VolumeActionsController(wsgi.Controller): msg = _("Invalid value for 'force': '%s'") % err_msg raise webob.exc.HTTPBadRequest(explanation=msg) - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) authorize(context, "upload_image") # check for valid disk-format @@ -292,10 +274,8 @@ class VolumeActionsController(wsgi.Controller): def _extend(self, req, id, body): """Extend size of volume.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) try: size = int(body['os-extend']['new_size']) @@ -314,10 +294,8 @@ class VolumeActionsController(wsgi.Controller): def _volume_readonly_update(self, req, id, body): """Update volume readonly flag.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) try: readonly_flag = body['os-update_readonly_flag']['readonly'] @@ -355,10 +333,8 @@ class VolumeActionsController(wsgi.Controller): def _set_bootable(self, req, id, body): """Update bootable status of a volume.""" context = req.environ['cinder.context'] - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) try: bootable = body['os-set_bootable']['bootable'] diff --git a/cinder/api/contrib/volume_image_metadata.py b/cinder/api/contrib/volume_image_metadata.py index 8f9c41450..7a42c8a71 100644 --- a/cinder/api/contrib/volume_image_metadata.py +++ b/cinder/api/contrib/volume_image_metadata.py @@ -37,12 +37,9 @@ class 
VolumeImageMetadataController(wsgi.Controller): self.volume_api = volume.API() def _get_image_metadata(self, context, volume_id): - try: - volume = self.volume_api.get(context, volume_id) - meta = self.volume_api.get_volume_image_metadata(context, volume) - except exception.VolumeNotFound: - msg = _('Volume with volume id %s does not exist.') % volume_id - raise webob.exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, volume_id) + meta = self.volume_api.get_volume_image_metadata(context, volume) return (volume, meta) def _add_image_metadata(self, context, resp_volume_list, image_metas=None): @@ -113,9 +110,7 @@ class VolumeImageMetadataController(wsgi.Controller): metadata, delete=False, meta_type=common.METADATA_TYPES.image) - except exception.VolumeNotFound: - msg = _('Volume with volume id %s does not exist.') % volume_id - raise webob.exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level except (ValueError, AttributeError): msg = _("Malformed request body.") raise webob.exc.HTTPBadRequest(explanation=msg) @@ -143,8 +138,7 @@ class VolumeImageMetadataController(wsgi.Controller): if key: vol, metadata = self._get_image_metadata(context, id) if key not in metadata: - msg = _("Metadata item was not found.") - raise webob.exc.HTTPNotFound(explanation=msg) + raise exception.GlanceMetadataNotFound(id=id) self.volume_api.delete_volume_metadata( context, vol, key, diff --git a/cinder/api/contrib/volume_manage.py b/cinder/api/contrib/volume_manage.py index 89ed5a825..8300a4256 100644 --- a/cinder/api/contrib/volume_manage.py +++ b/cinder/api/contrib/volume_manage.py @@ -21,7 +21,6 @@ from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.v2.views import volumes as volume_views from cinder.api.views import manageable_volumes as list_manageable_view -from cinder import exception from cinder.i18n import _ from cinder import utils from cinder import volume as cinder_volume @@ -120,16 +119,14 @@ class VolumeManageController(wsgi.Controller): kwargs = {} req_volume_type = volume.get('volume_type', None) if req_volume_type: - try: - if not uuidutils.is_uuid_like(req_volume_type): - kwargs['volume_type'] = \ - volume_types.get_volume_type_by_name( - context, req_volume_type) - else: - kwargs['volume_type'] = volume_types.get_volume_type( + # Not found exception will be handled at the wsgi level + if not uuidutils.is_uuid_like(req_volume_type): + kwargs['volume_type'] = \ + volume_types.get_volume_type_by_name( context, req_volume_type) - except exception.VolumeTypeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + else: + kwargs['volume_type'] = volume_types.get_volume_type( + context, req_volume_type) else: kwargs['volume_type'] = {} @@ -138,13 +135,14 @@ class VolumeManageController(wsgi.Controller): kwargs['metadata'] = volume.get('metadata', None) kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['bootable'] = utils.get_bool_param('bootable', volume) - try: - new_volume = self.volume_api.manage_existing(context, - volume['host'], - volume['ref'], - **kwargs) - except exception.ServiceNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + + utils.check_metadata_properties(kwargs['metadata']) + + # Not found exception will be handled at wsgi level + new_volume = self.volume_api.manage_existing(context, + volume['host'], + volume['ref'], + **kwargs) utils.add_visible_admin_metadata(new_volume) 
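The recurring change in the controllers above and below is dropping per-method try/except blocks and letting NotFound-family exceptions propagate, per the repeated comment "Not found exception will be handled at the wsgi level". A minimal sketch of what such WSGI-level handling can look like follows; ``FaultMiddleware`` and the local ``NotFound`` class are illustrative stand-ins, not cinder's actual fault-handling code:

.. code-block:: python

    # Illustrative sketch only -- not cinder's real fault handling.
    import webob.dec
    import webob.exc


    class NotFound(Exception):
        """Stand-in for cinder.exception.NotFound and its subclasses."""


    class FaultMiddleware(object):
        """Translate uncaught NotFound exceptions into 404 responses."""

        def __init__(self, application):
            self.application = application

        @webob.dec.wsgify
        def __call__(self, req):
            try:
                return req.get_response(self.application)
            except NotFound as error:
                # The exception message becomes the fault explanation,
                # just as the removed per-controller handlers did.
                return webob.exc.HTTPNotFound(explanation=str(error))

With a handler like this at the WSGI layer, a controller can call ``self.volume_api.get(context, id)`` directly and let a missing volume surface as a 404, which is exactly the simplification applied throughout this patch.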
diff --git a/cinder/api/contrib/volume_transfer.py b/cinder/api/contrib/volume_transfer.py index fc7745c3b..8531ae8c8 100644 --- a/cinder/api/contrib/volume_transfer.py +++ b/cinder/api/contrib/volume_transfer.py @@ -41,10 +41,8 @@ class VolumeTransferController(wsgi.Controller): """Return data about active transfers.""" context = req.environ['cinder.context'] - try: - transfer = self.transfer_api.get(context, transfer_id=id) - except exception.TransferNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + transfer = self.transfer_api.get(context, transfer_id=id) return self._view_builder.detail(req, transfer) @@ -97,15 +95,13 @@ class VolumeTransferController(wsgi.Controller): name = name.strip() LOG.info(_LI("Creating transfer of volume %s"), - volume_id, - context=context) + volume_id) try: new_transfer = self.transfer_api.create(context, volume_id, name) + # Not found exception will be handled at the wsgi level except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) transfer = self._view_builder.create(req, dict(new_transfer)) @@ -127,8 +123,7 @@ class VolumeTransferController(wsgi.Controller): msg = _("Incorrect request body format") raise exc.HTTPBadRequest(explanation=msg) - LOG.info(_LI("Accepting transfer %s"), transfer_id, - context=context) + LOG.info(_LI("Accepting transfer %s"), transfer_id) try: accepted_transfer = self.transfer_api.accept(context, transfer_id, @@ -148,12 +143,10 @@ class VolumeTransferController(wsgi.Controller): """Delete a transfer.""" context = req.environ['cinder.context'] - LOG.info(_LI("Delete transfer with id: %s"), id, context=context) + LOG.info(_LI("Delete transfer with id: %s"), id) - try: - self.transfer_api.delete(context, transfer_id=id) - except exception.TransferNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + self.transfer_api.delete(context, transfer_id=id) return webob.Response(status_int=202) diff --git a/cinder/api/contrib/volume_type_access.py b/cinder/api/contrib/volume_type_access.py index 569379009..e45895118 100644 --- a/cinder/api/contrib/volume_type_access.py +++ b/cinder/api/contrib/volume_type_access.py @@ -41,22 +41,17 @@ def _marshall_volume_type_access(vol_type): class VolumeTypeAccessController(object): """The volume type access API controller for the OpenStack API.""" - def __init__(self): - super(VolumeTypeAccessController, self).__init__() - def index(self, req, type_id): context = req.environ['cinder.context'] authorize(context) - try: - vol_type = volume_types.get_volume_type( - context, type_id, expected_fields=['projects']) - except exception.VolumeTypeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + vol_type = volume_types.get_volume_type( + context, type_id, expected_fields=['projects']) if vol_type['is_public']: expl = _("Access list not available for public volume types.") - raise webob.exc.HTTPNotFound(explanation=expl) + raise exception.VolumeTypeAccessNotFound(message=expl) return _marshall_volume_type_access(vol_type) @@ -120,10 +115,9 @@ class VolumeTypeActionController(wsgi.Controller): try: volume_types.add_volume_type_access(context, id, project) + # Not found exception will be handled at the wsgi level except exception.VolumeTypeAccessExists as err: 
raise webob.exc.HTTPConflict(explanation=six.text_type(err)) - except exception.VolumeTypeNotFound as err: - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) return webob.Response(status_int=202) @wsgi.action('removeProjectAccess') @@ -133,11 +127,8 @@ class VolumeTypeActionController(wsgi.Controller): self._check_body(body, 'removeProjectAccess') project = body['removeProjectAccess']['project'] - try: - volume_types.remove_volume_type_access(context, id, project) - except (exception.VolumeTypeNotFound, - exception.VolumeTypeAccessNotFound) as err: - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + # Not found exception will be handled at the wsgi level + volume_types.remove_volume_type_access(context, id, project) return webob.Response(status_int=202) diff --git a/cinder/api/contrib/volume_type_encryption.py b/cinder/api/contrib/volume_type_encryption.py index 9ae05ce50..bdd037a18 100644 --- a/cinder/api/contrib/volume_type_encryption.py +++ b/cinder/api/contrib/volume_type_encryption.py @@ -45,10 +45,8 @@ class VolumeTypeEncryptionController(wsgi.Controller): return encryption_specs def _check_type(self, context, type_id): - try: - volume_types.get_volume_type(context, type_id) - except exception.VolumeTypeNotFound as ex: - raise webob.exc.HTTPNotFound(explanation=ex.msg) + # Not found exception will be handled at the wsgi level + volume_types.get_volume_type(context, type_id) def _check_encryption_input(self, encryption, create=True): if encryption.get('key_size') is not None: @@ -153,7 +151,7 @@ class VolumeTypeEncryptionController(wsgi.Controller): encryption_specs = self._get_volume_type_encryption(context, type_id) if id not in encryption_specs: - raise webob.exc.HTTPNotFound() + raise exception.VolumeTypeEncryptionNotFound(type_id=type_id) return {id: encryption_specs[id]} @@ -166,10 +164,8 @@ class VolumeTypeEncryptionController(wsgi.Controller): expl = _('Cannot delete encryption specs. 
Volume type in use.') raise webob.exc.HTTPBadRequest(explanation=expl) else: - try: + # Not found exception will be handled at the wsgi level db.volume_type_encryption_delete(context, type_id) - except exception.VolumeTypeEncryptionNotFound as ex: - raise webob.exc.HTTPNotFound(explanation=ex.msg) return webob.Response(status_int=202) diff --git a/cinder/api/contrib/volume_unmanage.py b/cinder/api/contrib/volume_unmanage.py index 0e156d76e..48b6b4d5f 100644 --- a/cinder/api/contrib/volume_unmanage.py +++ b/cinder/api/contrib/volume_unmanage.py @@ -14,11 +14,9 @@ from oslo_log import log as logging import webob -from webob import exc from cinder.api import extensions from cinder.api.openstack import wsgi -from cinder import exception from cinder.i18n import _LI from cinder import volume @@ -51,13 +49,11 @@ class VolumeUnmanageController(wsgi.Controller): context = req.environ['cinder.context'] authorize(context) - LOG.info(_LI("Unmanage volume with id: %s"), id, context=context) + LOG.info(_LI("Unmanage volume with id: %s"), id) - try: - vol = self.volume_api.get(context, id) - self.volume_api.delete(context, vol, unmanage_only=True) - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + vol = self.volume_api.get(context, id) + self.volume_api.delete(context, vol, unmanage_only=True) return webob.Response(status_int=202) diff --git a/cinder/api/extensions.py b/cinder/api/extensions.py index bf8cc96e9..71a8a45c6 100644 --- a/cinder/api/extensions.py +++ b/cinder/api/extensions.py @@ -32,6 +32,7 @@ import cinder.policy CONF = cfg.CONF LOG = logging.getLogger(__name__) +FILES_TO_SKIP = ['resource_common_manage.py'] class ExtensionDescriptor(object): @@ -267,7 +268,7 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py - if ext != '.py' or root == '__init__': + if ext != '.py' or root == '__init__' or fname in FILES_TO_SKIP: continue # Try loading it diff --git a/cinder/api/middleware/auth.py b/cinder/api/middleware/auth.py index 110e728a2..d95e481b8 100644 --- a/cinder/api/middleware/auth.py +++ b/cinder/api/middleware/auth.py @@ -78,28 +78,11 @@ class CinderKeystoneContext(base_wsgi.Middleware): @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): - user_id = req.headers.get('X_USER') - user_id = req.headers.get('X_USER_ID', user_id) - if user_id is None: - LOG.debug("Neither X_USER_ID nor X_USER found in request") - return webob.exc.HTTPUnauthorized() - # get the roles - roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] - if 'X_TENANT_ID' in req.headers: - # This is the new header since Keystone went to ID/Name - project_id = req.headers['X_TENANT_ID'] - else: - # This is for legacy compatibility - project_id = req.headers['X_TENANT'] + # NOTE(jamielennox): from_environ handles these in newer versions project_name = req.headers.get('X_TENANT_NAME') - req_id = req.environ.get(request_id.ENV_REQUEST_ID) - # Get the auth token - auth_token = req.headers.get('X_AUTH_TOKEN', - req.headers.get('X_STORAGE_TOKEN')) - # Build a context, including the auth_token... 
remote_address = req.remote_addr @@ -114,14 +97,17 @@ class CinderKeystoneContext(base_wsgi.Middleware): if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext(user_id, - project_id, - project_name=project_name, - roles=roles, - auth_token=auth_token, - remote_address=remote_address, - service_catalog=service_catalog, - request_id=req_id) + + ctx = context.RequestContext.from_environ( + req.environ, + request_id=req_id, + remote_address=remote_address, + project_name=project_name, + service_catalog=service_catalog) + + if ctx.user_id is None: + LOG.debug("Neither X_USER_ID nor X_USER found in request") + return webob.exc.HTTPUnauthorized() req.environ['cinder.context'] = ctx return self.application diff --git a/cinder/api/openstack/api_version_request.py b/cinder/api/openstack/api_version_request.py index 157c5f0da..70ceb4ba1 100644 --- a/cinder/api/openstack/api_version_request.py +++ b/cinder/api/openstack/api_version_request.py @@ -54,7 +54,16 @@ REST_API_VERSION_HISTORY = """ * 3.5 - Add pagination support to messages API. * 3.6 - Allows to set empty description and empty name for consistency group in consisgroup-update operation. - + * 3.7 - Add cluster API and cluster_name field to service list API + * 3.8 - Adds resources from volume_manage and snapshot_manage extensions. + * 3.9 - Add backup update interface. + * 3.10 - Add group_id filter to list/detail volumes in _get_volumes. + * 3.11 - Add group types and group specs API. + * 3.12 - Add volumes summary API. + * 3.13 - Add generic volume groups API. + * 3.14 - Add group snapshot and create group from src APIs. + * 3.15 - Inject the response's `Etag` header to avoid the lost update + problem with volume metadata. """ # The minimum and maximum versions of the API supported # minimum version of the API supported. # Explicitly using /v1 or /v2 endpoints will still work _MIN_API_VERSION = "3.0" -_MAX_API_VERSION = "3.6" +_MAX_API_VERSION = "3.15" _LEGACY_API_VERSION1 = "1.0" _LEGACY_API_VERSION2 = "2.0" diff --git a/cinder/api/openstack/rest_api_version_history.rst b/cinder/api/openstack/rest_api_version_history.rst index 7efbfd96b..1659b1f24 100644 --- a/cinder/api/openstack/rest_api_version_history.rst +++ b/cinder/api/openstack/rest_api_version_history.rst @@ -69,3 +69,124 @@ user documentation. --- Allowed to set empty description and empty name for consistency group in consisgroup-update operation. + +3.7 +--- + Added ``cluster_name`` field to service list/detail. + + Added /clusters endpoint to list/show/update clusters. + + Show endpoint requires the cluster name and optionally the binary as a URL + parameter (default is "cinder-volume"). Returns: + + .. code-block:: json + + "cluster": { + "created_at": ..., + "disabled_reason": null, + "last_heartbeat": ..., + "name": "cluster_name", + "num_down_hosts": 4, + "num_hosts": 2, + "state": "up", + "status": "enabled", + "updated_at": ... + } + + Update endpoint allows enabling and disabling a cluster in a similar way to + service's update endpoint, but in the body we must specify the name and + optionally the binary ("cinder-volume" is the default) and the disabled + reason. Returns: + + ..
code-block:: json + + "cluster": { + "name": "cluster_name", + "state": "up", + "status": "enabled", + "disabled_reason": null + } + + Index and detail accept filtering by `name`, `binary`, `disabled`, + `num_hosts`, `num_down_hosts`, and up/down status (`is_up`) as URL + parameters. + + Index endpoint returns: + + .. code-block:: json + + "clusters": [ + { + "name": "cluster_name", + "state": "up", + "status": "enabled" + }, + { + ... + } + ] + + Detail endpoint returns: + + .. code-block:: json + + "clusters": [ + { + "created_at": ..., + "disabled_reason": null, + "last_heartbeat": ..., + "name": "cluster_name", + "num_down_hosts": 4, + "num_hosts": 2, + "state": "up", + "status": "enabled", + "updated_at": ... + }, + { + ... + } + ] + +3.8 +--- + Adds the following resources that were previously in extensions: + - os-volume-manage => /v3//manageable_volumes + - os-snapshot-manage => /v3//manageable_snapshots + +3.9 +--- + Added backup update interface to change name and description. + Returns: + + .. code-block:: json + + "backup": { + "id": "backup_id", + "name": "backup_name", + "links": "backup_link" + } + +3.10 +---- + Added the filter parameter ``group_id`` to + list/detail volumes requests. + +3.11 +---- + Added group types and group specs API. + +3.12 +---- + Added volumes/summary API. + +3.13 +---- + Added create/delete/update/list/show APIs for generic volume groups. + +3.14 +---- + Added group snapshots and create group from src APIs. + +3.15 +---- + Added injecting the response's `Etag` header to avoid the lost update + problem with volume metadata. diff --git a/cinder/api/openstack/wsgi.py b/cinder/api/openstack/wsgi.py index cc4718e55..8a19d185a 100644 --- a/cinder/api/openstack/wsgi.py +++ b/cinder/api/openstack/wsgi.py @@ -33,6 +33,7 @@ from cinder.api.openstack import versioned_method from cinder import exception from cinder import i18n from cinder.i18n import _, _LE, _LI +from cinder import policy from cinder import utils from cinder.wsgi import common as wsgi @@ -1220,6 +1221,12 @@ class Controller(object): # ranges of valid versions as that is ambiguous func_list.sort(reverse=True) + # NOTE(geguileo): To avoid PEP8 errors when defining multiple + # microversions of the same method in the same class we add the + # api_version decorator to the function so it can be used instead, + # thus preventing method redefinition errors.
+ f.api_version = cls.api_version + return f return decorator @@ -1295,6 +1302,23 @@ class Controller(object): except exception.InvalidInput as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) + @staticmethod + def get_policy_checker(prefix): + @staticmethod + def policy_checker(req, action, resource=None): + ctxt = req.environ['cinder.context'] + target = { + 'project_id': ctxt.project_id, + 'user_id': ctxt.user_id, + } + if resource: + target.update(resource) + + _action = '%s:%s' % (prefix, action) + policy.enforce(ctxt, _action, target) + return ctxt + return policy_checker + class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" diff --git a/cinder/api/v1/snapshot_metadata.py b/cinder/api/v1/snapshot_metadata.py index 8bbf1f8c0..93aa1309f 100644 --- a/cinder/api/v1/snapshot_metadata.py +++ b/cinder/api/v1/snapshot_metadata.py @@ -30,12 +30,9 @@ class Controller(wsgi.Controller): super(Controller, self).__init__() def _get_metadata(self, context, snapshot_id): - try: - snapshot = self.volume_api.get_snapshot(context, snapshot_id) - meta = self.volume_api.get_snapshot_metadata(context, snapshot) - except exception.SnapshotNotFound: - msg = _('snapshot does not exist') - raise exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + meta = self.volume_api.get_snapshot_metadata(context, snapshot) return meta def index(self, req, snapshot_id): @@ -106,10 +103,7 @@ class Controller(wsgi.Controller): snapshot, metadata, delete) - except exception.SnapshotNotFound: - msg = _('snapshot does not exist') - raise exc.HTTPNotFound(explanation=msg) - + # Not found exception will be handled at the wsgi level except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) @@ -128,8 +122,8 @@ class Controller(wsgi.Controller): try: return {'meta': {id: data[id]}} except KeyError: - msg = _("Metadata item was not found") - raise exc.HTTPNotFound(explanation=msg) + raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id, + metadata_key=id) def delete(self, req, snapshot_id, id): """Deletes an existing metadata.""" @@ -138,15 +132,12 @@ class Controller(wsgi.Controller): metadata = self._get_metadata(context, snapshot_id) if id not in metadata: - msg = _("Metadata item was not found") - raise exc.HTTPNotFound(explanation=msg) + raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id, + metadata_key=id) - try: - snapshot = self.volume_api.get_snapshot(context, snapshot_id) - self.volume_api.delete_snapshot_metadata(context, snapshot, id) - except exception.SnapshotNotFound: - msg = _('snapshot does not exist') - raise exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + self.volume_api.delete_snapshot_metadata(context, snapshot, id) return webob.Response(status_int=200) diff --git a/cinder/api/v1/snapshots.py b/cinder/api/v1/snapshots.py index e7b63178f..b09dc9057 100644 --- a/cinder/api/v1/snapshots.py +++ b/cinder/api/v1/snapshots.py @@ -72,11 +72,9 @@ class SnapshotsController(wsgi.Controller): """Return data about the given snapshot.""" context = req.environ['cinder.context'] - try: - snapshot = self.volume_api.get_snapshot(context, id) - req.cache_db_snapshot(snapshot) - except exception.NotFound: - raise exc.HTTPNotFound() + # Not found exception will be handled at the 
wsgi level + snapshot = self.volume_api.get_snapshot(context, id) + req.cache_db_snapshot(snapshot) return {'snapshot': _translate_snapshot_detail_view(snapshot)} @@ -84,13 +82,11 @@ class SnapshotsController(wsgi.Controller): """Delete a snapshot.""" context = req.environ['cinder.context'] - LOG.info(_LI("Delete snapshot with id: %s"), id, context=context) + LOG.info(_LI("Delete snapshot with id: %s"), id) - try: - snapshot = self.volume_api.get_snapshot(context, id) - self.volume_api.delete_snapshot(context, snapshot) - except exception.NotFound: - raise exc.HTTPNotFound() + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.delete_snapshot(context, snapshot) return webob.Response(status_int=202) def index(self, req): @@ -139,14 +135,12 @@ class SnapshotsController(wsgi.Controller): msg = _("'volume_id' must be specified") raise exc.HTTPBadRequest(explanation=msg) - try: - volume = self.volume_api.get(context, volume_id) - except exception.NotFound: - raise exc.HTTPNotFound() + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, volume_id) force = snapshot.get('force', False) msg = _LI("Create snapshot from volume %s") - LOG.info(msg, volume_id, context=context) + LOG.info(msg, volume_id) if not utils.is_valid_boolstr(force): msg = _("Invalid value '%s' for force. ") % force @@ -194,11 +188,9 @@ class SnapshotsController(wsgi.Controller): if key in snapshot: update_dict[key] = snapshot[key] - try: - snapshot = self.volume_api.get_snapshot(context, id) - self.volume_api.update_snapshot(context, snapshot, update_dict) - except exception.NotFound: - raise exc.HTTPNotFound() + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.update_snapshot(context, snapshot, update_dict) snapshot.update(update_dict) req.cache_db_snapshot(snapshot) diff --git a/cinder/api/v1/types.py b/cinder/api/v1/types.py index fe207253b..bc68bf428 100644 --- a/cinder/api/v1/types.py +++ b/cinder/api/v1/types.py @@ -15,11 +15,8 @@ """The volume type & volume types extra specs extension.""" -from webob import exc - from cinder.api.openstack import wsgi from cinder.api.views import types as views_types -from cinder import exception from cinder.volume import volume_types @@ -40,11 +37,9 @@ class VolumeTypesController(wsgi.Controller): """Return a single volume type item.""" context = req.environ['cinder.context'] - try: - vol_type = volume_types.get_volume_type(context, id) - req.cache_resource(vol_type, name='types') - except exception.NotFound: - raise exc.HTTPNotFound() + # Not found exception will be handled at the wsgi level + vol_type = volume_types.get_volume_type(context, id) + req.cache_resource(vol_type, name='types') return self._view_builder.show(req, vol_type) diff --git a/cinder/api/v1/volume_metadata.py b/cinder/api/v1/volume_metadata.py index c42f28f5a..c2a3eb28a 100644 --- a/cinder/api/v1/volume_metadata.py +++ b/cinder/api/v1/volume_metadata.py @@ -30,12 +30,9 @@ class Controller(wsgi.Controller): super(Controller, self).__init__() def _get_metadata(self, context, volume_id): - try: - volume = self.volume_api.get(context, volume_id) - meta = self.volume_api.get_volume_metadata(context, volume) - except exception.VolumeNotFound: - msg = _('volume does not exist') - raise exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, 
volume_id) + meta = self.volume_api.get_volume_metadata(context, volume) return meta def index(self, req, volume_id): @@ -106,10 +103,7 @@ class Controller(wsgi.Controller): volume, metadata, delete) - except exception.VolumeNotFound: - msg = _('volume does not exist') - raise exc.HTTPNotFound(explanation=msg) - + # Not found exception will be handled at the wsgi level except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) @@ -128,8 +122,8 @@ class Controller(wsgi.Controller): try: return {'meta': {id: data[id]}} except KeyError: - msg = _("Metadata item was not found") - raise exc.HTTPNotFound(explanation=msg) + raise exception.VolumeMetadataNotFound(volume_id=volume_id, + metadata_key=id) def delete(self, req, volume_id, id): """Deletes an existing metadata.""" @@ -138,15 +132,12 @@ class Controller(wsgi.Controller): metadata = self._get_metadata(context, volume_id) if id not in metadata: - msg = _("Metadata item was not found") - raise exc.HTTPNotFound(explanation=msg) + raise exception.VolumeMetadataNotFound(volume_id=volume_id, + metadata_key=id) - try: - volume = self.volume_api.get(context, volume_id) - self.volume_api.delete_volume_metadata(context, volume, id) - except exception.VolumeNotFound: - msg = _('volume does not exist') - raise exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, volume_id) + self.volume_api.delete_volume_metadata(context, volume, id) return webob.Response(status_int=200) diff --git a/cinder/api/v1/volumes.py b/cinder/api/v1/volumes.py index fc29fe5b0..2b96c1231 100644 --- a/cinder/api/v1/volumes.py +++ b/cinder/api/v1/volumes.py @@ -24,7 +24,6 @@ from webob import exc from cinder.api import common from cinder.api.openstack import wsgi -from cinder import exception from cinder.i18n import _, _LI from cinder import utils from cinder import volume as cinder_volume @@ -115,7 +114,7 @@ def _translate_volume_summary_view(context, vol, image_id=None): if image_id: d['image_id'] = image_id - LOG.info(_LI("vol=%s"), vol, context=context) + LOG.info(_LI("vol=%s"), vol) if vol.metadata: d['metadata'] = vol.metadata @@ -137,11 +136,9 @@ class VolumeController(wsgi.Controller): """Return data about the given volume.""" context = req.environ['cinder.context'] - try: - vol = self.volume_api.get(context, id, viewable_admin_meta=True) - req.cache_db_volume(vol) - except exception.NotFound: - raise exc.HTTPNotFound() + # Not found exception will be handled at the wsgi level + vol = self.volume_api.get(context, id, viewable_admin_meta=True) + req.cache_db_volume(vol) utils.add_visible_admin_metadata(vol) @@ -151,13 +148,11 @@ class VolumeController(wsgi.Controller): """Delete a volume.""" context = req.environ['cinder.context'] - LOG.info(_LI("Delete volume with id: %s"), id, context=context) + LOG.info(_LI("Delete volume with id: %s"), id) - try: - volume = self.volume_api.get(context, id) - self.volume_api.delete(context, volume) - except exception.NotFound: - raise exc.HTTPNotFound() + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) + self.volume_api.delete(context, volume) return webob.Response(status_int=202) def index(self, req): @@ -230,41 +225,30 @@ class VolumeController(wsgi.Controller): req_volume_type = volume.get('volume_type', None) if req_volume_type: - try: - if not uuidutils.is_uuid_like(req_volume_type): - kwargs['volume_type'] = \ - 
volume_types.get_volume_type_by_name( - context, req_volume_type) - else: - kwargs['volume_type'] = volume_types.get_volume_type( + # Not found exception will be handled at the wsgi level + if not uuidutils.is_uuid_like(req_volume_type): + kwargs['volume_type'] = \ + volume_types.get_volume_type_by_name( context, req_volume_type) - except exception.VolumeTypeNotFound: - explanation = 'Volume type not found.' - raise exc.HTTPNotFound(explanation=explanation) + else: + kwargs['volume_type'] = volume_types.get_volume_type( + context, req_volume_type) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: - try: - kwargs['snapshot'] = self.volume_api.get_snapshot(context, - snapshot_id) - except exception.NotFound: - explanation = _('snapshot id:%s not found') % snapshot_id - raise exc.HTTPNotFound(explanation=explanation) - + # Not found exception will be handled at the wsgi level + kwargs['snapshot'] = self.volume_api.get_snapshot(context, + snapshot_id) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: - try: - kwargs['source_volume'] = \ - self.volume_api.get_volume(context, - source_volid) - except exception.NotFound: - explanation = _('source vol id:%s not found') % source_volid - raise exc.HTTPNotFound(explanation=explanation) + # Not found exception will be handled at the wsgi level + kwargs['source_volume'] = self.volume_api.get_volume(context, + source_volid) else: kwargs['source_volume'] = None @@ -274,7 +258,7 @@ class VolumeController(wsgi.Controller): elif size is None and kwargs['source_volume'] is not None: size = kwargs['source_volume']['size'] - LOG.info(_LI("Create volume of %s GB"), size, context=context) + LOG.info(_LI("Create volume of %s GB"), size) multiattach = volume.get('multiattach', False) kwargs['multiattach'] = multiattach @@ -326,13 +310,11 @@ class VolumeController(wsgi.Controller): if key in volume: update_dict[key] = volume[key] - try: - volume = self.volume_api.get(context, id, viewable_admin_meta=True) - volume_utils.notify_about_volume_usage(context, volume, - 'update.start') - self.volume_api.update(context, volume, update_dict) - except exception.NotFound: - raise exc.HTTPNotFound() + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id, viewable_admin_meta=True) + volume_utils.notify_about_volume_usage(context, volume, + 'update.start') + self.volume_api.update(context, volume, update_dict) volume.update(update_dict) diff --git a/cinder/api/v2/snapshot_metadata.py b/cinder/api/v2/snapshot_metadata.py index 8d2e82ce7..6e94c3f30 100644 --- a/cinder/api/v2/snapshot_metadata.py +++ b/cinder/api/v2/snapshot_metadata.py @@ -30,12 +30,9 @@ class Controller(wsgi.Controller): super(Controller, self).__init__() def _get_metadata(self, context, snapshot_id): - try: - snapshot = self.volume_api.get_snapshot(context, snapshot_id) - meta = self.volume_api.get_snapshot_metadata(context, snapshot) - except exception.SnapshotNotFound: - msg = _('snapshot does not exist') - raise exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + meta = self.volume_api.get_snapshot_metadata(context, snapshot) return meta def index(self, req, snapshot_id): @@ -96,10 +93,7 @@ class Controller(wsgi.Controller): snapshot, metadata, delete) - except exception.SnapshotNotFound: - msg = _('snapshot does not exist') - raise 
exc.HTTPNotFound(explanation=msg) - + # Not found exception will be handled at the wsgi level except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) @@ -118,8 +112,8 @@ class Controller(wsgi.Controller): try: return {'meta': {id: data[id]}} except KeyError: - msg = _("Metadata item was not found") - raise exc.HTTPNotFound(explanation=msg) + raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id, + metadata_key=id) def delete(self, req, snapshot_id, id): """Deletes an existing metadata.""" @@ -128,15 +122,12 @@ class Controller(wsgi.Controller): metadata = self._get_metadata(context, snapshot_id) if id not in metadata: - msg = _("Metadata item was not found") - raise exc.HTTPNotFound(explanation=msg) + raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id, + metadata_key=id) - try: - snapshot = self.volume_api.get_snapshot(context, snapshot_id) - self.volume_api.delete_snapshot_metadata(context, snapshot, id) - except exception.SnapshotNotFound: - msg = _('snapshot does not exist') - raise exc.HTTPNotFound(explanation=msg) + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + self.volume_api.delete_snapshot_metadata(context, snapshot, id) return webob.Response(status_int=200) diff --git a/cinder/api/v2/snapshots.py b/cinder/api/v2/snapshots.py index 0e149e04d..b5ed4a27c 100644 --- a/cinder/api/v2/snapshots.py +++ b/cinder/api/v2/snapshots.py @@ -48,11 +48,9 @@ class SnapshotsController(wsgi.Controller): """Return data about the given snapshot.""" context = req.environ['cinder.context'] - try: - snapshot = self.volume_api.get_snapshot(context, id) - req.cache_db_snapshot(snapshot) - except exception.SnapshotNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, id) + req.cache_db_snapshot(snapshot) return self._view_builder.detail(req, snapshot) @@ -60,13 +58,11 @@ class SnapshotsController(wsgi.Controller): """Delete a snapshot.""" context = req.environ['cinder.context'] - LOG.info(_LI("Delete snapshot with id: %s"), id, context=context) + LOG.info(_LI("Delete snapshot with id: %s"), id) - try: - snapshot = self.volume_api.get_snapshot(context, id) - self.volume_api.delete_snapshot(context, snapshot) - except exception.SnapshotNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.delete_snapshot(context, snapshot) return webob.Response(status_int=202) @@ -94,8 +90,7 @@ class SnapshotsController(wsgi.Controller): # NOTE(thingee): v2 API allows name instead of display_name if 'name' in search_opts: - search_opts['display_name'] = search_opts['name'] - del search_opts['name'] + search_opts['display_name'] = search_opts.pop('name') snapshots = self.volume_api.get_all_snapshots(context, search_opts=search_opts, @@ -130,13 +125,10 @@ class SnapshotsController(wsgi.Controller): msg = _("'volume_id' must be specified") raise exc.HTTPBadRequest(explanation=msg) - try: - volume = self.volume_api.get(context, volume_id) - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + volume = self.volume_api.get(context, volume_id) force = snapshot.get('force', False) msg = _LI("Create snapshot from volume %s") - LOG.info(msg, volume_id, context=context) + 
LOG.info(msg, volume_id) self.validate_name_and_description(snapshot) # NOTE(thingee): v2 API allows name instead of display_name @@ -205,13 +197,11 @@ class SnapshotsController(wsgi.Controller): if key in snapshot: update_dict[key] = snapshot[key] - try: - snapshot = self.volume_api.get_snapshot(context, id) - volume_utils.notify_about_snapshot_usage(context, snapshot, - 'update.start') - self.volume_api.update_snapshot(context, snapshot, update_dict) - except exception.SnapshotNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + snapshot = self.volume_api.get_snapshot(context, id) + volume_utils.notify_about_snapshot_usage(context, snapshot, + 'update.start') + self.volume_api.update_snapshot(context, snapshot, update_dict) snapshot.update(update_dict) req.cache_db_snapshot(snapshot) diff --git a/cinder/api/v2/types.py b/cinder/api/v2/types.py index 95bd71bdf..8f3a29407 100644 --- a/cinder/api/v2/types.py +++ b/cinder/api/v2/types.py @@ -47,14 +47,12 @@ class VolumeTypesController(wsgi.Controller): vol_type = volume_types.get_default_volume_type() if not vol_type: msg = _("Default volume type can not be found.") - raise exc.HTTPNotFound(explanation=msg) + raise exception.VolumeTypeNotFound(message=msg) req.cache_resource(vol_type, name='types') else: - try: - vol_type = volume_types.get_volume_type(context, id) - req.cache_resource(vol_type, name='types') - except exception.VolumeTypeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at wsgi level + vol_type = volume_types.get_volume_type(context, id) + req.cache_resource(vol_type, name='types') return self._view_builder.show(req, vol_type) diff --git a/cinder/api/v2/volume_metadata.py b/cinder/api/v2/volume_metadata.py index b7d39c1c8..37c3e52d5 100644 --- a/cinder/api/v2/volume_metadata.py +++ b/cinder/api/v2/volume_metadata.py @@ -35,11 +35,9 @@ class Controller(wsgi.Controller): return self._get_volume_and_metadata(context, volume_id)[1] def _get_volume_and_metadata(self, context, volume_id): - try: - volume = self.volume_api.get(context, volume_id) - meta = self.volume_api.get_volume_metadata(context, volume) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, volume_id) + meta = self.volume_api.get_volume_metadata(context, volume) return (volume, meta) def index(self, req, volume_id): @@ -52,11 +50,9 @@ class Controller(wsgi.Controller): context = req.environ['cinder.context'] metadata = body['metadata'] - new_metadata = self._update_volume_metadata(context, - volume_id, - metadata, - delete=False) - + new_metadata = self._update_volume_metadata(context, volume_id, + metadata, delete=False, + use_create=True) return {'metadata': new_metadata} def update(self, req, volume_id, id, body): @@ -91,20 +87,18 @@ class Controller(wsgi.Controller): return {'metadata': new_metadata} - def _update_volume_metadata(self, context, - volume_id, metadata, - delete=False): + def _update_volume_metadata(self, context, volume_id, metadata, + delete=False, use_create=False): try: volume = self.volume_api.get(context, volume_id) - return self.volume_api.update_volume_metadata( - context, - volume, - metadata, - delete, - meta_type=common.METADATA_TYPES.user) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) - + if use_create: + return 
self.volume_api.create_volume_metadata(context, volume, + metadata) + else: + return self.volume_api.update_volume_metadata( + context, volume, metadata, delete, + meta_type=common.METADATA_TYPES.user) + # Not found exception will be handled at the wsgi level except (ValueError, AttributeError): msg = _("Malformed request body") raise webob.exc.HTTPBadRequest(explanation=msg) @@ -123,8 +117,8 @@ class Controller(wsgi.Controller): try: return {'meta': {id: data[id]}} except KeyError: - msg = _("Metadata item was not found") - raise webob.exc.HTTPNotFound(explanation=msg) + raise exception.VolumeMetadataNotFound(volume_id=volume_id, + metadata_key=id) def delete(self, req, volume_id, id): """Deletes an existing metadata.""" @@ -133,17 +127,15 @@ class Controller(wsgi.Controller): volume, metadata = self._get_volume_and_metadata(context, volume_id) if id not in metadata: - msg = _("Metadata item was not found") - raise webob.exc.HTTPNotFound(explanation=msg) + raise exception.VolumeMetadataNotFound(volume_id=volume_id, + metadata_key=id) - try: - self.volume_api.delete_volume_metadata( - context, - volume, - id, - meta_type=common.METADATA_TYPES.user) - except exception.VolumeNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + self.volume_api.delete_volume_metadata( + context, + volume, + id, + meta_type=common.METADATA_TYPES.user) return webob.Response(status_int=200) diff --git a/cinder/api/v2/volumes.py b/cinder/api/v2/volumes.py index aa5cde494..ecbd4e8b0 100644 --- a/cinder/api/v2/volumes.py +++ b/cinder/api/v2/volumes.py @@ -54,11 +54,9 @@ class VolumeController(wsgi.Controller): """Return data about the given volume.""" context = req.environ['cinder.context'] - try: - vol = self.volume_api.get(context, id, viewable_admin_meta=True) - req.cache_db_volume(vol) - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + vol = self.volume_api.get(context, id, viewable_admin_meta=True) + req.cache_db_volume(vol) utils.add_visible_admin_metadata(vol) @@ -70,13 +68,11 @@ class VolumeController(wsgi.Controller): cascade = utils.get_bool_param('cascade', req.params) - LOG.info(_LI("Delete volume with id: %s"), id, context=context) + LOG.info(_LI("Delete volume with id: %s"), id) - try: - volume = self.volume_api.get(context, id) - self.volume_api.delete(context, volume, cascade=cascade) - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + volume = self.volume_api.get(context, id) + self.volume_api.delete(context, volume, cascade=cascade) return webob.Response(status_int=202) def index(self, req): @@ -109,8 +105,7 @@ class VolumeController(wsgi.Controller): sort_keys[sort_keys.index('name')] = 'display_name' if 'name' in filters: - filters['display_name'] = filters['name'] - del filters['name'] + filters['display_name'] = filters.pop('name') self.volume_api.check_volume_filters(filters) volumes = self.volume_api.get_all(context, marker, limit, @@ -196,68 +191,57 @@ class VolumeController(wsgi.Controller): volume['display_description'] = volume.pop('description') if 'image_id' in volume: - volume['imageRef'] = volume.get('image_id') - del volume['image_id'] + volume['imageRef'] = volume.pop('image_id') req_volume_type = volume.get('volume_type', None) if req_volume_type: - try: - if not uuidutils.is_uuid_like(req_volume_type): - 
kwargs['volume_type'] = \ - volume_types.get_volume_type_by_name( - context, req_volume_type) - else: - kwargs['volume_type'] = volume_types.get_volume_type( + # Not found exception will be handled at the wsgi level + if not uuidutils.is_uuid_like(req_volume_type): + kwargs['volume_type'] = \ + volume_types.get_volume_type_by_name( context, req_volume_type) - except exception.VolumeTypeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + else: + kwargs['volume_type'] = volume_types.get_volume_type( + context, req_volume_type) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: - try: - kwargs['snapshot'] = self.volume_api.get_snapshot(context, - snapshot_id) - except exception.SnapshotNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + kwargs['snapshot'] = self.volume_api.get_snapshot(context, + snapshot_id) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: - try: - kwargs['source_volume'] = \ - self.volume_api.get_volume(context, - source_volid) - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + kwargs['source_volume'] = \ + self.volume_api.get_volume(context, + source_volid) else: kwargs['source_volume'] = None source_replica = volume.get('source_replica') if source_replica is not None: - try: - src_vol = self.volume_api.get_volume(context, - source_replica) - if src_vol['replication_status'] == 'disabled': - explanation = _('source volume id:%s is not' - ' replicated') % source_replica - raise exc.HTTPBadRequest(explanation=explanation) - kwargs['source_replica'] = src_vol - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + src_vol = self.volume_api.get_volume(context, + source_replica) + if src_vol['replication_status'] == 'disabled': + explanation = _('source volume id:%s is not' + ' replicated') % source_replica + raise exc.HTTPBadRequest(explanation=explanation) + kwargs['source_replica'] = src_vol else: kwargs['source_replica'] = None consistencygroup_id = volume.get('consistencygroup_id') if consistencygroup_id is not None: - try: - kwargs['consistencygroup'] = \ - self.consistencygroup_api.get(context, - consistencygroup_id) - except exception.ConsistencyGroupNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + kwargs['consistencygroup'] = \ + self.consistencygroup_api.get(context, + consistencygroup_id) else: kwargs['consistencygroup'] = None @@ -269,7 +253,7 @@ class VolumeController(wsgi.Controller): elif size is None and kwargs['source_replica'] is not None: size = kwargs['source_replica']['size'] - LOG.info(_LI("Create volume of %s GB"), size, context=context) + LOG.info(_LI("Create volume of %s GB"), size) if self.ext_mgr.is_loaded('os-image-create'): image_ref = volume.get('imageRef') @@ -279,8 +263,7 @@ class VolumeController(wsgi.Controller): kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) - multiattach = volume.get('multiattach', False) - kwargs['multiattach'] = multiattach + kwargs['multiattach'] = utils.get_bool_param('multiattach', volume) new_volume = self.volume_api.create(context, size, @@ -334,15 +317,12 
@@ class VolumeController(wsgi.Controller): if 'description' in update_dict: update_dict['display_description'] = update_dict.pop('description') + # Not found and Invalid exceptions will be handled at the wsgi level try: volume = self.volume_api.get(context, id, viewable_admin_meta=True) volume_utils.notify_about_volume_usage(context, volume, 'update.start') self.volume_api.update(context, volume, update_dict) - except exception.VolumeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except exception.InvalidVolumeMetadata as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) diff --git a/cinder/api/v3/backups.py b/cinder/api/v3/backups.py new file mode 100644 index 000000000..7db051217 --- /dev/null +++ b/cinder/api/v3/backups.py @@ -0,0 +1,56 @@ +# Copyright (c) 2016 Intel, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The backups V3 API.""" + +from webob import exc + +from cinder.api.contrib import backups as backups_v2 +from cinder.api.openstack import wsgi +from cinder.i18n import _ + +BACKUP_UPDATE_MICRO_VERSION = '3.9' + + +class BackupsController(backups_v2.BackupsController): + """The backups API controller for the OpenStack API V3.""" + + @wsgi.Controller.api_version(BACKUP_UPDATE_MICRO_VERSION) + def update(self, req, id, body): + """Update a backup.""" + context = req.environ['cinder.context'] + self.assert_valid_body(body, 'backup') + + backup_update = body['backup'] + + self.validate_name_and_description(backup_update) + update_dict = {} + if 'name' in backup_update: + update_dict['display_name'] = backup_update.pop('name') + if 'description' in backup_update: + update_dict['display_description'] = ( + backup_update.pop('description')) + # Check no unsupported fields. + if backup_update: + msg = _("Unsupported fields %s.") % (", ".join(backup_update)) + raise exc.HTTPBadRequest(explanation=msg) + + new_backup = self.backup_api.update(context, id, update_dict) + + return self._view_builder.summary(req, new_backup) + + +def create_resource(): + return wsgi.Resource(BackupsController()) diff --git a/cinder/api/v3/clusters.py b/cinder/api/v3/clusters.py new file mode 100644 index 000000000..0048cf25a --- /dev/null +++ b/cinder/api/v3/clusters.py @@ -0,0 +1,132 @@ +# Copyright (c) 2016 Red Hat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +from cinder.api.openstack import wsgi +from cinder.api.v3.views import clusters as clusters_view +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder import utils + + +CLUSTER_MICRO_VERSION = '3.7' + + +class ClusterController(wsgi.Controller): + allowed_list_keys = {'name', 'binary', 'is_up', 'disabled', 'num_hosts', + 'num_down_hosts'} + + policy_checker = wsgi.Controller.get_policy_checker('clusters') + + @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION) + def show(self, req, id, binary='cinder-volume'): + """Return data for a given cluster name with optional binary.""" + # Let the wsgi middleware convert NotAuthorized exceptions + context = self.policy_checker(req, 'get') + # Let the wsgi middleware convert NotFound exceptions + cluster = objects.Cluster.get_by_id(context, None, binary=binary, + name=id, services_summary=True) + return clusters_view.ViewBuilder.detail(cluster) + + @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION) + def index(self, req): + """Return a non-detailed list of all existing clusters. + + Filter by name, binary, is_up, disabled, num_hosts, and num_down_hosts. + """ + return self._get_clusters(req, detail=False) + + @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION) + def detail(self, req): + """Return a detailed list of all existing clusters. + + Filter by name, binary, is_up, disabled, num_hosts, and num_down_hosts. + """ + return self._get_clusters(req, detail=True) + + def _get_clusters(self, req, detail): + # Let the wsgi middleware convert NotAuthorized exceptions + context = self.policy_checker(req, 'get_all') + + filters = dict(req.GET) + allowed = self.allowed_list_keys + + # Check filters are valid + if not allowed.issuperset(filters): + invalid_keys = set(filters).difference(allowed) + msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys) + raise exception.InvalidInput(reason=msg) + + # Check boolean values + for bool_key in ('disabled', 'is_up'): + if bool_key in filters: + filters[bool_key] = utils.get_bool_param(bool_key, req.GET) + + # For detailed view we need the services summary information + filters['services_summary'] = detail + + clusters = objects.ClusterList.get_all(context, **filters) + return clusters_view.ViewBuilder.list(clusters, detail) + + @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION) + def update(self, req, id, body): + """Enable/Disable scheduling for a cluster.""" + # NOTE(geguileo): This method tries to be consistent with services + # update endpoint API.
+ + # Let the wsgi middleware convert NotAuthorized exceptions + context = self.policy_checker(req, 'update') + + if id not in ('enable', 'disable'): + raise exception.NotFound(message=_("Unknown action")) + + disabled = id != 'enable' + disabled_reason = self._get_disabled_reason(body) if disabled else None + + if not disabled and disabled_reason: + msg = _("Unexpected 'disabled_reason' found on enable request.") + raise exception.InvalidInput(reason=msg) + + name = body.get('name') + if not name: + raise exception.MissingRequired(element='name') + + binary = body.get('binary', 'cinder-volume') + + # Let wsgi handle NotFound exception + cluster = objects.Cluster.get_by_id(context, None, binary=binary, + name=name) + cluster.disabled = disabled + cluster.disabled_reason = disabled_reason + cluster.save() + + # We return summary data plus the disabled reason + ret_val = clusters_view.ViewBuilder.summary(cluster) + ret_val['cluster']['disabled_reason'] = disabled_reason + + return ret_val + + def _get_disabled_reason(self, body): + reason = body.get('disabled_reason') + if reason: + # Let wsgi handle InvalidInput exception + reason = reason.strip() + utils.check_string_length(reason, 'Disabled reason', min_length=1, + max_length=255) + return reason + + +def create_resource(): + return wsgi.Resource(ClusterController()) diff --git a/cinder/api/v3/group_snapshots.py b/cinder/api/v3/group_snapshots.py new file mode 100644 index 000000000..0da9dd3ec --- /dev/null +++ b/cinder/api/v3/group_snapshots.py @@ -0,0 +1,146 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
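A hedged sketch of the enable/disable call handled by update() above; the cluster name and reason shown are made up:

# The action travels in the URL ("enable" or "disable") and the target
# cluster in the body. "binary" defaults to cinder-volume, and
# "disabled_reason" is only legal when disabling (stripped and capped
# at 255 characters by _get_disabled_reason()).
body = {
    "name": "cluster1@lvmdriver-1",          # hypothetical cluster name
    "binary": "cinder-volume",
    "disabled_reason": "planned maintenance",
}
# Sent as PUT /v3/{project_id}/clusters/disable (microversion 3.7+);
# the response is the summary view plus the stored disabled_reason.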
+ +"""The group_snapshots api.""" + +from oslo_log import log as logging +import six +import webob +from webob import exc + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder.api.v3.views import group_snapshots as group_snapshot_views +from cinder import exception +from cinder import group as group_api +from cinder.i18n import _, _LI + +LOG = logging.getLogger(__name__) + +GROUP_SNAPSHOT_API_VERSION = '3.14' + + +class GroupSnapshotsController(wsgi.Controller): + """The group_snapshots API controller for the OpenStack API.""" + + _view_builder_class = group_snapshot_views.ViewBuilder + + def __init__(self): + self.group_snapshot_api = group_api.API() + super(GroupSnapshotsController, self).__init__() + + @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) + def show(self, req, id): + """Return data about the given group_snapshot.""" + LOG.debug('show called for member %s', id) + context = req.environ['cinder.context'] + + group_snapshot = self.group_snapshot_api.get_group_snapshot( + context, + group_snapshot_id=id) + + return self._view_builder.detail(req, group_snapshot) + + @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) + def delete(self, req, id): + """Delete a group_snapshot.""" + LOG.debug('delete called for member %s', id) + context = req.environ['cinder.context'] + + LOG.info(_LI('Delete group_snapshot with id: %s'), id, context=context) + + try: + group_snapshot = self.group_snapshot_api.get_group_snapshot( + context, + group_snapshot_id=id) + self.group_snapshot_api.delete_group_snapshot(context, + group_snapshot) + except exception.InvalidGroupSnapshot as e: + raise exc.HTTPBadRequest(explanation=six.text_type(e)) + except exception.GroupSnapshotNotFound: + # Not found exception will be handled at the wsgi level + raise + except Exception: + msg = _("Error occurred when deleting group snapshot %s.") % id + LOG.exception(msg) + raise exc.HTTPBadRequest(explanation=msg) + + return webob.Response(status_int=202) + + @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) + def index(self, req): + """Returns a summary list of group_snapshots.""" + return self._get_group_snapshots(req, is_detail=False) + + @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) + def detail(self, req): + """Returns a detailed list of group_snapshots.""" + return self._get_group_snapshots(req, is_detail=True) + + def _get_group_snapshots(self, req, is_detail): + """Returns a list of group_snapshots through view builder.""" + context = req.environ['cinder.context'] + group_snapshots = self.group_snapshot_api.get_all_group_snapshots( + context) + limited_list = common.limited(group_snapshots, req) + + if is_detail: + group_snapshots = self._view_builder.detail_list(req, limited_list) + else: + group_snapshots = self._view_builder.summary_list(req, + limited_list) + return group_snapshots + + @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) + @wsgi.response(202) + def create(self, req, body): + """Create a new group_snapshot.""" + LOG.debug('Creating new group_snapshot %s', body) + self.assert_valid_body(body, 'group_snapshot') + + context = req.environ['cinder.context'] + group_snapshot = body['group_snapshot'] + self.validate_name_and_description(group_snapshot) + + try: + group_id = group_snapshot['group_id'] + except KeyError: + msg = _("'group_id' must be specified") + raise exc.HTTPBadRequest(explanation=msg) + + group = self.group_snapshot_api.get(context, group_id) + + name = group_snapshot.get('name', None) + description = 
group_snapshot.get('description', None) + + LOG.info(_LI("Creating group_snapshot %(name)s."), + {'name': name}, + context=context) + + try: + new_group_snapshot = self.group_snapshot_api.create_group_snapshot( + context, group, name, description) + except (exception.InvalidGroup, + exception.InvalidGroupSnapshot, + exception.InvalidVolume) as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + retval = self._view_builder.summary(req, new_group_snapshot) + + return retval + + +def create_resource(): + return wsgi.Resource(GroupSnapshotsController()) diff --git a/cinder/api/v3/group_specs.py b/cinder/api/v3/group_specs.py new file mode 100644 index 000000000..5fdbbc592 --- /dev/null +++ b/cinder/api/v3/group_specs.py @@ -0,0 +1,154 @@ +# Copyright (c) 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The group types specs controller""" + +import webob + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder import db +from cinder import exception +from cinder.i18n import _ +from cinder import policy +from cinder import rpc +from cinder import utils +from cinder.volume import group_types + + +class GroupTypeSpecsController(wsgi.Controller): + """The group type specs API controller for the OpenStack API.""" + + def _check_policy(self, context): + target = { + 'project_id': context.project_id, + 'user_id': context.user_id, + } + policy.enforce(context, 'group:group_types_specs', target) + + def _get_group_specs(self, context, group_type_id): + group_specs = db.group_type_specs_get(context, group_type_id) + specs_dict = {} + for key, value in group_specs.items(): + specs_dict[key] = value + return dict(group_specs=specs_dict) + + def _check_type(self, context, group_type_id): + try: + group_types.get_group_type(context, group_type_id) + except exception.GroupTypeNotFound as ex: + raise webob.exc.HTTPNotFound(explanation=ex.msg) + + @wsgi.Controller.api_version('3.11') + def index(self, req, group_type_id): + """Returns the list of group specs for a given group type.""" + context = req.environ['cinder.context'] + self._check_policy(context) + self._check_type(context, group_type_id) + return self._get_group_specs(context, group_type_id) + + @wsgi.Controller.api_version('3.11') + @wsgi.response(202) + def create(self, req, group_type_id, body=None): + context = req.environ['cinder.context'] + self._check_policy(context) + self.assert_valid_body(body, 'group_specs') + + self._check_type(context, group_type_id) + specs = body['group_specs'] + self._check_key_names(specs.keys()) + utils.validate_dictionary_string_length(specs) + + db.group_type_specs_update_or_create(context, + group_type_id, + specs) + notifier_info = dict(type_id=group_type_id, specs=specs) + notifier = rpc.get_notifier('groupTypeSpecs') + notifier.info(context, 'group_type_specs.create', + notifier_info) + return body + + @wsgi.Controller.api_version('3.11') + def update(self, req, group_type_id, id, body=None): + context = req.environ['cinder.context'] + 
self._check_policy(context) + + if not body: + expl = _('Request body empty') + raise webob.exc.HTTPBadRequest(explanation=expl) + self._check_type(context, group_type_id) + if id not in body: + expl = _('Request body and URI mismatch') + raise webob.exc.HTTPBadRequest(explanation=expl) + if len(body) > 1: + expl = _('Request body contains too many items') + raise webob.exc.HTTPBadRequest(explanation=expl) + self._check_key_names(body.keys()) + utils.validate_dictionary_string_length(body) + + db.group_type_specs_update_or_create(context, + group_type_id, + body) + notifier_info = dict(type_id=group_type_id, id=id) + notifier = rpc.get_notifier('groupTypeSpecs') + notifier.info(context, + 'group_type_specs.update', + notifier_info) + return body + + @wsgi.Controller.api_version('3.11') + def show(self, req, group_type_id, id): + """Return a single extra spec item.""" + context = req.environ['cinder.context'] + self._check_policy(context) + + self._check_type(context, group_type_id) + specs = self._get_group_specs(context, group_type_id) + if id in specs['group_specs']: + return {id: specs['group_specs'][id]} + else: + msg = _("Group Type %(type_id)s has no extra spec with key " + "%(id)s.") % ({'type_id': group_type_id, 'id': id}) + raise webob.exc.HTTPNotFound(explanation=msg) + + @wsgi.Controller.api_version('3.11') + def delete(self, req, group_type_id, id): + """Deletes an existing group spec.""" + context = req.environ['cinder.context'] + self._check_policy(context) + + self._check_type(context, group_type_id) + + try: + db.group_type_specs_delete(context, group_type_id, id) + except exception.GroupTypeSpecsNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + notifier_info = dict(type_id=group_type_id, id=id) + notifier = rpc.get_notifier('groupTypeSpecs') + notifier.info(context, + 'group_type_specs.delete', + notifier_info) + return webob.Response(status_int=202) + + def _check_key_names(self, keys): + if not common.validate_key_names(keys): + expl = _('Key names can only contain alphanumeric characters, ' + 'underscores, periods, colons and hyphens.') + + raise webob.exc.HTTPBadRequest(explanation=expl) + + +def create_resource(): + return wsgi.Resource(GroupTypeSpecsController()) diff --git a/cinder/api/v3/group_types.py b/cinder/api/v3/group_types.py new file mode 100644 index 000000000..dd6d3be5f --- /dev/null +++ b/cinder/api/v3/group_types.py @@ -0,0 +1,269 @@ +# Copyright (c) 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
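For reference, a minimal request body for the group specs create() above; the spec key shown is an assumption for illustration only:

# Keys must pass _check_key_names (alphanumerics, underscores, periods,
# colons and hyphens) and values are length-checked as strings.
body = {
    "group_specs": {
        "consistent_group_snapshot_enabled": "<is> True",  # assumed key
    }
}
# POST /v3/{project_id}/group_types/{group_type_id}/group_specs at
# microversion 3.11+; the same payload is echoed back on success (202).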
+ +"""The group type & group type specs controller.""" + +from oslo_utils import strutils +import six +import webob +from webob import exc + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder.api.v3.views import group_types as views_types +from cinder import exception +from cinder.i18n import _ +from cinder import policy +from cinder import rpc +from cinder import utils +from cinder.volume import group_types + + +class GroupTypesController(wsgi.Controller): + """The group types API controller for the OpenStack API.""" + + _view_builder_class = views_types.ViewBuilder + + def _check_policy(self, context): + target = { + 'project_id': context.project_id, + 'user_id': context.user_id, + } + policy.enforce(context, 'group:group_types_manage', target) + + def _notify_group_type_error(self, context, method, err, + group_type=None, id=None, name=None): + payload = dict( + group_types=group_type, name=name, id=id, error_message=err) + rpc.get_notifier('groupType').error(context, method, payload) + + def _notify_group_type_info(self, context, method, group_type): + payload = dict(group_types=group_type) + rpc.get_notifier('groupType').info(context, method, payload) + + @wsgi.Controller.api_version('3.11') + @wsgi.response(202) + def create(self, req, body): + """Creates a new group type.""" + context = req.environ['cinder.context'] + self._check_policy(context) + + self.assert_valid_body(body, 'group_type') + + grp_type = body['group_type'] + name = grp_type.get('name', None) + description = grp_type.get('description') + specs = grp_type.get('group_specs', {}) + is_public = grp_type.get('is_public', True) + + if name is None or len(name.strip()) == 0: + msg = _("Group type name can not be empty.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + utils.check_string_length(name, 'Type name', + min_length=1, max_length=255) + + if description is not None: + utils.check_string_length(description, 'Type description', + min_length=0, max_length=255) + + try: + group_types.create(context, + name, + specs, + is_public, + description=description) + grp_type = group_types.get_group_type_by_name(context, name) + req.cache_resource(grp_type, name='group_types') + self._notify_group_type_info( + context, 'group_type.create', grp_type) + + except exception.GroupTypeExists as err: + self._notify_group_type_error( + context, 'group_type.create', err, group_type=grp_type) + raise webob.exc.HTTPConflict(explanation=six.text_type(err)) + except exception.GroupTypeNotFoundByName as err: + self._notify_group_type_error( + context, 'group_type.create', err, name=name) + raise webob.exc.HTTPNotFound(explanation=err.msg) + + return self._view_builder.show(req, grp_type) + + @wsgi.Controller.api_version('3.11') + def update(self, req, id, body): + # Update description for a given group type. + context = req.environ['cinder.context'] + self._check_policy(context) + + self.assert_valid_body(body, 'group_type') + + grp_type = body['group_type'] + description = grp_type.get('description') + name = grp_type.get('name') + is_public = grp_type.get('is_public') + + # Name and description can not be both None. + # If name specified, name can not be empty. 
+ if name and len(name.strip()) == 0: + msg = _("Group type name can not be empty.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if name is None and description is None and is_public is None: + msg = _("Specify group type name, description, is_public or " + "a combination thereof.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if is_public is not None and not utils.is_valid_boolstr(is_public): + msg = _("Invalid value '%s' for is_public. Accepted values: " + "True or False.") % is_public + raise webob.exc.HTTPBadRequest(explanation=msg) + + if name: + utils.check_string_length(name, 'Type name', + min_length=1, max_length=255) + + if description is not None: + utils.check_string_length(description, 'Type description', + min_length=0, max_length=255) + + try: + group_types.update(context, id, name, description, + is_public=is_public) + # Get the updated group type + grp_type = group_types.get_group_type(context, id) + req.cache_resource(grp_type, name='group_types') + self._notify_group_type_info( + context, 'group_type.update', grp_type) + + except exception.GroupTypeNotFound as err: + self._notify_group_type_error( + context, 'group_type.update', err, id=id) + raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) + except exception.GroupTypeExists as err: + self._notify_group_type_error( + context, 'group_type.update', err, group_type=grp_type) + raise webob.exc.HTTPConflict(explanation=six.text_type(err)) + except exception.GroupTypeUpdateFailed as err: + self._notify_group_type_error( + context, 'group_type.update', err, group_type=grp_type) + raise webob.exc.HTTPInternalServerError( + explanation=six.text_type(err)) + + return self._view_builder.show(req, grp_type) + + @wsgi.Controller.api_version('3.11') + def delete(self, req, id): + """Deletes an existing group type.""" + context = req.environ['cinder.context'] + self._check_policy(context) + + try: + grp_type = group_types.get_group_type(context, id) + group_types.destroy(context, grp_type['id']) + self._notify_group_type_info( + context, 'group_type.delete', grp_type) + except exception.GroupTypeInUse as err: + self._notify_group_type_error( + context, 'group_type.delete', err, group_type=grp_type) + msg = _('Target group type is still in use.') + raise webob.exc.HTTPBadRequest(explanation=msg) + except exception.GroupTypeNotFound as err: + self._notify_group_type_error( + context, 'group_type.delete', err, id=id) + raise webob.exc.HTTPNotFound(explanation=err.msg) + + return webob.Response(status_int=202) + + @wsgi.Controller.api_version('3.11') + def index(self, req): + """Returns the list of group types.""" + limited_types = self._get_group_types(req) + req.cache_resource(limited_types, name='group_types') + return self._view_builder.index(req, limited_types) + + @wsgi.Controller.api_version('3.11') + def show(self, req, id): + """Return a single group type item.""" + context = req.environ['cinder.context'] + + # get default group type + if id is not None and id == 'default': + grp_type = group_types.get_default_group_type() + if not grp_type: + msg = _("Default group type can not be found.") + raise exc.HTTPNotFound(explanation=msg) + req.cache_resource(grp_type, name='group_types') + else: + try: + grp_type = group_types.get_group_type(context, id) + req.cache_resource(grp_type, name='group_types') + except exception.GroupTypeNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + + return self._view_builder.show(req, grp_type) + + def _parse_is_public(self, is_public): + """Parse is_public into something
usable. + + * True: List public group types only + * False: List private group types only + * None: List both public and private group types + """ + + if is_public is None: + # preserve default value of showing only public types + return True + elif utils.is_none_string(is_public): + return None + else: + try: + return strutils.bool_from_string(is_public, strict=True) + except ValueError: + msg = _('Invalid is_public filter [%s]') % is_public + raise exc.HTTPBadRequest(explanation=msg) + + def _get_group_types(self, req): + """Helper function that returns a list of type dicts.""" + params = req.params.copy() + marker, limit, offset = common.get_pagination_params(params) + sort_keys, sort_dirs = common.get_sort_params(params) + filters = {} + context = req.environ['cinder.context'] + if context.is_admin: + # Only admin has query access to all group types + filters['is_public'] = self._parse_is_public( + req.params.get('is_public', None)) + else: + filters['is_public'] = True + utils.remove_invalid_filter_options(context, + filters, + self._get_grp_type_filter_options() + ) + limited_types = group_types.get_all_group_types(context, + filters=filters, + marker=marker, + limit=limit, + sort_keys=sort_keys, + sort_dirs=sort_dirs, + offset=offset, + list_result=True) + return limited_types + + def _get_grp_type_filter_options(self): + """Return group type search options allowed by non-admin.""" + return ['is_public'] + + +def create_resource(): + return wsgi.Resource(GroupTypesController()) diff --git a/cinder/api/v3/groups.py b/cinder/api/v3/groups.py new file mode 100644 index 000000000..95f944543 --- /dev/null +++ b/cinder/api/v3/groups.py @@ -0,0 +1,290 @@ +# Copyright (c) 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
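The is_public query handling above reduces to a three-way mapping; a condensed sketch of what _parse_is_public() returns for each input:

# ?is_public=true    -> True   (public group types only; also the
#                               default when the parameter is omitted)
# ?is_public=false   -> False  (private group types only)
# ?is_public=none    -> None   (both public and private)
# anything else      -> HTTP 400 from bool_from_string(strict=True)
# Non-admin callers never reach this branch; _get_group_types() forces:
filters = {'is_public': True}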
+ +"""The groups controller.""" + +from oslo_log import log as logging +from oslo_utils import strutils +import webob +from webob import exc + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder.api.v3.views import groups as views_groups +from cinder import exception +from cinder import group as group_api +from cinder.i18n import _, _LI + +LOG = logging.getLogger(__name__) + +GROUP_API_VERSION = '3.13' +GROUP_CREATE_FROM_SRC_API_VERSION = '3.14' + + +class GroupsController(wsgi.Controller): + """The groups API controller for the OpenStack API.""" + + _view_builder_class = views_groups.ViewBuilder + + def __init__(self): + self.group_api = group_api.API() + super(GroupsController, self).__init__() + + @wsgi.Controller.api_version(GROUP_API_VERSION) + def show(self, req, id): + """Return data about the given group.""" + LOG.debug('show called for member %s', id) + context = req.environ['cinder.context'] + + # Not found exception will be handled at the wsgi level + group = self.group_api.get( + context, + group_id=id) + + return self._view_builder.detail(req, group) + + @wsgi.Controller.api_version(GROUP_API_VERSION) + @wsgi.action("delete") + def delete_group(self, req, id, body): + return self._delete(req, id, body) + + def _delete(self, req, id, body): + """Delete a group.""" + LOG.debug('delete called for group %s', id) + context = req.environ['cinder.context'] + del_vol = False + if body: + if not self.is_valid_body(body, 'delete'): + msg = _("Missing required element 'delete' in " + "request body.") + raise exc.HTTPBadRequest(explanation=msg) + + grp_body = body['delete'] + try: + del_vol = strutils.bool_from_string( + grp_body.get('delete-volumes', False), + strict=True) + except ValueError: + msg = (_("Invalid value '%s' for delete-volumes flag.") + % del_vol) + raise exc.HTTPBadRequest(explanation=msg) + + LOG.info(_LI('Delete group with id: %s'), id, + context=context) + + try: + group = self.group_api.get(context, id) + self.group_api.delete(context, group, del_vol) + except exception.GroupNotFound: + # Not found exception will be handled at the wsgi level + raise + except exception.InvalidGroup as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + return webob.Response(status_int=202) + + @wsgi.Controller.api_version(GROUP_API_VERSION) + def index(self, req): + """Returns a summary list of groups.""" + return self._get_groups(req, is_detail=False) + + @wsgi.Controller.api_version(GROUP_API_VERSION) + def detail(self, req): + """Returns a detailed list of groups.""" + return self._get_groups(req, is_detail=True) + + def _get_groups(self, req, is_detail): + """Returns a list of groups through view builder.""" + context = req.environ['cinder.context'] + filters = req.params.copy() + marker, limit, offset = common.get_pagination_params(filters) + sort_keys, sort_dirs = common.get_sort_params(filters) + + groups = self.group_api.get_all( + context, filters=filters, marker=marker, limit=limit, + offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) + + if is_detail: + groups = self._view_builder.detail_list( + req, groups) + else: + groups = self._view_builder.summary_list( + req, groups) + return groups + + @wsgi.Controller.api_version(GROUP_API_VERSION) + @wsgi.response(202) + def create(self, req, body): + """Create a new group.""" + LOG.debug('Creating new group %s', body) + self.assert_valid_body(body, 'group') + + context = req.environ['cinder.context'] + group = body['group'] + self.validate_name_and_description(group) + name = 
group.get('name') + description = group.get('description') + group_type = group.get('group_type') + if not group_type: + msg = _("group_type must be provided to create " + "group %(name)s.") % {'name': name} + raise exc.HTTPBadRequest(explanation=msg) + volume_types = group.get('volume_types') + if not volume_types: + msg = _("volume_types must be provided to create " + "group %(name)s.") % {'name': name} + raise exc.HTTPBadRequest(explanation=msg) + availability_zone = group.get('availability_zone') + + LOG.info(_LI("Creating group %(name)s."), + {'name': name}, + context=context) + + try: + new_group = self.group_api.create( + context, name, description, group_type, volume_types, + availability_zone=availability_zone) + except (exception.Invalid, exception.ObjectActionError) as error: + raise exc.HTTPBadRequest(explanation=error.msg) + except exception.NotFound: + # Not found exception will be handled at the wsgi level + raise + + retval = self._view_builder.summary(req, new_group) + return retval + + @wsgi.Controller.api_version(GROUP_CREATE_FROM_SRC_API_VERSION) + @wsgi.action("create-from-src") + @wsgi.response(202) + def create_from_src(self, req, body): + """Create a new group from a source. + + The source can be a group snapshot or a group. Note that + this does not require group_type and volume_types as the + "create" API above. + """ + LOG.debug('Creating new group %s.', body) + self.assert_valid_body(body, 'create-from-src') + + context = req.environ['cinder.context'] + group = body['create-from-src'] + self.validate_name_and_description(group) + name = group.get('name', None) + description = group.get('description', None) + group_snapshot_id = group.get('group_snapshot_id', None) + source_group_id = group.get('source_group_id', None) + if not group_snapshot_id and not source_group_id: + msg = (_("Either 'group_snapshot_id' or 'source_group_id' must be " + "provided to create group %(name)s from source.") + % {'name': name}) + raise exc.HTTPBadRequest(explanation=msg) + + if group_snapshot_id and source_group_id: + msg = _("Cannot provide both 'group_snapshot_id' and " + "'source_group_id' to create group %(name)s from " + "source.") % {'name': name} + raise exc.HTTPBadRequest(explanation=msg) + + if group_snapshot_id: + LOG.info(_LI("Creating group %(name)s from group_snapshot " + "%(snap)s."), + {'name': name, 'snap': group_snapshot_id}, + context=context) + elif source_group_id: + LOG.info(_LI("Creating group %(name)s from " + "source group %(source_group_id)s."), + {'name': name, 'source_group_id': source_group_id}, + context=context) + + try: + new_group = self.group_api.create_from_src( + context, name, description, group_snapshot_id, source_group_id) + except exception.InvalidGroup as error: + raise exc.HTTPBadRequest(explanation=error.msg) + except (exception.GroupNotFound, exception.GroupSnapshotNotFound): + # Not found exception will be handled at the wsgi level + raise + except exception.CinderException as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + retval = self._view_builder.summary(req, new_group) + return retval + + @wsgi.Controller.api_version(GROUP_API_VERSION) + def update(self, req, id, body): + """Update the group. + + Expected format of the input parameter 'body': + + .. code-block:: json + + { + "group": + { + "name": "my_group", + "description": "My group", + "add_volumes": "volume-uuid-1,volume-uuid-2,...", + "remove_volumes": "volume-uuid-8,volume-uuid-9,..." 
+ } + } + + """ + LOG.debug('Update called for group %s.', id) + + if not body: + msg = _("Missing request body.") + raise exc.HTTPBadRequest(explanation=msg) + + self.assert_valid_body(body, 'group') + context = req.environ['cinder.context'] + + group = body.get('group') + self.validate_name_and_description(group) + name = group.get('name') + description = group.get('description') + add_volumes = group.get('add_volumes') + remove_volumes = group.get('remove_volumes') + + # Allow name or description to be changed to an empty string ''. + if (name is None and description is None and not add_volumes + and not remove_volumes): + msg = _("Name, description, add_volumes, and remove_volumes " + "can not be all empty in the request body.") + raise exc.HTTPBadRequest(explanation=msg) + + LOG.info(_LI("Updating group %(id)s with name %(name)s " + "description: %(description)s add_volumes: " + "%(add_volumes)s remove_volumes: %(remove_volumes)s."), + {'id': id, 'name': name, + 'description': description, + 'add_volumes': add_volumes, + 'remove_volumes': remove_volumes}, + context=context) + + try: + group = self.group_api.get(context, id) + self.group_api.update( + context, group, name, description, + add_volumes, remove_volumes) + except exception.GroupNotFound: + # Not found exception will be handled at the wsgi level + raise + except exception.InvalidGroup as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + return webob.Response(status_int=202) + + +def create_resource(): + return wsgi.Resource(GroupsController()) diff --git a/cinder/api/v3/messages.py b/cinder/api/v3/messages.py index a7f332c76..00b43272b 100644 --- a/cinder/api/v3/messages.py +++ b/cinder/api/v3/messages.py @@ -13,19 +13,15 @@ """The messages API.""" -from oslo_config import cfg import webob -from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder.api.v3.views import messages as messages_view -from cinder import exception from cinder.message import api as message_api from cinder.message import defined_messages import cinder.policy -CONF = cfg.CONF MESSAGES_BASE_MICRO_VERSION = '3.3' @@ -56,10 +52,8 @@ class MessagesController(wsgi.Controller): """Return the given message.""" context = req.environ['cinder.context'] - try: - message = self.message_api.get(context, id) - except exception.MessageNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + message = self.message_api.get(context, id) check_policy(context, 'get', message) @@ -74,12 +68,10 @@ class MessagesController(wsgi.Controller): """Delete a message.""" context = req.environ['cinder.context'] - try: - message = self.message_api.get(context, id) - check_policy(context, 'delete', message) - self.message_api.delete(context, message) - except exception.MessageNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) + # Not found exception will be handled at the wsgi level + message = self.message_api.get(context, id) + check_policy(context, 'delete', message) + self.message_api.delete(context, message) return webob.Response(status_int=204) diff --git a/cinder/api/v3/router.py b/cinder/api/v3/router.py index ef04bd0bd..5c3e916d2 100644 --- a/cinder/api/v3/router.py +++ b/cinder/api/v3/router.py @@ -23,11 +23,19 @@ from cinder.api import extensions import cinder.api.openstack from cinder.api.v2 import limits from cinder.api.v2 import snapshot_metadata -from cinder.api.v2 import snapshots from cinder.api.v2 import types -from cinder.api.v2 
import volume_metadata +from cinder.api.v3 import backups +from cinder.api.v3 import clusters from cinder.api.v3 import consistencygroups +from cinder.api.v3 import group_snapshots +from cinder.api.v3 import group_specs +from cinder.api.v3 import group_types +from cinder.api.v3 import groups from cinder.api.v3 import messages +from cinder.api.v3 import snapshot_manage +from cinder.api.v3 import snapshots +from cinder.api.v3 import volume_manage +from cinder.api.v3 import volume_metadata from cinder.api.v3 import volumes from cinder.api import versions @@ -47,7 +55,7 @@ class APIRouter(cinder.api.openstack.APIRouter): self.resources['volumes'] = volumes.create_resource(ext_mgr) mapper.resource("volume", "volumes", controller=self.resources['volumes'], - collection={'detail': 'GET'}, + collection={'detail': 'GET', 'summary': 'GET'}, member={'action': 'POST'}) self.resources['messages'] = messages.create_resource(ext_mgr) @@ -55,11 +63,49 @@ class APIRouter(cinder.api.openstack.APIRouter): controller=self.resources['messages'], collection={'detail': 'GET'}) + self.resources['clusters'] = clusters.create_resource() + mapper.resource('cluster', 'clusters', + controller=self.resources['clusters'], + collection={'detail': 'GET'}) + self.resources['types'] = types.create_resource() mapper.resource("type", "types", controller=self.resources['types'], member={'action': 'POST'}) + self.resources['group_types'] = group_types.create_resource() + mapper.resource("group_type", "group_types", + controller=self.resources['group_types'], + member={'action': 'POST'}) + + self.resources['group_specs'] = group_specs.create_resource() + mapper.resource("group_spec", "group_specs", + controller=self.resources['group_specs'], + parent_resource=dict(member_name='group_type', + collection_name='group_types')) + + self.resources['groups'] = groups.create_resource() + mapper.resource("group", "groups", + controller=self.resources['groups'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + mapper.connect("groups", + "/{project_id}/groups/{id}/action", + controller=self.resources["groups"], + action="action", + conditions={"action": ["POST"]}) + mapper.connect("groups/action", + "/{project_id}/groups/action", + controller=self.resources["groups"], + action="action", + conditions={"action": ["POST"]}) + + self.resources['group_snapshots'] = (group_snapshots.create_resource()) + mapper.resource("group_snapshot", "group_snapshots", + controller=self.resources['group_snapshots'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + self.resources['snapshots'] = snapshots.create_resource(ext_mgr) mapper.resource("snapshot", "snapshots", controller=self.resources['snapshots'], @@ -106,3 +152,20 @@ class APIRouter(cinder.api.openstack.APIRouter): controller=self.resources['consistencygroups'], collection={'detail': 'GET'}, member={'action': 'POST'}) + + self.resources['manageable_volumes'] = volume_manage.create_resource() + mapper.resource("manageable_volume", "manageable_volumes", + controller=self.resources['manageable_volumes'], + collection={'detail': 'GET'}) + + self.resources['manageable_snapshots'] = \ + snapshot_manage.create_resource() + mapper.resource("manageable_snapshot", "manageable_snapshots", + controller=self.resources['manageable_snapshots'], + collection={'detail': 'GET'}) + + self.resources['backups'] = ( + backups.create_resource()) + mapper.resource("backup", "backups", + controller=self.resources['backups'], + collection={'detail': 'GET'}) diff --git 
a/cinder/api/v3/snapshot_manage.py b/cinder/api/v3/snapshot_manage.py new file mode 100644 index 000000000..4cd566712 --- /dev/null +++ b/cinder/api/v3/snapshot_manage.py @@ -0,0 +1,45 @@ +# Copyright (c) 2016 Stratoscale, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.contrib import snapshot_manage as snapshot_manage_v2 +from cinder.api.openstack import wsgi +from cinder import exception + + +class SnapshotManageController(snapshot_manage_v2.SnapshotManageController): + def _ensure_min_version(self, req, allowed_version): + version = req.api_version_request + if not version.matches(allowed_version, None): + raise exception.VersionNotFoundForAPIMethod(version=version) + + @wsgi.response(202) + def create(self, req, body): + self._ensure_min_version(req, "3.8") + return super(SnapshotManageController, self).create(req, body) + + @wsgi.extends + def index(self, req): + """Returns a summary list of snapshots available to manage.""" + self._ensure_min_version(req, "3.8") + return super(SnapshotManageController, self).index(req) + + @wsgi.extends + def detail(self, req): + """Returns a detailed list of snapshots available to manage.""" + self._ensure_min_version(req, "3.8") + return super(SnapshotManageController, self).detail(req) + + +def create_resource(): + return wsgi.Resource(SnapshotManageController()) diff --git a/cinder/tests/unit/keymgr/test_key_mgr.py b/cinder/api/v3/snapshots.py similarity index 55% rename from cinder/tests/unit/keymgr/test_key_mgr.py rename to cinder/api/v3/snapshots.py index 0c37ecd60..a46fcac41 100644 --- a/cinder/tests/unit/keymgr/test_key_mgr.py +++ b/cinder/api/v3/snapshots.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# Copyright 2016 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,21 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Test cases for the key manager. 
-""" +"""The volumes snapshots V3 api.""" -from cinder import test +from cinder.api.openstack import wsgi +from cinder.api.v2 import snapshots as snapshots_v2 +from cinder.api.v3.views import snapshots as snapshot_views -class KeyManagerTestCase(test.TestCase): - def __init__(self, *args, **kwargs): - super(KeyManagerTestCase, self).__init__(*args, **kwargs) +class SnapshotsController(snapshots_v2.SnapshotsController): + """The Snapshots API controller for the OpenStack API.""" - def _create_key_manager(self): - raise NotImplementedError() + _view_builder_class = snapshot_views.ViewBuilder - def setUp(self): - super(KeyManagerTestCase, self).setUp() - self.key_mgr = self._create_key_manager() +def create_resource(ext_mgr): + return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v3/views/clusters.py b/cinder/api/v3/views/clusters.py new file mode 100644 index 000000000..d4bbdfd1f --- /dev/null +++ b/cinder/api/v3/views/clusters.py @@ -0,0 +1,63 @@ +# Copyright (c) 2016 Red Hat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import timeutils + + +class ViewBuilder(object): + """Map Cluster into dicts for API responses.""" + + _collection_name = 'clusters' + + @staticmethod + def _normalize(date): + if date: + return timeutils.normalize_time(date) + return '' + + @classmethod + def detail(cls, cluster, flat=False): + """Detailed view of a cluster.""" + result = cls.summary(cluster, flat=True) + result.update( + num_hosts=cluster.num_hosts, + num_down_hosts=cluster.num_down_hosts, + last_heartbeat=cls._normalize(cluster.last_heartbeat), + created_at=cls._normalize(cluster.created_at), + updated_at=cls._normalize(cluster.updated_at), + disabled_reason=cluster.disabled_reason + ) + + if flat: + return result + return {'cluster': result} + + @staticmethod + def summary(cluster, flat=False): + """Generic, non-detailed view of a cluster.""" + result = { + 'name': cluster.name, + 'binary': cluster.binary, + 'state': 'up' if cluster.is_up() else 'down', + 'status': 'disabled' if cluster.disabled else 'enabled', + } + if flat: + return result + return {'cluster': result} + + @classmethod + def list(cls, clusters, detail=False): + func = cls.detail if detail else cls.summary + return {'clusters': [func(n, flat=True) for n in clusters]} diff --git a/cinder/api/v3/views/group_snapshots.py b/cinder/api/v3/views/group_snapshots.py new file mode 100644 index 000000000..b3411fd05 --- /dev/null +++ b/cinder/api/v3/views/group_snapshots.py @@ -0,0 +1,64 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import common + + +class ViewBuilder(common.ViewBuilder): + """Model group_snapshot API responses as a python dictionary.""" + + _collection_name = "group_snapshots" + + def __init__(self): + """Initialize view builder.""" + super(ViewBuilder, self).__init__() + + def summary_list(self, request, group_snapshots): + """Show a list of group_snapshots without many details.""" + return self._list_view(self.summary, request, group_snapshots) + + def detail_list(self, request, group_snapshots): + """Detailed view of a list of group_snapshots.""" + return self._list_view(self.detail, request, group_snapshots) + + def summary(self, request, group_snapshot): + """Generic, non-detailed view of a group_snapshot.""" + return { + 'group_snapshot': { + 'id': group_snapshot.id, + 'name': group_snapshot.name + } + } + + def detail(self, request, group_snapshot): + """Detailed view of a single group_snapshot.""" + return { + 'group_snapshot': { + 'id': group_snapshot.id, + 'group_id': group_snapshot.group_id, + 'status': group_snapshot.status, + 'created_at': group_snapshot.created_at, + 'name': group_snapshot.name, + 'description': group_snapshot.description + } + } + + def _list_view(self, func, request, group_snapshots): + """Provide a view for a list of group_snapshots.""" + group_snapshots_list = [func(request, group_snapshot)['group_snapshot'] + for group_snapshot in group_snapshots] + group_snapshots_dict = dict(group_snapshots=group_snapshots_list) + + return group_snapshots_dict diff --git a/cinder/api/v3/views/group_types.py b/cinder/api/v3/views/group_types.py new file mode 100644 index 000000000..6313d7712 --- /dev/null +++ b/cinder/api/v3/views/group_types.py @@ -0,0 +1,43 @@ +# Copyright 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
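The detail() view above produces a payload of this shape; the field values here are invented for illustration:

example = {
    "group_snapshot": {
        "id": "3a9c8d71-0000-0000-0000-000000000000",    # illustrative
        "group_id": "7f0e6b42-0000-0000-0000-000000000000",
        "status": "available",
        "created_at": "2016-08-01T12:00:00.000000",
        "name": "snap-1",
        "description": "example group snapshot",
    }
}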
+ +from cinder.api import common + + +class ViewBuilder(common.ViewBuilder): + + def show(self, request, group_type, brief=False): + """Trim away extraneous group type attributes.""" + context = request.environ['cinder.context'] + trimmed = dict(id=group_type.get('id'), + name=group_type.get('name'), + description=group_type.get('description'), + is_public=group_type.get('is_public')) + if common.validate_policy( + context, + 'group:access_group_types_specs'): + trimmed['group_specs'] = group_type.get('group_specs') + return trimmed if brief else dict(group_type=trimmed) + + def index(self, request, group_types): + """Index over trimmed group types.""" + group_types_list = [self.show(request, group_type, True) + for group_type in group_types] + group_type_links = self._get_collection_links(request, group_types, + 'group_types') + group_types_dict = dict(group_types=group_types_list) + if group_type_links: + group_types_dict['group_type_links'] = group_type_links + return group_types_dict diff --git a/cinder/api/v3/views/groups.py b/cinder/api/v3/views/groups.py new file mode 100644 index 000000000..1d012804b --- /dev/null +++ b/cinder/api/v3/views/groups.py @@ -0,0 +1,81 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import common + + +class ViewBuilder(common.ViewBuilder): + """Model group API responses as a python dictionary.""" + + _collection_name = "groups" + + def __init__(self): + """Initialize view builder.""" + super(ViewBuilder, self).__init__() + + def summary_list(self, request, groups): + """Show a list of groups without many details.""" + return self._list_view(self.summary, request, groups) + + def detail_list(self, request, groups): + """Detailed view of a list of groups.""" + return self._list_view(self.detail, request, groups) + + def summary(self, request, group): + """Generic, non-detailed view of a group.""" + return { + 'group': { + 'id': group.id, + 'name': group.name + } + } + + def detail(self, request, group): + """Detailed view of a single group.""" + group_ref = { + 'group': { + 'id': group.id, + 'status': group.status, + 'availability_zone': group.availability_zone, + 'created_at': group.created_at, + 'name': group.name, + 'description': group.description, + 'group_type': group.group_type_id, + 'volume_types': [v_type.id for v_type in group.volume_types], + } + } + + req_version = request.api_version_request + # Add group_snapshot_id and source_group_id if min version is greater + # than or equal to 3.14.
+ if req_version.matches("3.14", None): + group_ref['group']['group_snapshot_id'] = group.group_snapshot_id + group_ref['group']['source_group_id'] = group.source_group_id + + return group_ref + + def _list_view(self, func, request, groups): + """Provide a view for a list of groups.""" + groups_list = [ + func(request, group)['group'] + for group in groups] + grp_links = self._get_collection_links(request, + groups, + self._collection_name) + groups_dict = dict(groups=groups_list) + if grp_links: + groups_dict['group_links'] = grp_links + + return groups_dict diff --git a/cinder/api/v3/views/snapshots.py b/cinder/api/v3/views/snapshots.py new file mode 100644 index 000000000..1f3e3b79f --- /dev/null +++ b/cinder/api/v3/views/snapshots.py @@ -0,0 +1,33 @@ +# Copyright 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.views import snapshots as views_v2 + + +class ViewBuilder(views_v2.ViewBuilder): + """Model a snapshots API V3 response as a python dictionary.""" + + def detail(self, request, snapshot): + """Detailed view of a single snapshot.""" + snapshot_ref = super(ViewBuilder, self).detail(request, snapshot) + + req_version = request.api_version_request + # Add group_snapshot_id if min version is greater than or equal + # to 3.14. + if req_version.matches("3.14", None): + snapshot_ref['snapshot']['group_snapshot_id'] = ( + snapshot.get('group_snapshot_id')) + + return snapshot_ref diff --git a/cinder/api/v3/views/volumes.py b/cinder/api/v3/views/volumes.py new file mode 100644 index 000000000..c1331bd74 --- /dev/null +++ b/cinder/api/v3/views/volumes.py @@ -0,0 +1,40 @@ +# Copyright 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.v2.views import volumes as views_v2 + + +class ViewBuilder(views_v2.ViewBuilder): + """Model a volumes API V3 response as a python dictionary.""" + + def quick_summary(self, volume_count, volume_size): + """Number of volumes and size of volumes.""" + return { + 'volume-summary': { + 'total_count': volume_count, + 'total_size': volume_size + }, + } + + def detail(self, request, volume): + """Detailed view of a single volume.""" + volume_ref = super(ViewBuilder, self).detail(request, volume) + + req_version = request.api_version_request + # Add group_id if min version is greater than or equal to 3.13. 
+ if req_version.matches("3.13", None): + volume_ref['volume']['group_id'] = volume.get('group_id') + + return volume_ref diff --git a/cinder/api/v3/volume_manage.py b/cinder/api/v3/volume_manage.py new file mode 100644 index 000000000..9f0133e35 --- /dev/null +++ b/cinder/api/v3/volume_manage.py @@ -0,0 +1,45 @@ +# Copyright (c) 2016 Stratoscale, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.contrib import volume_manage as volume_manage_v2 +from cinder.api.openstack import wsgi +from cinder import exception + + +class VolumeManageController(volume_manage_v2.VolumeManageController): + def _ensure_min_version(self, req, allowed_version): + version = req.api_version_request + if not version.matches(allowed_version, None): + raise exception.VersionNotFoundForAPIMethod(version=version) + + @wsgi.response(202) + def create(self, req, body): + self._ensure_min_version(req, "3.8") + return super(VolumeManageController, self).create(req, body) + + @wsgi.extends + def index(self, req): + """Returns a summary list of volumes available to manage.""" + self._ensure_min_version(req, "3.8") + return super(VolumeManageController, self).index(req) + + @wsgi.extends + def detail(self, req): + """Returns a detailed list of volumes available to manage.""" + self._ensure_min_version(req, "3.8") + return super(VolumeManageController, self).detail(req) + + +def create_resource(): + return wsgi.Resource(VolumeManageController()) diff --git a/cinder/api/v3/volume_metadata.py b/cinder/api/v3/volume_metadata.py new file mode 100644 index 000000000..8b171582a --- /dev/null +++ b/cinder/api/v3/volume_metadata.py @@ -0,0 +1,80 @@ +# Copyright 2016 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
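Both manage controllers above share the same microversion gate; a condensed restatement of _ensure_min_version(), not new behavior:

def _ensure_min_version(req, allowed_version="3.8"):
    """Reject requests that negotiated below the given microversion."""
    version = req.api_version_request
    # matches("3.8", None) is an inclusive lower bound with no upper
    # bound, so exactly 3.8 and anything newer passes.
    if not version.matches(allowed_version, None):
        # Rendered by the wsgi layer as HTTP 404, so under-versioned
        # clients see the resource as nonexistent, not forbidden.
        raise exception.VersionNotFoundForAPIMethod(version=version)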
+ +"""The volume metadata V3 api.""" + +import hashlib + +from oslo_serialization import jsonutils +import six +import webob + +from cinder.api.openstack import wsgi +from cinder.api.v2 import volume_metadata as volume_meta_v2 +from cinder import exception + + +METADATA_MICRO_VERSION = '3.15' + + +class Controller(volume_meta_v2.Controller): + """The volume metadata API controller for the OpenStack API.""" + def _validate_etag(self, req, volume_id): + if not req.if_match: + return True + context = req.environ['cinder.context'] + metadata = self._get_metadata(context, volume_id) + data = jsonutils.dumps({"metadata": metadata}) + if six.PY3: + data = data.encode('utf-8') + checksum = hashlib.md5(data).hexdigest() + return checksum in req.if_match.etags + + def _ensure_min_version(self, req, allowed_version): + version = req.api_version_request + if not version.matches(allowed_version, None): + raise exception.VersionNotFoundForAPIMethod(version=version) + + @wsgi.extends + def index(self, req, volume_id): + self._ensure_min_version(req, METADATA_MICRO_VERSION) + metadata = super(Controller, self).index(req, volume_id) + resp = webob.Response() + data = jsonutils.dumps(metadata) + if six.PY3: + data = data.encode('utf-8') + resp.headers['Etag'] = hashlib.md5(data).hexdigest() + resp.body = data + return resp + + @wsgi.extends + def update(self, req, volume_id, id, body): + self._ensure_min_version(req, METADATA_MICRO_VERSION) + if not self._validate_etag(req, volume_id): + return webob.Response(status_int=412) + return super(Controller, self).update(req, volume_id, + id, body) + + @wsgi.extends + def update_all(self, req, volume_id, body): + self._ensure_min_version(req, METADATA_MICRO_VERSION) + if not self._validate_etag(req, volume_id): + return webob.Response(status_int=412) + return super(Controller, self).update_all(req, volume_id, + body) + + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/cinder/api/v3/volumes.py b/cinder/api/v3/volumes.py index 7be0c4c5b..1048c9a92 100644 --- a/cinder/api/v3/volumes.py +++ b/cinder/api/v3/volumes.py @@ -13,15 +13,34 @@ """The volumes V3 api.""" +from oslo_log import log as logging +from oslo_utils import uuidutils +from webob import exc + from cinder.api import common from cinder.api.openstack import wsgi from cinder.api.v2 import volumes as volumes_v2 +from cinder.api.v3.views import volumes as volume_views_v3 +from cinder import exception +from cinder import group as group_api +from cinder.i18n import _, _LI from cinder import utils +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +SUMMARY_BASE_MICRO_VERSION = '3.12' class VolumeController(volumes_v2.VolumeController): """The Volumes API controller for the OpenStack API V3.""" + _view_builder_class = volume_views_v3.ViewBuilder + + def __init__(self, ext_mgr): + self.group_api = group_api.API() + super(VolumeController, self).__init__(ext_mgr) + def _get_volumes(self, req, is_detail): """Returns a list of volumes, transformed through view builder.""" @@ -36,6 +55,9 @@ class VolumeController(volumes_v2.VolumeController): if req_version.matches(None, "3.3"): filters.pop('glance_metadata', None) + if req_version.matches(None, "3.9"): + filters.pop('group_id', None) + utils.remove_invalid_filter_options(context, filters, self._get_volume_filter_options()) # NOTE(thingee): v2 API allows name instead of display_name @@ -43,8 +65,10 @@ class VolumeController(volumes_v2.VolumeController): sort_keys[sort_keys.index('name')] = 'display_name' if 'name' 
in filters: - filters['display_name'] = filters['name'] - del filters['name'] + filters['display_name'] = filters.pop('name') + + if 'group_id' in filters: + filters['consistencygroup_id'] = filters.pop('group_id') strict = req.api_version_request.matches("3.2", None) self.volume_api.check_volume_filters(filters, strict) @@ -67,6 +91,157 @@ class VolumeController(volumes_v2.VolumeController): volumes = self._view_builder.summary_list(req, volumes) return volumes + @wsgi.Controller.api_version(SUMMARY_BASE_MICRO_VERSION) + def summary(self, req): + """Return summary of volumes.""" + view_builder_v3 = volume_views_v3.ViewBuilder() + context = req.environ['cinder.context'] + filters = req.params.copy() + + utils.remove_invalid_filter_options(context, filters, + self._get_volume_filter_options()) + + volumes = self.volume_api.get_volume_summary(context, filters=filters) + return view_builder_v3.quick_summary(volumes[0], int(volumes[1])) + + @wsgi.response(202) + def create(self, req, body): + """Creates a new volume. + + :param req: the request + :param body: the request body + :returns: dict -- the new volume dictionary + :raises: HTTPNotFound, HTTPBadRequest + """ + self.assert_valid_body(body, 'volume') + + LOG.debug('Create volume request body: %s', body) + context = req.environ['cinder.context'] + + req_version = req.api_version_request + # Remove group_id from body if max version is less than 3.13. + if req_version.matches(None, "3.12"): + # NOTE(xyang): The group_id is from a group created with a + # group_type. So with this group_id, we've got a group_type + # for this volume. Also if group_id is passed in, that means + # we already know which backend is hosting the group and the + # volume will be created on the same backend as well. So it + # won't go through the scheduler again if a group_id is + # passed in. + try: + body.get('volume', {}).pop('group_id', None) + except AttributeError: + msg = (_("Invalid body provided for creating volume. 
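# The listing code above rewrites public filter names to their internal
# column names with dict.pop(). The same translation, reduced to a table;
# FILTER_ALIASES is an illustrative name, not one from the patch.
FILTER_ALIASES = {
    'name': 'display_name',
    'group_id': 'consistencygroup_id',
}


def translate_filters(filters):
    for public, internal in FILTER_ALIASES.items():
        if public in filters:
            filters[internal] = filters.pop(public)
    return filters


print(translate_filters({'name': 'vol1', 'group_id': 'g-1'}))
# {'display_name': 'vol1', 'consistencygroup_id': 'g-1'}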
" + "Request API version: %s.") % req_version) + raise exc.HTTPBadRequest(explanation=msg) + + volume = body['volume'] + kwargs = {} + self.validate_name_and_description(volume) + + # NOTE(thingee): v2 API allows name instead of display_name + if 'name' in volume: + volume['display_name'] = volume.pop('name') + + # NOTE(thingee): v2 API allows description instead of + # display_description + if 'description' in volume: + volume['display_description'] = volume.pop('description') + + if 'image_id' in volume: + volume['imageRef'] = volume.pop('image_id') + + req_volume_type = volume.get('volume_type', None) + if req_volume_type: + # Not found exception will be handled at the wsgi level + if not uuidutils.is_uuid_like(req_volume_type): + kwargs['volume_type'] = ( + volume_types.get_volume_type_by_name( + context, req_volume_type)) + else: + kwargs['volume_type'] = volume_types.get_volume_type( + context, req_volume_type) + + kwargs['metadata'] = volume.get('metadata', None) + + snapshot_id = volume.get('snapshot_id') + if snapshot_id is not None: + # Not found exception will be handled at the wsgi level + kwargs['snapshot'] = self.volume_api.get_snapshot(context, + snapshot_id) + else: + kwargs['snapshot'] = None + + source_volid = volume.get('source_volid') + if source_volid is not None: + # Not found exception will be handled at the wsgi level + kwargs['source_volume'] = ( + self.volume_api.get_volume(context, + source_volid)) + else: + kwargs['source_volume'] = None + + source_replica = volume.get('source_replica') + if source_replica is not None: + # Not found exception will be handled at the wsgi level + src_vol = self.volume_api.get_volume(context, + source_replica) + if src_vol['replication_status'] == 'disabled': + explanation = _('source volume id:%s is not' + ' replicated') % source_replica + raise exc.HTTPBadRequest(explanation=explanation) + kwargs['source_replica'] = src_vol + else: + kwargs['source_replica'] = None + + consistencygroup_id = volume.get('consistencygroup_id') + if consistencygroup_id is not None: + # Not found exception will be handled at the wsgi level + kwargs['consistencygroup'] = ( + self.consistencygroup_api.get(context, + consistencygroup_id)) + else: + kwargs['consistencygroup'] = None + + # Get group_id if volume is in a group. 
+ group_id = volume.get('group_id') + if group_id is not None: + try: + kwargs['group'] = self.group_api.get(context, group_id) + except exception.GroupNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + + size = volume.get('size', None) + if size is None and kwargs['snapshot'] is not None: + size = kwargs['snapshot']['volume_size'] + elif size is None and kwargs['source_volume'] is not None: + size = kwargs['source_volume']['size'] + elif size is None and kwargs['source_replica'] is not None: + size = kwargs['source_replica']['size'] + + LOG.info(_LI("Create volume of %s GB"), size) + + if self.ext_mgr.is_loaded('os-image-create'): + image_ref = volume.get('imageRef') + if image_ref is not None: + image_uuid = self._image_uuid_from_ref(image_ref, context) + kwargs['image_id'] = image_uuid + + kwargs['availability_zone'] = volume.get('availability_zone', None) + kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) + multiattach = volume.get('multiattach', False) + kwargs['multiattach'] = multiattach + + new_volume = self.volume_api.create(context, + size, + volume.get('display_name'), + volume.get('display_description'), + **kwargs) + + retval = self._view_builder.detail(req, new_volume) + + return retval + def create_resource(ext_mgr): return wsgi.Resource(VolumeController(ext_mgr)) diff --git a/cinder/api/versions.py b/cinder/api/versions.py index 60413d3b4..01d31b310 100644 --- a/cinder/api/versions.py +++ b/cinder/api/versions.py @@ -17,8 +17,6 @@ import copy -from oslo_config import cfg - from cinder.api import extensions from cinder.api import openstack from cinder.api.openstack import api_version_request @@ -26,8 +24,6 @@ from cinder.api.openstack import wsgi from cinder.api.views import versions as views_versions -CONF = cfg.CONF - _LINKS = [{ "rel": "describedby", "type": "text/html", @@ -100,7 +96,7 @@ class VersionsController(wsgi.Controller): known_versions.pop('v3.0') return builder.build_versions(known_versions) - @wsgi.Controller.api_version('2.0') # noqa + @index.api_version('2.0') def index(self, req): # pylint: disable=E0102 """Return versions supported prior to the microversions epoch.""" builder = views_versions.get_view_builder(req) @@ -109,7 +105,7 @@ class VersionsController(wsgi.Controller): known_versions.pop('v3.0') return builder.build_versions(known_versions) - @wsgi.Controller.api_version('3.0') # noqa + @index.api_version('3.0') def index(self, req): # pylint: disable=E0102 """Return versions supported after the start of microversions.""" builder = views_versions.get_view_builder(req) diff --git a/cinder/api/views/qos_specs.py b/cinder/api/views/qos_specs.py index ad49f8fff..794228278 100644 --- a/cinder/api/views/qos_specs.py +++ b/cinder/api/views/qos_specs.py @@ -31,19 +31,20 @@ class ViewBuilder(common.ViewBuilder): def summary(self, request, qos_spec): """Generic, non-detailed view of a qos_specs.""" - return { - 'qos_specs': qos_spec, - 'links': self._get_links(request, - qos_spec['id']), - } + return self.detail(request, qos_spec) def detail(self, request, qos_spec): """Detailed view of a single qos_spec.""" # TODO(zhiteng) Add associations to detailed view return { - 'qos_specs': qos_spec, + 'qos_specs': { + 'id': qos_spec.id, + 'name': qos_spec.name, + 'consumer': qos_spec.consumer, + 'specs': qos_spec.specs + }, 'links': self._get_links(request, - qos_spec['id']), + qos_spec.id), } def associations(self, request, associates): diff --git a/cinder/backup/api.py b/cinder/backup/api.py index f89cf48b8..69b33cea5 100644 --- 
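# When no size is given, create() above falls back to the source's size in a
# fixed order: snapshot, then source volume, then source replica. The chain,
# restated as a pure function (names illustrative):
def infer_size(requested, snapshot=None, source_volume=None,
               source_replica=None):
    if requested is not None:
        return requested
    for source, key in ((snapshot, 'volume_size'),
                        (source_volume, 'size'),
                        (source_replica, 'size')):
        if source is not None:
            return source[key]
    return None


print(infer_size(None, snapshot={'volume_size': 10}))   # 10
print(infer_size(None, source_volume={'size': 20}))     # 20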
a/cinder/backup/api.py +++ b/cinder/backup/api.py @@ -25,11 +25,11 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils -from oslo_utils import versionutils from pytz import timezone import random from cinder.backup import rpcapi as backup_rpcapi +from cinder.common import constants from cinder import context from cinder.db import base from cinder import exception @@ -139,31 +139,13 @@ class API(base.Base): return backups - def _is_scalable_only(self): - """True if we're running in deployment where all c-bak are scalable. - - We need this method to decide if we can assume that all of our c-bak - services are decoupled from c-vol. - - FIXME(dulek): This shouldn't be needed in Newton. - """ - cap = self.backup_rpcapi.client.version_cap - if cap: - cap = versionutils.convert_version_to_tuple(cap) - return cap >= (1, 3) # Mitaka is marked by c-bak 1.3+. - else: - # NOTE(dulek): No version cap means we're running in an environment - # without c-bak services. Letting it pass as Mitaka, request will - # just fail anyway so it doesn't really matter. - return True - def _az_matched(self, service, availability_zone): return ((not availability_zone) or service.availability_zone == availability_zone) def _is_backup_service_enabled(self, availability_zone, host): """Check if there is a backup service available.""" - topic = CONF.backup_topic + topic = constants.BACKUP_TOPIC ctxt = context.get_admin_context() services = objects.ServiceList.get_all_by_topic( ctxt, topic, disabled=False) @@ -192,29 +174,10 @@ class API(base.Base): idx = idx + 1 return None - def _get_available_backup_service_host(self, host, az, volume_host=None): + def _get_available_backup_service_host(self, host, az): """Return an appropriate backup service host.""" - - # FIXME(dulek): We need to keep compatibility with Liberty, where c-bak - # were coupled with c-vol. If we're running in mixed Liberty-Mitaka - # environment we will be scheduling backup jobs the old way. - # - # This snippet should go away in Newton. Note that volume_host - # parameter will also be unnecessary then. - if not self._is_scalable_only(): - if volume_host: - volume_host = volume_utils.extract_host(volume_host, - level='host') - if volume_host and self._is_backup_service_enabled(az, - volume_host): - return volume_host - elif host and self._is_backup_service_enabled(az, host): - return host - else: - raise exception.ServiceNotFound(service_id='cinder-backup') - backup_host = None - if (not host or not CONF.backup_use_same_host): + if not host or not CONF.backup_use_same_host: backup_host = self._get_any_available_backup_service(az) elif self._is_backup_service_enabled(az, host): backup_host = host @@ -227,7 +190,7 @@ class API(base.Base): :returns: list -- hosts for services that are enabled for backup. 
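# With the Liberty fallback removed, backup host selection reduces to: use
# the requested host only when backup_use_same_host is set and that host
# runs an enabled backup service in the AZ, otherwise pick any enabled one.
# A list-backed approximation (the real code queries ServiceList):
import random


class ServiceNotFound(Exception):
    pass


def pick_backup_host(host, az, services, use_same_host=False):
    enabled = [s['host'] for s in services
               if not s['disabled'] and s['availability_zone'] == az]
    if use_same_host and host in enabled:
        return host
    if enabled:
        return random.choice(enabled)
    raise ServiceNotFound('cinder-backup')


services = [{'host': 'node1', 'availability_zone': 'az1', 'disabled': False}]
print(pick_backup_host('node2', 'az1', services))   # node1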
""" - topic = CONF.backup_topic + topic = constants.BACKUP_TOPIC ctxt = context.get_admin_context() services = objects.ServiceList.get_all_by_topic( ctxt, topic, disabled=False) @@ -270,9 +233,9 @@ class API(base.Base): raise exception.InvalidSnapshot(reason=msg) previous_status = volume['status'] + volume_host = volume_utils.extract_host(volume.host, 'host') host = self._get_available_backup_service_host( - None, volume.availability_zone, - volume_utils.extract_host(volume.host, 'host')) + volume_host, volume.availability_zone) # Reserve a quota before setting volume status and backup status try: @@ -395,8 +358,7 @@ class API(base.Base): LOG.info(_LI("Creating volume of %(size)s GB for restore of " "backup %(backup_id)s."), - {'size': size, 'backup_id': backup_id}, - context=context) + {'size': size, 'backup_id': backup_id}) volume = self.volume_api.create(context, size, name, description) volume_id = volume['id'] @@ -422,13 +384,12 @@ class API(base.Base): LOG.info(_LI("Overwriting volume %(volume_id)s with restore of " "backup %(backup_id)s"), - {'volume_id': volume_id, 'backup_id': backup_id}, - context=context) + {'volume_id': volume_id, 'backup_id': backup_id}) # Setting the status here rather than setting at start and unrolling # for each error condition, it should be a very small window backup.host = self._get_available_backup_service_host( - backup.host, backup.availability_zone, volume_host=volume.host) + backup.host, backup.availability_zone) backup.status = fields.BackupStatus.RESTORING backup.restore_volume_id = volume.id backup.save() @@ -589,3 +550,10 @@ class API(base.Base): hosts) return backup + + def update(self, context, backup_id, fields): + check_policy(context, 'update') + backup = self.get(context, backup_id) + backup.update(fields) + backup.save() + return backup diff --git a/cinder/backup/chunkeddriver.py b/cinder/backup/chunkeddriver.py index 20230532d..595986ace 100644 --- a/cinder/backup/chunkeddriver.py +++ b/cinder/backup/chunkeddriver.py @@ -159,7 +159,7 @@ class ChunkedBackupDriver(driver.BackupDriver): """ return - def _create_container(self, context, backup): + def _create_container(self, backup): # Container's name will be decided by the driver (returned by method # update_container_name), if no change is required by the driver then # we'll use the one the backup object already has, but if it doesn't @@ -280,7 +280,7 @@ class ChunkedBackupDriver(driver.BackupDriver): err = _('volume size %d is invalid.') % volume['size'] raise exception.InvalidVolume(reason=err) - container = self._create_container(self.context, backup) + container = self._create_container(backup) object_prefix = self._generate_object_name_prefix(backup) backup.service_metadata = object_prefix diff --git a/cinder/backup/driver.py b/cinder/backup/driver.py index 81cb37440..a42ab979b 100644 --- a/cinder/backup/driver.py +++ b/cinder/backup/driver.py @@ -25,7 +25,7 @@ import six from cinder.db import base from cinder import exception from cinder.i18n import _, _LI, _LW -from cinder import keymgr +from cinder import keymgr as key_manager service_opts = [ cfg.IntOpt('backup_metadata_version', default=2, @@ -89,7 +89,8 @@ class BackupMetadataAPI(base.Base): continue # Copy the encryption key uuid for backup if key is 'encryption_key_id' and value is not None: - value = keymgr.API().copy_key(self.context, value) + km = key_manager.API(CONF) + value = km.store(self.context, km.get(self.context, value)) LOG.debug("Copying encryption key uuid for backup.") container[type_tag][key] = value diff 
--git a/cinder/backup/drivers/google.py b/cinder/backup/drivers/google.py index 534ef72e8..3f67e9a6b 100644 --- a/cinder/backup/drivers/google.py +++ b/cinder/backup/drivers/google.py @@ -91,6 +91,10 @@ gcsbackup_service_opts = [ 'progress notifications to Ceilometer when backing ' 'up the volume to the GCS backend storage. The ' 'default value is True to enable the timer.'), + cfg.URIOpt('backup_gcs_proxy_url', + help='URL for http proxy access.', + secret=True), + ] CONF = cfg.CONF @@ -134,14 +138,21 @@ class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver): self.bucket_location = CONF.backup_gcs_bucket_location self.storage_class = CONF.backup_gcs_storage_class self.num_retries = CONF.backup_gcs_num_retries - http_user_agent = http.set_user_agent(httplib2.Http(), - CONF.backup_gcs_user_agent) + http_user_agent = http.set_user_agent( + httplib2.Http(proxy_info=self.get_gcs_proxy_info()), + CONF.backup_gcs_user_agent) self.conn = discovery.build('storage', 'v1', http=http_user_agent, credentials=credentials) self.resumable = self.writer_chunk_size != -1 + def get_gcs_proxy_info(self): + if CONF.backup_gcs_proxy_url: + return httplib2.proxy_info_from_url(CONF.backup_gcs_proxy_url) + else: + return httplib2.proxy_info_from_environment() + def check_gcs_options(self): required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file', 'backup_gcs_project_id') diff --git a/cinder/backup/drivers/swift.py b/cinder/backup/drivers/swift.py index f9469efb4..cf3083dc4 100644 --- a/cinder/backup/drivers/swift.py +++ b/cinder/backup/drivers/swift.py @@ -198,8 +198,8 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver): 'endpoints')[0].get(endpoint_type) break else: - self.auth_url = '%s%s' % (CONF.backup_swift_auth_url, - context.project_id) + self.auth_url = CONF.backup_swift_auth_url + if self.auth_url is None: raise exception.BackupDriverException(_( "Could not determine which Keystone endpoint to use. This can " diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py index 7c7f7f7ee..4d3555783 100644 --- a/cinder/backup/manager.py +++ b/cinder/backup/manager.py @@ -25,8 +25,6 @@ Volume backups can be created, restored, deleted and listed. **Related Flags** -:backup_topic: What :mod:`rpc` topic to listen to (default: - `cinder-backup`). :backup_manager: The module name of a class derived from :class:`manager.Manager` (default: :class:`cinder.backup.manager.Manager`). 
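# Proxy resolution as added to the GCS driver: prefer the configured URL,
# else fall back to the process environment. httplib2's two helpers are
# real; the proxy URL below is only an example value.
import httplib2


def gcs_proxy_info(proxy_url=None):
    if proxy_url:
        return httplib2.proxy_info_from_url(proxy_url)
    return httplib2.proxy_info_from_environment()


http = httplib2.Http(
    proxy_info=gcs_proxy_info('http://proxy.example.com:3128'))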
@@ -83,7 +81,7 @@ QUOTAS = quota.QUOTAS class BackupManager(manager.SchedulerDependentManager): """Manages backup of block storage devices.""" - RPC_API_VERSION = '2.0' + RPC_API_VERSION = backup_rpcapi.BackupAPI.RPC_API_VERSION target = messaging.Target(version=RPC_API_VERSION) @@ -200,12 +198,12 @@ class BackupManager(manager.SchedulerDependentManager): return mapper[service] return service - def _update_backup_error(self, backup, context, err): + def _update_backup_error(self, backup, err): backup.status = fields.BackupStatus.ERROR backup.fail_reason = err backup.save() - def init_host(self): + def init_host(self, **kwargs): """Run initialization needed for a standalone service.""" ctxt = context.get_admin_context() @@ -269,7 +267,7 @@ class BackupManager(manager.SchedulerDependentManager): self._cleanup_one_volume(ctxt, volume) err = 'incomplete backup reset on manager restart' - self._update_backup_error(backup, ctxt, err) + self._update_backup_error(backup, err) elif backup['status'] == fields.BackupStatus.RESTORING: LOG.info(_LI('Resetting backup %s to ' 'available (was restoring).'), @@ -381,7 +379,7 @@ class BackupManager(manager.SchedulerDependentManager): 'expected_status': expected_status, 'actual_status': actual_status, } - self._update_backup_error(backup, context, err) + self._update_backup_error(backup, err) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.CREATING @@ -392,7 +390,7 @@ class BackupManager(manager.SchedulerDependentManager): 'expected_status': expected_status, 'actual_status': actual_status, } - self._update_backup_error(backup, context, err) + self._update_backup_error(backup, err) backup.save() raise exception.InvalidBackup(reason=err) @@ -403,7 +401,7 @@ class BackupManager(manager.SchedulerDependentManager): self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'error_backing-up'}) - self._update_backup_error(backup, context, six.text_type(err)) + self._update_backup_error(backup, six.text_type(err)) # Restore the original status. 
self.db.volume_update(context, volume_id, @@ -487,7 +485,7 @@ class BackupManager(manager.SchedulerDependentManager): '%(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) - self._update_backup_error(backup, context, err) + self._update_backup_error(backup, err) self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) @@ -572,7 +570,7 @@ class BackupManager(manager.SchedulerDependentManager): '%(expected_status)s but got %(actual_status)s.') \ % {'expected_status': expected_status, 'actual_status': actual_status} - self._update_backup_error(backup, context, err) + self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) backup_service = self._map_service_to_driver(backup['service']) @@ -585,7 +583,7 @@ class BackupManager(manager.SchedulerDependentManager): ' backup [%(backup_service)s].')\ % {'configured_service': configured_service, 'backup_service': backup_service} - self._update_backup_error(backup, context, err) + self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) try: @@ -593,8 +591,7 @@ class BackupManager(manager.SchedulerDependentManager): backup_service.delete(backup) except Exception as err: with excutils.save_and_reraise_exception(): - self._update_backup_error(backup, context, - six.text_type(err)) + self._update_backup_error(backup, six.text_type(err)) # Get reservations try: @@ -721,7 +718,7 @@ class BackupManager(manager.SchedulerDependentManager): err = _('Import record failed, cannot find backup ' 'service to perform the import. Request service ' '%(service)s') % {'service': backup_service} - self._update_backup_error(backup, context, err) + self._update_backup_error(backup, err) raise exception.ServiceNotFound(service_id=backup_service) else: # Yes... 
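# Several call sites above persist the failure and then re-raise through
# oslo.utils' save_and_reraise_exception, which keeps the original traceback
# intact. Minimal usage; _record_error is an illustrative stand-in for
# _update_backup_error.
from oslo_utils import excutils


def _record_error(err):
    print('backup marked as error: %s' % err)


def delete_backup():
    try:
        raise RuntimeError('driver failed')
    except Exception as err:
        with excutils.save_and_reraise_exception():
            _record_error(err)

# Calling delete_backup() prints the message, then RuntimeError propagates.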
@@ -735,7 +732,7 @@ class BackupManager(manager.SchedulerDependentManager): backup_service.import_record(backup, driver_options) except Exception as err: msg = six.text_type(err) - self._update_backup_error(backup, context, msg) + self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) required_import_options = { @@ -755,7 +752,7 @@ class BackupManager(manager.SchedulerDependentManager): msg = (_('Driver successfully decoded imported backup data, ' 'but there are missing fields (%s).') % ', '.join(missing_opts)) - self._update_backup_error(backup, context, msg) + self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) # Confirm the ID from the record in the DB is the right one @@ -764,7 +761,7 @@ class BackupManager(manager.SchedulerDependentManager): msg = (_('Trying to import backup metadata from id %(meta_id)s' ' into backup %(id)s.') % {'meta_id': backup_id, 'id': backup.id}) - self._update_backup_error(backup, context, msg) + self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) # Overwrite some fields @@ -794,8 +791,7 @@ class BackupManager(manager.SchedulerDependentManager): 'id': backup.id}) except exception.InvalidBackup as err: with excutils.save_and_reraise_exception(): - self._update_backup_error(backup, context, - six.text_type(err)) + self._update_backup_error(backup, six.text_type(err)) LOG.info(_LI('Import record id %s metadata from driver ' 'finished.'), backup.id) @@ -815,17 +811,17 @@ class BackupManager(manager.SchedulerDependentManager): {'backup_id': backup.id, 'status': status}) - backup_service = self._map_service_to_driver(backup.service) - LOG.info(_LI('Backup service: %s.'), backup_service) - if backup_service is not None: + backup_service_name = self._map_service_to_driver(backup.service) + LOG.info(_LI('Backup service: %s.'), backup_service_name) + if backup_service_name is not None: configured_service = self.driver_name - if backup_service != configured_service: + if backup_service_name != configured_service: err = _('Reset backup status aborted, the backup service' ' currently configured [%(configured_service)s] ' 'is not the backup service that was used to create' ' this backup [%(backup_service)s].') % \ {'configured_service': configured_service, - 'backup_service': backup_service} + 'backup_service': backup_service_name} raise exception.InvalidBackup(reason=err) # Verify backup try: @@ -833,6 +829,7 @@ class BackupManager(manager.SchedulerDependentManager): if (status == fields.BackupStatus.AVAILABLE and backup['status'] != fields.BackupStatus.RESTORING): # check whether we could verify the backup is ok or not + backup_service = self.service.get_backup_driver(context) if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) diff --git a/cinder/backup/rpcapi.py b/cinder/backup/rpcapi.py index d17065586..feb7ec80a 100644 --- a/cinder/backup/rpcapi.py +++ b/cinder/backup/rpcapi.py @@ -18,13 +18,12 @@ Client side of the volume backup RPC API. 
""" -from oslo_config import cfg from oslo_log import log as logging +from cinder.common import constants from cinder import rpc -CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -49,7 +48,7 @@ class BackupAPI(rpc.RPCAPI): """ RPC_API_VERSION = '2.0' - TOPIC = CONF.backup_topic + TOPIC = constants.BACKUP_TOPIC BINARY = 'cinder-backup' def _compat_ver(self, current, legacy): diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py index ac2e5672e..871047da6 100644 --- a/cinder/brick/local_dev/lvm.py +++ b/cinder/brick/local_dev/lvm.py @@ -577,6 +577,18 @@ class LVM(executor.Executor): return name return '_' + name + def _lv_is_active(self, name): + cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', + 'Attr', '%s/%s' % (self.vg_name, name)] + out, _err = self._execute(*cmd, + root_helper=self._root_helper, + run_as_root=True) + if out: + out = out.strip() + if (out[4] == 'a'): + return True + return False + def deactivate_lv(self, name): lv_path = self.vg_name + '/' + self._mangle_lv_name(name) cmd = ['lvchange', '-a', 'n'] @@ -592,6 +604,21 @@ class LVM(executor.Executor): LOG.error(_LE('StdErr :%s'), err.stderr) raise + # Wait until lv is deactivated to return in + # order to prevent a race condition. + self._wait_for_volume_deactivation(name) + + @utils.retry(exceptions=exception.VolumeNotDeactivated, retries=3, + backoff_rate=1) + def _wait_for_volume_deactivation(self, name): + LOG.debug("Checking to see if volume %s has been deactivated.", + name) + if self._lv_is_active(name): + LOG.debug("Volume %s is still active.", name) + raise exception.VolumeNotDeactivated(name=name) + else: + LOG.debug("Volume %s has been deactivated.", name) + def activate_lv(self, name, is_snapshot=False, permanent=False): """Ensure that logical volume/snapshot logical volume is activated. @@ -703,6 +730,44 @@ class LVM(executor.Executor): return True return False + def lv_is_snapshot(self, name): + """Return True if LV is a snapshot, False otherwise.""" + cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', + 'Attr', '%s/%s' % (self.vg_name, name)] + out, _err = self._execute(*cmd, + root_helper=self._root_helper, + run_as_root=True) + out = out.strip() + if out: + if (out[0] == 's'): + return True + return False + + def lv_is_open(self, name): + """Return True if LV is currently open, False otherwise.""" + cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', + 'Attr', '%s/%s' % (self.vg_name, name)] + out, _err = self._execute(*cmd, + root_helper=self._root_helper, + run_as_root=True) + out = out.strip() + if out: + if (out[5] == 'o'): + return True + return False + + def lv_get_origin(self, name): + """Return the origin of an LV that is a snapshot, None otherwise.""" + cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', + 'Origin', '%s/%s' % (self.vg_name, name)] + out, _err = self._execute(*cmd, + root_helper=self._root_helper, + run_as_root=True) + out = out.strip() + if out: + return out + return None + def extend_volume(self, lv_name, new_size): """Extend the size of an existing volume.""" # Volumes with snaps have attributes 'o' or 'O' and will be diff --git a/cinder/cmd/all.py b/cinder/cmd/all.py index a5708f05b..6baf6e3f6 100644 --- a/cinder/cmd/all.py +++ b/cinder/cmd/all.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright 2011 OpenStack, LLC +# Copyright 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# All Rights Reserved. diff --git a/cinder/cmd/backup.py b/cinder/cmd/backup.py index 19691b551..0472c3a5f 100644 --- a/cinder/cmd/backup.py +++ b/cinder/cmd/backup.py @@ -18,11 +18,13 @@ """Starter script for Cinder Volume Backup.""" import logging as python_logging +import shlex import sys import eventlet from oslo_config import cfg from oslo_log import log as logging +from oslo_privsep import priv_context from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts @@ -49,6 +51,7 @@ def main(): version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) + priv_context.init(root_helper=shlex.split(utils.get_root_helper())) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create(binary='cinder-backup') diff --git a/cinder/cmd/manage.py b/cinder/cmd/manage.py index 6afc0db46..2d186889c 100644 --- a/cinder/cmd/manage.py +++ b/cinder/cmd/manage.py @@ -58,6 +58,7 @@ from __future__ import print_function import logging as python_logging import os import sys +import time from oslo_config import cfg from oslo_db import exception as db_exc @@ -71,6 +72,7 @@ i18n.enable_lazy() # Need to register global_opts from cinder.common import config # noqa +from cinder.common import constants from cinder import context from cinder import db from cinder.db import migration as db_migration @@ -117,7 +119,7 @@ class ShellCommands(object): """ self.run('python') - @args('--shell', dest="shell", + @args('--shell', metavar='', help='Python shell') def run(self, shell=None): @@ -229,6 +231,9 @@ class DbCommands(object): if age_in_days <= 0: print(_("Must supply a positive, non-zero value for age")) sys.exit(1) + if age_in_days >= (int(time.time()) / 86400): + print(_("Maximum age is count of days since epoch.")) + sys.exit(1) ctxt = context.get_admin_context() try: @@ -262,7 +267,7 @@ class VolumeCommands(object): if self._client is None: if not rpc.initialized(): rpc.init(CONF) - target = messaging.Target(topic=CONF.volume_topic) + target = messaging.Target(topic=constants.VOLUME_TOPIC) serializer = objects.base.CinderObjectSerializer() self._client = rpc.get_client(target, serializer=serializer) @@ -436,13 +441,23 @@ class BackupCommands(object): bk.save() -class ServiceCommands(object): +class BaseCommand(object): + @staticmethod + def _normalize_time(time_field): + return time_field and timeutils.normalize_time(time_field) + + @staticmethod + def _state_repr(is_up): + return ':-)' if is_up else 'XXX' + + +class ServiceCommands(BaseCommand): """Methods for managing services.""" def list(self): """Show a list of all cinder services.""" ctxt = context.get_admin_context() services = objects.ServiceList.get_all(ctxt) - print_format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s" + print_format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s" print(print_format % (_('Binary'), _('Host'), _('Zone'), @@ -450,23 +465,19 @@ class ServiceCommands(object): _('State'), _('Updated At'), _('RPC Version'), - _('Object Version'))) + _('Object Version'), + _('Cluster'))) for svc in services: - alive = utils.service_is_up(svc) - art = ":-)" if alive else "XXX" - status = 'enabled' - if svc.disabled: - status = 'disabled' - updated_at = svc.updated_at - if updated_at: - updated_at = timeutils.normalize_time(updated_at) - rpc_version = (svc.rpc_current_version or - rpc.LIBERTY_RPC_VERSIONS.get(svc.binary, '')) - object_version = (svc.object_current_version or 'liberty') + art = 
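# The new purge guard rejects ages that reach back before the Unix epoch,
# since no row can be older than that; 86400 is seconds per day. The same
# checks as a pure function (validate_purge_age is illustrative):
import time


def validate_purge_age(age_in_days):
    if age_in_days <= 0:
        raise ValueError('Must supply a positive, non-zero value for age')
    if age_in_days >= int(time.time()) / 86400:
        raise ValueError('Maximum age is count of days since epoch.')
    return age_in_days


print(validate_purge_age(30))        # fine
# validate_purge_age(10 ** 6) raises: a million days predates the epoch.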
self._state_repr(utils.service_is_up(svc)) + status = 'disabled' if svc.disabled else 'enabled' + updated_at = self._normalize_time(svc.updated_at) + rpc_version = svc.rpc_current_version + object_version = svc.object_current_version + cluster = svc.cluster_name or '' print(print_format % (svc.binary, svc.host.partition('.')[0], svc.availability_zone, status, art, - updated_at, rpc_version, - object_version)) + updated_at, rpc_version, object_version, + cluster)) @args('binary', type=str, help='Service to delete from the host.') @@ -487,9 +498,109 @@ class ServiceCommands(object): print(_("Service %(service)s on host %(host)s removed.") % {'service': binary, 'host': host_name}) + +class ClusterCommands(BaseCommand): + """Methods for managing clusters.""" + def list(self): + """Show a list of all cinder services.""" + ctxt = context.get_admin_context() + clusters = objects.ClusterList.get_all(ctxt, services_summary=True) + print_format = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s" + print(print_format % (_('Name'), + _('Binary'), + _('Status'), + _('State'), + _('Heartbeat'), + _('Hosts'), + _('Down Hosts'), + _('Updated At'))) + for cluster in clusters: + art = self._state_repr(cluster.is_up()) + status = 'disabled' if cluster.disabled else 'enabled' + heartbeat = self._normalize_time(cluster.last_heartbeat) + updated_at = self._normalize_time(cluster.updated_at) + print(print_format % (cluster.name, cluster.binary, status, art, + heartbeat, cluster.num_hosts, + cluster.num_down_hosts, updated_at)) + + @args('--recursive', action='store_true', default=False, + help='Delete associated hosts.') + @args('binary', type=str, + help='Service to delete from the cluster.') + @args('cluster-name', type=str, help='Cluster to delete.') + def remove(self, recursive, binary, cluster_name): + """Completely removes a cluster.""" + ctxt = context.get_admin_context() + try: + cluster = objects.Cluster.get_by_id(ctxt, None, name=cluster_name, + binary=binary, + get_services=recursive) + except exception.ClusterNotFound: + print(_("Couldn't remove cluster %s because it doesn't exist.") % + cluster_name) + return 2 + + if recursive: + for service in cluster.services: + service.destroy() + + try: + cluster.destroy() + except exception.ClusterHasHosts: + print(_("Couldn't remove cluster %s because it still has hosts.") % + cluster_name) + return 2 + + msg = _('Cluster %s successfully removed.') % cluster_name + if recursive: + msg = (_('%(msg)s And %(num)s services from the cluster were also ' + 'removed.') % {'msg': msg, 'num': len(cluster.services)}) + print(msg) + + @args('--full-rename', dest='partial', + action='store_false', default=True, + help='Do full cluster rename instead of just replacing provided ' + 'current cluster name and preserving backend and/or pool info.') + @args('current', help='Current cluster name.') + @args('new', help='New cluster name.') + def rename(self, partial, current, new): + """Rename cluster name for Volumes and Consistency Groups. + + Useful when you want to rename a cluster, particularly when the + backend_name has been modified in a multi-backend config or we have + moved from a single backend to multi-backend. 
+ """ + ctxt = context.get_admin_context() + + # Convert empty strings to None + current = current or None + new = new or None + + # Update Volumes + num_vols = objects.VolumeList.include_in_cluster( + ctxt, new, partial_rename=partial, cluster_name=current) + + # Update Consistency Groups + num_cgs = objects.ConsistencyGroupList.include_in_cluster( + ctxt, new, partial_rename=partial, cluster_name=current) + + if num_vols or num_cgs: + msg = _('Successfully renamed %(num_vols)s volumes and ' + '%(num_cgs)s consistency groups from cluster %(current)s ' + 'to %(new)s') + print(msg % {'num_vols': num_vols, 'num_cgs': num_cgs, 'new': new, + 'current': current}) + else: + msg = _('No volumes or consistency groups exist in cluster ' + '%(current)s.') + print(msg % {'current': current}) + return 2 + + CATEGORIES = { 'backup': BackupCommands, 'config': ConfigCommands, + 'cluster': ClusterCommands, 'db': DbCommands, 'host': HostCommands, 'logs': GetLogCommands, @@ -539,29 +650,32 @@ category_opt = cfg.SubCommandOpt('category', def get_arg_string(args): - arg = None if args[0] == '-': # (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars # is optional args. Notice that cfg module takes care of # actual ArgParser so prefix_chars is always '-'. if args[1] == '-': # This is long optional arg - arg = args[2:] + args = args[2:] else: - arg = args[1:] - else: - arg = args + args = args[1:] - return arg + # We convert dashes to underscores so we can have cleaner optional arg + # names + if args: + args = args.replace('-', '_') + + return args def fetch_func_args(func): - fn_args = [] + fn_kwargs = {} for args, kwargs in getattr(func, 'args', []): - arg = get_arg_string(args[0]) - fn_args.append(getattr(CONF.category, arg)) + # Argparser `dest` configuration option takes precedence for the name + arg = kwargs.get('dest') or get_arg_string(args[0]) + fn_kwargs[arg] = getattr(CONF.category, arg) - return fn_args + return fn_kwargs def main(): @@ -600,5 +714,5 @@ def main(): sys.exit(2) fn = CONF.category.action_fn - fn_args = fetch_func_args(fn) - fn(*fn_args) + fn_kwargs = fetch_func_args(fn) + fn(**fn_kwargs) diff --git a/cinder/cmd/volume.py b/cinder/cmd/volume.py index 8ce6eb6e6..35925529c 100644 --- a/cinder/cmd/volume.py +++ b/cinder/cmd/volume.py @@ -31,10 +31,12 @@ if os.name == 'nt': else: eventlet.monkey_patch() +import shlex import sys from oslo_config import cfg from oslo_log import log as logging +from oslo_privsep import priv_context from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts @@ -50,11 +52,21 @@ from cinder import utils from cinder import version +CONF = cfg.CONF + deprecated_host_opt = cfg.DeprecatedOpt('host') host_opt = cfg.StrOpt('backend_host', help='Backend override of host value.', deprecated_opts=[deprecated_host_opt]) -cfg.CONF.register_cli_opt(host_opt) -CONF = cfg.CONF +CONF.register_cli_opt(host_opt) + +# TODO(geguileo): Once we complete the work on A-A update the option's help. +cluster_opt = cfg.StrOpt('cluster', + default=None, + help='Name of this cluster. Used to group volume ' + 'hosts that share the same backend ' + 'configurations to work in HA Active-Active ' + 'mode. 
Active-Active is not yet supported.') +CONF.register_opt(cluster_opt) def main(): @@ -64,6 +76,7 @@ def main(): version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) + priv_context.init(root_helper=shlex.split(utils.get_root_helper())) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) launcher = service.get_launcher() @@ -75,11 +88,16 @@ def main(): CONF.register_opt(host_opt, group=backend) backend_host = getattr(CONF, backend).backend_host host = "%s@%s" % (backend_host or CONF.host, backend) + # We also want to set cluster to None on empty strings, and we + # ignore leading and trailing spaces. + cluster = CONF.cluster and CONF.cluster.strip() + cluster = (cluster or None) and '%s@%s' % (cluster, backend) try: server = service.Service.create(host=host, service_name=backend, binary='cinder-volume', - coordination=True) + coordination=True, + cluster=cluster) except Exception: msg = _('Volume service %s failed to start.') % host LOG.exception(msg) @@ -96,7 +114,8 @@ def main(): 'Support for DEFAULT section to configure drivers ' 'will be removed in the next release.')) server = service.Service.create(binary='cinder-volume', - coordination=True) + coordination=True, + cluster=CONF.cluster) launcher.launch_service(server) service_started = True diff --git a/cinder/common/config.py b/cinder/common/config.py index ac48e7ef9..45ee72569 100644 --- a/cinder/common/config.py +++ b/cinder/common/config.py @@ -61,6 +61,7 @@ global_opts = [ default=1, help='Version of the glance API to use'), cfg.IntOpt('glance_num_retries', + min=0, default=0, help='Number retries when downloading an image from glance'), cfg.BoolOpt('glance_api_insecure', @@ -82,15 +83,6 @@ global_opts = [ help='http/https timeout value for glance operations. If no ' 'value (None) is supplied here, the glanceclient default ' 'value is used.'), - cfg.StrOpt('scheduler_topic', - default='cinder-scheduler', - help='The topic that scheduler nodes listen on'), - cfg.StrOpt('volume_topic', - default='cinder-volume', - help='The topic that volume nodes listen on'), - cfg.StrOpt('backup_topic', - default='cinder-backup', - help='The topic that volume backup nodes listen on'), cfg.BoolOpt('enable_v1_api', default=True, deprecated_for_removal=True, @@ -142,6 +134,8 @@ global_opts = [ 'storage_availability_zone, instead of failing.'), cfg.StrOpt('default_volume_type', help='Default volume type to use'), + cfg.StrOpt('default_group_type', + help='Default group type to use'), cfg.StrOpt('volume_usage_audit_period', default='month', help='Time period for which to generate volume usages. ' @@ -187,6 +181,9 @@ global_opts = [ cfg.StrOpt('consistencygroup_api_class', default='cinder.consistencygroup.api.API', help='The full class name of the consistencygroup API class'), + cfg.StrOpt('group_api_class', + default='cinder.group.api.API', + help='The full class name of the group API class'), cfg.StrOpt('os_privileged_user_name', help='OpenStack privileged account username. 
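# The cluster option handling above maps '' and whitespace-only values to
# None and scopes a real name to the backend as '<cluster>@<backend>'; the
# and/or chaining does both without explicit branching:
def scoped_cluster(raw, backend):
    cluster = raw and raw.strip()
    return (cluster or None) and '%s@%s' % (cluster, backend)


print(scoped_cluster(None, 'lvm1'))     # None
print(scoped_cluster('   ', 'lvm1'))    # None
print(scoped_cluster('ha-a', 'lvm1'))   # ha-a@lvm1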
Used for requests ' 'to other services (such as Nova) that require an account ' diff --git a/cinder/common/constants.py b/cinder/common/constants.py index fa0858965..f7af0c916 100644 --- a/cinder/common/constants.py +++ b/cinder/common/constants.py @@ -16,3 +16,11 @@ # The maximum value a signed INT type may have DB_MAX_INT = 0x7FFFFFFF + +# The cinder services binaries and topics' names +SCHEDULER_BINARY = "cinder-scheduler" +VOLUME_BINARY = "cinder-volume" +BACKUP_BINARY = "cinder-backup" +SCHEDULER_TOPIC = SCHEDULER_BINARY +VOLUME_TOPIC = VOLUME_BINARY +BACKUP_TOPIC = BACKUP_BINARY diff --git a/cinder/compute/nova.py b/cinder/compute/nova.py index e7cc70955..4d93a377b 100644 --- a/cinder/compute/nova.py +++ b/cinder/compute/nova.py @@ -175,9 +175,10 @@ class API(base.Base): def update_server_volume(self, context, server_id, attachment_id, new_volume_id): - novaclient(context).volumes.update_server_volume(server_id, - attachment_id, - new_volume_id) + nova = novaclient(context, admin_endpoint=True, privileged_user=True) + nova.volumes.update_server_volume(server_id, + attachment_id, + new_volume_id) def create_volume_snapshot(self, context, volume_id, create_info): nova = novaclient(context, admin_endpoint=True, privileged_user=True) diff --git a/cinder/config/cinder-config-generator.conf b/cinder/config/cinder-config-generator.conf index 18c3a29b1..c98b02352 100644 --- a/cinder/config/cinder-config-generator.conf +++ b/cinder/config/cinder-config-generator.conf @@ -1,6 +1,7 @@ [DEFAULT] output_file = etc/cinder/cinder.conf.sample wrap_width = 79 +namespace = castellan.config namespace = cinder namespace = keystonemiddleware.auth_token namespace = oslo.config diff --git a/cinder/consistencygroup/api.py b/cinder/consistencygroup/api.py index e51552acf..3502091c6 100644 --- a/cinder/consistencygroup/api.py +++ b/cinder/consistencygroup/api.py @@ -25,6 +25,7 @@ from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils +from cinder.common import constants from cinder import db from cinder.db import base from cinder import exception @@ -395,7 +396,7 @@ class API(base.Base): # to select the target host for this group. self.scheduler_rpcapi.create_consistencygroup( context, - CONF.volume_topic, + constants.VOLUME_TOPIC, group, request_spec_list=request_spec_list, filter_properties_list=filter_properties_list) @@ -731,7 +732,7 @@ class API(base.Base): snap_desc = cgsnapshot.description with group.obj_as_admin(): self.volume_api.create_snapshots_in_db( - context, group.volumes, snap_name, snap_desc, True, + context, group.volumes, snap_name, snap_desc, cgsnapshot_id) except Exception: diff --git a/cinder/context.py b/cinder/context.py index b8793ce9d..a7a654463 100644 --- a/cinder/context.py +++ b/cinder/context.py @@ -49,11 +49,9 @@ class RequestContext(context.RequestContext): Represents the user taking a given action within the system. """ - def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", - roles=None, project_name=None, remote_address=None, - timestamp=None, request_id=None, auth_token=None, - overwrite=True, quota_class=None, service_catalog=None, - domain=None, user_domain=None, project_domain=None, + def __init__(self, user_id=None, project_id=None, is_admin=None, + read_deleted="no", project_name=None, remote_address=None, + timestamp=None, quota_class=None, service_catalog=None, **kwargs): """Initialize RequestContext. 
@@ -63,21 +61,15 @@ class RequestContext(context.RequestContext): :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. - - :param kwargs: Extra arguments that might be present, but we ignore - because they possibly came in from older rpc messages. """ + # NOTE(jamielennox): oslo.context still uses some old variables names. + # These arguments are maintained instead of passed as kwargs to + # maintain the interface for tests. + kwargs.setdefault('user', user_id) + kwargs.setdefault('tenant', project_id) + + super(RequestContext, self).__init__(is_admin=is_admin, **kwargs) - super(RequestContext, self).__init__(auth_token=auth_token, - user=user_id, - tenant=project_id, - domain=domain, - user_domain=user_domain, - project_domain=project_domain, - is_admin=is_admin, - request_id=request_id, - overwrite=overwrite, - roles=roles) self.project_name = project_name self.read_deleted = read_deleted self.remote_address = remote_address @@ -137,7 +129,21 @@ class RequestContext(context.RequestContext): @classmethod def from_dict(cls, values): - return cls(**values) + return cls(user_id=values.get('user_id'), + project_id=values.get('project_id'), + project_name=values.get('project_name'), + domain=values.get('domain'), + read_deleted=values.get('read_deleted'), + remote_address=values.get('remote_address'), + timestamp=values.get('timestamp'), + quota_class=values.get('quota_class'), + service_catalog=values.get('service_catalog'), + request_id=values.get('request_id'), + is_admin=values.get('is_admin'), + roles=values.get('roles'), + auth_token=values.get('auth_token'), + user_domain=values.get('user_domain'), + project_domain=values.get('project_domain')) def elevated(self, read_deleted=None, overwrite=False): """Return a version of this context with admin flag set.""" diff --git a/cinder/db/api.py b/cinder/db/api.py index 7ee9b905a..560e6c6db 100644 --- a/cinder/db/api.py +++ b/cinder/db/api.py @@ -62,7 +62,6 @@ db_opts = [ CONF = cfg.CONF CONF.register_opts(db_opts) db_options.set_defaults(CONF) -CONF.set_default('sqlite_db', 'cinder.sqlite', group='database') _BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'} @@ -99,34 +98,33 @@ def service_destroy(context, service_id): return IMPL.service_destroy(context, service_id) -def service_get(context, service_id): - """Get a service or raise if it does not exist.""" - return IMPL.service_get(context, service_id) +def service_get(context, service_id=None, backend_match_level=None, **filters): + """Get a service that matches the criteria. + + A possible filter is is_up=True and it will filter nodes that are down. + + :param service_id: Id of the service. + :param filters: Filters for the query in the form of key/value. + :param backend_match_level: 'pool', 'backend', or 'host' for host and + cluster filters (as defined in _filter_host + method) + :raise ServiceNotFound: If service doesn't exist. + """ + return IMPL.service_get(context, service_id, backend_match_level, + **filters) -def service_get_by_host_and_topic(context, host, topic): - """Get a service by host it's on and topic it listens to.""" - return IMPL.service_get_by_host_and_topic(context, host, topic) +def service_get_all(context, backend_match_level=None, **filters): + """Get all services that match the criteria. + A possible filter is is_up=True and it will filter nodes that are down. 
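# RequestContext now forwards user_id/project_id to oslo.context under its
# legacy attribute names via setdefault(), so an old caller that already
# passes user=/tenant= is not clobbered. A toy base class makes the point:
class OsloContext(object):
    def __init__(self, user=None, tenant=None, **kwargs):
        self.user = user
        self.tenant = tenant


class RequestContext(OsloContext):
    def __init__(self, user_id=None, project_id=None, **kwargs):
        kwargs.setdefault('user', user_id)
        kwargs.setdefault('tenant', project_id)
        super(RequestContext, self).__init__(**kwargs)


ctx = RequestContext(user_id='u1', project_id='p1')
print(ctx.user, ctx.tenant)                 # u1 p1
print(RequestContext(user='legacy').user)   # legacy (not overwritten)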
-def service_get_all(context, filters=None): - """Get all services.""" - return IMPL.service_get_all(context, filters) - - -def service_get_all_by_topic(context, topic, disabled=None): - """Get all services for a given topic.""" - return IMPL.service_get_all_by_topic(context, topic, disabled=disabled) - - -def service_get_all_by_binary(context, binary, disabled=None): - """Get all services for a given binary.""" - return IMPL.service_get_all_by_binary(context, binary, disabled) - - -def service_get_by_args(context, host, binary): - """Get the state of a service by node name and binary.""" - return IMPL.service_get_by_args(context, host, binary) + :param filters: Filters for the query in the form of key/value arguments. + :param backend_match_level: 'pool', 'backend', or 'host' for host and + cluster filters (as defined in _filter_host + method) + """ + return IMPL.service_get_all(context, backend_match_level, **filters) def service_create(context, values): @@ -138,7 +136,6 @@ def service_update(context, service_id, values): """Set the given properties on an service and update it. Raises NotFound if service does not exist. - """ return IMPL.service_update(context, service_id, values) @@ -146,6 +143,70 @@ def service_update(context, service_id, values): ############### +def cluster_get(context, id=None, is_up=None, get_services=False, + services_summary=False, read_deleted='no', + name_match_level=None, **filters): + """Get a cluster that matches the criteria. + + :param id: Id of the cluster. + :param is_up: Boolean value to filter based on the cluster's up status. + :param get_services: If we want to load all services from this cluster. + :param services_summary: If we want to load num_hosts and + num_down_hosts fields. + :param read_deleted: Filtering based on delete status. Default value is + "no". + :param name_match_level: 'pool', 'backend', or 'host' for name filter (as + defined in _filter_host method) + :param filters: Field based filters in the form of key/value. + :raise ClusterNotFound: If cluster doesn't exist. + """ + return IMPL.cluster_get(context, id, is_up, get_services, services_summary, + read_deleted, name_match_level, **filters) + + +def cluster_get_all(context, is_up=None, get_services=False, + services_summary=False, read_deleted='no', + name_match_level=None, **filters): + """Get all clusters that match the criteria. + + :param is_up: Boolean value to filter based on the cluster's up status. + :param get_services: If we want to load all services from this cluster. + :param services_summary: If we want to load num_hosts and + num_down_hosts fields. + :param read_deleted: Filtering based on delete status. Default value is + "no". + :param name_match_level: 'pool', 'backend', or 'host' for name filter (as + defined in _filter_host method) + :param filters: Field based filters in the form of key/value. + """ + return IMPL.cluster_get_all(context, is_up, get_services, services_summary, + read_deleted, name_match_level, **filters) + + +def cluster_create(context, values): + """Create a cluster from the values dictionary.""" + return IMPL.cluster_create(context, values) + + +def cluster_update(context, id, values): + """Set the given properties on an cluster and update it. + + Raises ClusterNotFound if cluster does not exist. + """ + return IMPL.cluster_update(context, id, values) + + +def cluster_destroy(context, id): + """Destroy the cluster or raise if it does not exist or has hosts. + + :raise ClusterNotFound: If cluster doesn't exist. 
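# The per-field getters (by topic, by binary, by args) collapse into one
# filter-driven service_get_all(); the same idea over a plain list, with
# is_up-style filtering left out for brevity:
SERVICES = [
    {'host': 'node1', 'topic': 'cinder-volume', 'disabled': False},
    {'host': 'node2', 'topic': 'cinder-backup', 'disabled': True},
]


def service_get_all(**filters):
    return [s for s in SERVICES
            if all(s.get(k) == v for k, v in filters.items())]


print(service_get_all(topic='cinder-volume'))          # node1's row
print(service_get_all(host='node2', disabled=True))    # node2's row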
+ """ + return IMPL.cluster_destroy(context, id) + + +############### + + def volume_attach(context, values): """Attach a volume.""" return IMPL.volume_attach(context, values) @@ -208,6 +269,12 @@ def volume_get_all_by_group(context, group_id, filters=None): return IMPL.volume_get_all_by_group(context, group_id, filters=filters) +def volume_get_all_by_generic_group(context, group_id, filters=None): + """Get all volumes belonging to a generic volume group.""" + return IMPL.volume_get_all_by_generic_group(context, group_id, + filters=filters) + + def volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): @@ -219,6 +286,16 @@ def volume_get_all_by_project(context, project_id, marker, limit, offset=offset) +def get_volume_summary_all(context): + """Get all volume summary.""" + return IMPL.get_volume_summary_all(context) + + +def get_volume_summary_by_project(context, project_id): + """Get all volume summary belonging to a project.""" + return IMPL.get_volume_summary_by_project(context, project_id) + + def volume_update(context, volume_id, values): """Set the given properties on a volume and update it. @@ -228,6 +305,33 @@ def volume_update(context, volume_id, values): return IMPL.volume_update(context, volume_id, values) +def volumes_update(context, values_list): + """Set the given properties on a list of volumes and update them. + + Raises NotFound if a volume does not exist. + """ + return IMPL.volumes_update(context, values_list) + + +def volume_include_in_cluster(context, cluster, partial_rename=True, + **filters): + """Include all volumes matching the filters into a cluster. + + When partial_rename is set we will not set the cluster_name with cluster + parameter value directly, we'll replace provided cluster_name or host + filter value with cluster instead. + + This is useful when we want to replace just the cluster name but leave + the backend and pool information as it is. If we are using cluster_name + to filter, we'll use that same DB field to replace the cluster value and + leave the rest as it is. Likewise if we use the host to filter. + + Returns the number of volumes that have been changed. 
+ """ + return IMPL.volume_include_in_cluster(context, cluster, partial_rename, + **filters) + + def volume_attachment_update(context, attachment_id, values): return IMPL.volume_attachment_update(context, attachment_id, values) @@ -268,10 +372,6 @@ def volume_has_attachments_filter(): return IMPL.volume_has_attachments_filter() -def volume_has_same_encryption_type(new_vol_type): - return IMPL.volume_has_same_encryption_type(new_vol_type) - - def volume_qos_allows_retype(new_vol_type): return IMPL.volume_qos_allows_retype(new_vol_type) @@ -324,6 +424,11 @@ def snapshot_get_all_for_cgsnapshot(context, project_id): return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id) +def snapshot_get_all_for_group_snapshot(context, group_snapshot_id): + """Get all snapshots belonging to a group snapshot.""" + return IMPL.snapshot_get_all_for_group_snapshot(context, group_snapshot_id) + + def snapshot_get_all_for_volume(context, volume_id): """Get all snapshots for a volume.""" return IMPL.snapshot_get_all_for_volume(context, volume_id) @@ -541,6 +646,99 @@ def volume_type_access_remove(context, type_id, project_id): #################### +def group_type_create(context, values, projects=None): + """Create a new group type.""" + return IMPL.group_type_create(context, values, projects) + + +def group_type_update(context, group_type_id, values): + return IMPL.group_type_update(context, group_type_id, values) + + +def group_type_get_all(context, inactive=False, filters=None, marker=None, + limit=None, sort_keys=None, sort_dirs=None, + offset=None, list_result=False): + """Get all group types. + + :param context: context to query under + :param inactive: Include inactive group types to the result set + :param filters: Filters for the query in the form of key/value. + :param marker: the last item of the previous page, used to determine the + next page of results to return + :param limit: maximum number of items to return + :param sort_keys: list of attributes by which results should be sorted, + paired with corresponding item in sort_dirs + :param sort_dirs: list of directions in which results should be sorted, + paired with corresponding item in sort_keys + :param list_result: For compatibility, if list_result = True, return a list + instead of dict. + + :is_public: Filter group types based on visibility: + + * **True**: List public group types only + * **False**: List private group types only + * **None**: List both public and private group types + + :returns: list/dict of matching group types + """ + + return IMPL.group_type_get_all(context, inactive, filters, marker=marker, + limit=limit, sort_keys=sort_keys, + sort_dirs=sort_dirs, offset=offset, + list_result=list_result) + + +def group_type_get(context, id, inactive=False, expected_fields=None): + """Get group type by id. + + :param context: context to query under + :param id: Group type id to get. + :param inactive: Consider inactive group types when searching + :param expected_fields: Return those additional fields. + Supported fields are: projects. 
+ :returns: group type + """ + return IMPL.group_type_get(context, id, inactive, expected_fields) + + +def group_type_get_by_name(context, name): + """Get group type by name.""" + return IMPL.group_type_get_by_name(context, name) + + +def group_types_get_by_name_or_id(context, group_type_list): + """Get group types by name or id.""" + return IMPL.group_types_get_by_name_or_id(context, group_type_list) + + +def group_type_destroy(context, id): + """Delete a group type.""" + return IMPL.group_type_destroy(context, id) + + +def group_type_access_get_all(context, type_id): + """Get all group type access of a group type.""" + return IMPL.group_type_access_get_all(context, type_id) + + +def group_type_access_add(context, type_id, project_id): + """Add group type access for project.""" + return IMPL.group_type_access_add(context, type_id, project_id) + + +def group_type_access_remove(context, type_id, project_id): + """Remove group type access for project.""" + return IMPL.group_type_access_remove(context, type_id, project_id) + + +def volume_type_get_all_by_group(context, group_id): + """Get all volumes in a group.""" + return IMPL.volume_type_get_all_by_group(context, group_id) + + +#################### + + def volume_type_extra_specs_get(context, volume_type_id): """Get all extra specs for a volume type.""" return IMPL.volume_type_extra_specs_get(context, volume_type_id) @@ -567,6 +765,32 @@ def volume_type_extra_specs_update_or_create(context, ################### +def group_type_specs_get(context, group_type_id): + """Get all group specs for a group type.""" + return IMPL.group_type_specs_get(context, group_type_id) + + +def group_type_specs_delete(context, group_type_id, key): + """Delete the given group specs item.""" + return IMPL.group_type_specs_delete(context, group_type_id, key) + + +def group_type_specs_update_or_create(context, + group_type_id, + group_specs): + """Create or update group type specs. + + This adds or modifies the key/value pairs specified in the group specs dict + argument. + """ + return IMPL.group_type_specs_update_or_create(context, + group_type_id, + group_specs) + + +################### + + def volume_type_encryption_get(context, volume_type_id, session=None): return IMPL.volume_type_encryption_get(context, volume_type_id, session) @@ -1054,6 +1278,109 @@ def cg_creating_from_src(cg_id=None, cgsnapshot_id=None): return IMPL.cg_creating_from_src(cg_id, cgsnapshot_id) +def consistencygroup_include_in_cluster(context, cluster, partial_rename=True, + **filters): + """Include all consistency groups matching the filters into a cluster. + + When partial_rename is set we will not set the cluster_name with cluster + parameter value directly, we'll replace provided cluster_name or host + filter value with cluster instead. + + This is useful when we want to replace just the cluster name but leave + the backend and pool information as it is. If we are using cluster_name + to filter, we'll use that same DB field to replace the cluster value and + leave the rest as it is. Likewise if we use the host to filter. + + Returns the number of consistency groups that have been changed. 
+ """ + return IMPL.consistencygroup_include_in_cluster(context, cluster, + partial_rename, + **filters) + + +################### + + +def group_get(context, group_id): + """Get a group or raise if it does not exist.""" + return IMPL.group_get(context, group_id) + + +def group_get_all(context, filters=None, marker=None, limit=None, + offset=None, sort_keys=None, sort_dirs=None): + """Get all groups.""" + return IMPL.group_get_all(context, filters=filters, + marker=marker, limit=limit, + offset=offset, sort_keys=sort_keys, + sort_dirs=sort_dirs) + + +def group_create(context, values, group_snapshot_id=None, group_id=None): + """Create a group from the values dictionary.""" + return IMPL.group_create(context, values, group_snapshot_id, group_id) + + +def group_get_all_by_project(context, project_id, filters=None, + marker=None, limit=None, offset=None, + sort_keys=None, sort_dirs=None): + """Get all groups belonging to a project.""" + return IMPL.group_get_all_by_project(context, project_id, + filters=filters, + marker=marker, limit=limit, + offset=offset, + sort_keys=sort_keys, + sort_dirs=sort_dirs) + + +def group_update(context, group_id, values): + """Set the given properties on a group and update it. + + Raises NotFound if group does not exist. + """ + return IMPL.group_update(context, group_id, values) + + +def group_destroy(context, group_id): + """Destroy the group or raise if it does not exist.""" + return IMPL.group_destroy(context, group_id) + + +def group_has_group_snapshot_filter(): + """Return a filter that checks if a Group has Group Snapshots.""" + return IMPL.group_has_group_snapshot_filter() + + +def group_has_volumes_filter(attached_or_with_snapshots=False): + """Return a filter to check if a Group has volumes. + + When attached_or_with_snapshots parameter is given a True value only + attached volumes or those with snapshots will be considered. + """ + return IMPL.group_has_volumes_filter(attached_or_with_snapshots) + + +def group_creating_from_src(group_id=None, group_snapshot_id=None): + """Return a filter to check if a Group is being used as creation source. + + Returned filter is meant to be used in the Conditional Update mechanism and + checks if provided Group ID or Group Snapshot ID is currently being used to + create another Group. + + This filter will not include Groups that have used the ID but have already + finished their creation (status is no longer creating). + + Filter uses a subquery that allows it to be used on updates to the + groups table. 
+ """ + return IMPL.group_creating_from_src(group_id, group_snapshot_id) + + +def group_volume_type_mapping_create(context, group_id, volume_type_id): + """Create a group volume_type mapping entry.""" + return IMPL.group_volume_type_mapping_create(context, group_id, + volume_type_id) + + ################### @@ -1103,6 +1430,52 @@ def cgsnapshot_creating_from_src(): ################### +def group_snapshot_get(context, group_snapshot_id): + """Get a group snapshot or raise if it does not exist.""" + return IMPL.group_snapshot_get(context, group_snapshot_id) + + +def group_snapshot_get_all(context, filters=None): + """Get all group snapshots.""" + return IMPL.group_snapshot_get_all(context, filters) + + +def group_snapshot_create(context, values): + """Create a group snapshot from the values dictionary.""" + return IMPL.group_snapshot_create(context, values) + + +def group_snapshot_get_all_by_group(context, group_id, filters=None): + """Get all group snapshots belonging to a group.""" + return IMPL.group_snapshot_get_all_by_group(context, group_id, filters) + + +def group_snapshot_get_all_by_project(context, project_id, filters=None): + """Get all group snapshots belonging to a project.""" + return IMPL.group_snapshot_get_all_by_project(context, project_id, filters) + + +def group_snapshot_update(context, group_snapshot_id, values): + """Set the given properties on a group snapshot and update it. + + Raises NotFound if group snapshot does not exist. + """ + return IMPL.group_snapshot_update(context, group_snapshot_id, values) + + +def group_snapshot_destroy(context, group_snapshot_id): + """Destroy the group snapshot or raise if it does not exist.""" + return IMPL.group_snapshot_destroy(context, group_snapshot_id) + + +def group_snapshot_creating_from_src(): + """Get a filter to check if a grp snapshot is being created from a grp.""" + return IMPL.group_snapshot_creating_from_src() + + +################### + + def purge_deleted_rows(context, age_in_days): """Purge deleted rows older than given age from cinder tables @@ -1204,6 +1577,41 @@ def message_destroy(context, message_id): ################### +def worker_create(context, **values): + """Create a worker entry from optional arguments.""" + return IMPL.worker_create(context, **values) + + +def worker_get(context, **filters): + """Get a worker or raise exception if it does not exist.""" + return IMPL.worker_get(context, **filters) + + +def worker_get_all(context, until=None, db_filters=None, **filters): + """Get all workers that match given criteria.""" + return IMPL.worker_get_all(context, until=until, db_filters=db_filters, + **filters) + + +def worker_update(context, id, filters=None, orm_worker=None, **values): + """Update a worker with given values.""" + return IMPL.worker_update(context, id, filters=filters, + orm_worker=orm_worker, **values) + + +def worker_claim_for_cleanup(context, claimer_id, orm_worker): + """Soft delete a worker, change the service_id and update the worker.""" + return IMPL.worker_claim_for_cleanup(context, claimer_id, orm_worker) + + +def worker_destroy(context, **filters): + """Delete a worker (no soft delete).""" + return IMPL.worker_destroy(context, **filters) + + +################### + + def resource_exists(context, model, resource_id): return IMPL.resource_exists(context, model, resource_id) diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index caecf480d..15eeb31bb 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -42,8 +42,7 @@ import six import 
sqlalchemy
 from sqlalchemy import MetaData
 from sqlalchemy import or_, and_, case
-from sqlalchemy.orm import aliased
-from sqlalchemy.orm import joinedload, joinedload_all
+from sqlalchemy.orm import joinedload, joinedload_all, undefer_group
 from sqlalchemy.orm import RelationshipProperty
 from sqlalchemy.schema import Table
 from sqlalchemy import sql
@@ -61,6 +60,7 @@ from cinder.db.sqlalchemy import models
 from cinder import exception
 from cinder.i18n import _, _LW, _LE, _LI
 from cinder.objects import fields
+from cinder import utils
 
 CONF = cfg.CONF
@@ -341,6 +341,15 @@ def _sync_consistencygroups(context, project_id, session,
     return {key: groups}
 
 
+def _sync_groups(context, project_id, session,
+                 volume_type_id=None,
+                 volume_type_name=None):
+    (_junk, groups) = _group_data_get_for_project(
+        context, project_id, session=session)
+    key = 'groups'
+    return {key: groups}
+
+
 def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None,
                            volume_type_name=None):
     key = 'backup_gigabytes'
@@ -355,112 +364,144 @@ QUOTA_SYNC_FUNCTIONS = {
     '_sync_gigabytes': _sync_gigabytes,
     '_sync_consistencygroups': _sync_consistencygroups,
     '_sync_backups': _sync_backups,
-    '_sync_backup_gigabytes': _sync_backup_gigabytes
+    '_sync_backup_gigabytes': _sync_backup_gigabytes,
+    '_sync_groups': _sync_groups,
 }
 
 
 ###################
 
 
-@require_admin_context
-def service_destroy(context, service_id):
-    session = get_session()
-    with session.begin():
-        service_ref = _service_get(context, service_id, session=session)
-        service_ref.delete(session=session)
+def _clean_filters(filters):
+    return {k: v for k, v in filters.items() if v is not None}
 
 
-@require_admin_context
-def _service_get(context, service_id, session=None):
-    result = model_query(
-        context,
-        models.Service,
-        session=session).\
-        filter_by(id=service_id).\
-        first()
-    if not result:
-        raise exception.ServiceNotFound(service_id=service_id)
+def _filter_host(field, value, match_level=None):
+    """Generate a filter condition for host and cluster fields.
 
-    return result
+    Levels are:
+    - 'pool': Will search for an exact match
+    - 'backend': Will search for exact match and value#*
+    - 'host': Will search for exact match, value@* and value#*
+
+    If no level is provided we'll determine it based on the value we want to
+    match:
+    - 'pool': If '#' is present in value
+    - 'backend': If '@' is present in value and '#' is not present
+    - 'host': In any other case
+
+    :param field: ORM field. Ex: objects.Volume.model.host
+    :param value: String to compare with
+    :param match_level: 'pool', 'backend', or 'host'
+    """
+    # If we don't set level we'll try to determine it automatically. LIKE
+    # operations are expensive, so we try to reduce them to the minimum.
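+    # For example, with hypothetical values and Cinder's 'host@backend#pool'
+    # addressing, the conditions built below expand to:
+    #   value='node1'        -> 'host' level:    field == 'node1' OR
+    #                           field LIKE 'node1#%' OR field LIKE 'node1@%'
+    #   value='node1@lvm'    -> 'backend' level: field == 'node1@lvm' OR
+    #                           field LIKE 'node1@lvm#%'
+    #   value='node1@lvm#p1' -> 'pool' level:    field == 'node1@lvm#p1'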
+    if match_level is None:
+        if '#' in value:
+            match_level = 'pool'
+        elif '@' in value:
+            match_level = 'backend'
+        else:
+            match_level = 'host'
+
+    # MySQL does not do case-sensitive filtering by default, so we force it
+    conn_str = CONF.database.connection
+    if conn_str.startswith('mysql') and conn_str[5] in ['+', ':']:
+        cmp_value = func.binary(value)
+        like_op = 'LIKE BINARY'
+    else:
+        cmp_value = value
+        like_op = 'LIKE'
+
+    conditions = [field == cmp_value]
+    if match_level != 'pool':
+        conditions.append(field.op(like_op)(value + '#%'))
+        if match_level == 'host':
+            conditions.append(field.op(like_op)(value + '@%'))
+
+    return or_(*conditions)
 
 
-@require_admin_context
-def service_get(context, service_id):
-    return _service_get(context, service_id)
-
-
-@require_admin_context
-def service_get_all(context, filters=None):
+def _service_query(context, session=None, read_deleted='no', host=None,
+                   cluster_name=None, is_up=None, backend_match_level=None,
+                   **filters):
+    filters = _clean_filters(filters)
     if filters and not is_valid_model_filters(models.Service, filters):
-        return []
+        return None
 
-    query = model_query(context, models.Service)
+    query = model_query(context, models.Service, session=session,
+                        read_deleted=read_deleted)
+
+    # Host and cluster are particular cases of filters, because we must
+    # retrieve not only exact matches (single backend configuration), but also
+    # match those that have the backend defined (multi backend configuration).
+    if host:
+        query = query.filter(_filter_host(models.Service.host, host,
+                                          backend_match_level))
+    if cluster_name:
+        query = query.filter(_filter_host(models.Service.cluster_name,
+                                          cluster_name, backend_match_level))
 
     if filters:
-        try:
-            host = filters.pop('host')
-            host_attr = models.Service.host
-            conditions = or_(host_attr ==
-                             host, host_attr.op('LIKE')(host + '@%'))
-            query = query.filter(conditions)
-        except KeyError:
-            pass
-
         query = query.filter_by(**filters)
-    return query.all()
+
+    if is_up is not None:
+        date_limit = utils.service_expired_time()
+        svc = models.Service
+        filter_ = or_(
+            and_(svc.created_at.isnot(None), svc.created_at >= date_limit),
+            and_(svc.updated_at.isnot(None), svc.updated_at >= date_limit))
+        query = query.filter(filter_ == is_up)
+
+    return query
 
 
 @require_admin_context
-def service_get_all_by_topic(context, topic, disabled=None):
-    query = model_query(
-        context, models.Service, read_deleted="no").\
-        filter_by(topic=topic)
-
-    if disabled is not None:
-        query = query.filter_by(disabled=disabled)
-
-    return query.all()
+def service_destroy(context, service_id):
+    query = _service_query(context, id=service_id)
+    updated_values = models.Service.delete_values()
+    if not query.update(updated_values):
+        raise exception.ServiceNotFound(service_id=service_id)
+    return updated_values
 
 
 @require_admin_context
-def service_get_all_by_binary(context, binary, disabled=None):
-    query = model_query(
-        context, models.Service, read_deleted="no").filter_by(binary=binary)
+def service_get(context, service_id=None, backend_match_level=None, **filters):
+    """Get a service that matches the criteria.
 
-    if disabled is not None:
-        query = query.filter_by(disabled=disabled)
+    A possible filter is is_up=True, which excludes services that are down.
 
-    return query.all()
+    :param service_id: Id of the service.
+    :param filters: Filters for the query in the form of key/value.
+    :param backend_match_level: 'pool', 'backend', or 'host' for host and
+                                cluster filters (as defined in _filter_host
+                                method)
+    :raise ServiceNotFound: If service doesn't exist.
+    """
+    query = _service_query(context, backend_match_level=backend_match_level,
+                           id=service_id, **filters)
+    service = None if not query else query.first()
+    if not service:
+        serv_id = service_id or filters.get('topic') or filters.get('binary')
+        raise exception.ServiceNotFound(service_id=serv_id,
+                                        host=filters.get('host'))
+    return service
 
 
 @require_admin_context
-def service_get_by_host_and_topic(context, host, topic):
-    result = model_query(
-        context, models.Service, read_deleted="no").\
-        filter_by(disabled=False).\
-        filter_by(host=host).\
-        filter_by(topic=topic).\
-        first()
-    if not result:
-        raise exception.ServiceNotFound(service_id=topic,
-                                        host=host)
-    return result
+def service_get_all(context, backend_match_level=None, **filters):
+    """Get all services that match the criteria.
 
+    A possible filter is is_up=True, which excludes services that are down.
 
-@require_admin_context
-def service_get_by_args(context, host, binary):
-    results = model_query(context, models.Service).\
-        filter_by(host=host).\
-        filter_by(binary=binary).\
-        all()
-
-    for result in results:
-        if host == result['host']:
-            return result
-
-    raise exception.ServiceNotFound(service_id=binary,
-                                    host=host)
+    :param filters: Filters for the query in the form of key/value.
+    :param backend_match_level: 'pool', 'backend', or 'host' for host and
+                                cluster filters (as defined in _filter_host
+                                method)
+    """
+    query = _service_query(context, backend_match_level=backend_match_level,
+                           **filters)
+    return [] if not query else query.all()
 
 
 @require_admin_context
@@ -479,14 +520,158 @@ def service_create(context, values):
 
 @require_admin_context
 @_retry_on_deadlock
 def service_update(context, service_id, values):
+    if 'disabled' in values:
+        values = values.copy()
+        values['modified_at'] = values.get('modified_at', timeutils.utcnow())
+        values['updated_at'] = values.get('updated_at',
+                                          literal_column('updated_at'))
+    query = _service_query(context, id=service_id)
+    result = query.update(values)
+    if not result:
+        raise exception.ServiceNotFound(service_id=service_id)
+
+
+###################
+
+
+def _cluster_query(context, is_up=None, get_services=False,
+                   services_summary=False, read_deleted='no',
+                   name_match_level=None, name=None, session=None, **filters):
+    filters = _clean_filters(filters)
+    if filters and not is_valid_model_filters(models.Cluster, filters):
+        return None
+
+    query = model_query(context, models.Cluster, session=session,
+                        read_deleted=read_deleted)
+
+    # Cluster is a special case of filter: we must retrieve not only exact
+    # matches, but also hosts that specify the backend.
+    if name:
+        query = query.filter(_filter_host(models.Cluster.name, name,
+                                          name_match_level))
+
+    if filters:
+        query = query.filter_by(**filters)
+
+    if services_summary:
+        query = query.options(undefer_group('services_summary'))
+        # We bind the expiration time to now (as it changes with each query)
+        # and is required by num_down_hosts
+        query = query.params(expired=utils.service_expired_time())
+    elif 'num_down_hosts' in filters:
+        query = query.params(expired=utils.service_expired_time())
+
+    if get_services:
+        query = query.options(joinedload_all('services'))
+
+    if is_up is not None:
+        date_limit = utils.service_expired_time()
+        filter_ = and_(models.Cluster.last_heartbeat.isnot(None),
+                       models.Cluster.last_heartbeat >= date_limit)
+        query = query.filter(filter_ == is_up)
+
+    return query
+
+
+@require_admin_context
+def cluster_get(context, id=None, is_up=None, get_services=False,
+                services_summary=False, read_deleted='no',
+                name_match_level=None, **filters):
+    """Get a cluster that matches the criteria.
+
+    :param id: Id of the cluster.
+    :param is_up: Boolean value to filter based on the cluster's up status.
+    :param get_services: If we want to load all services from this cluster.
+    :param services_summary: If we want to load num_hosts and
+                             num_down_hosts fields.
+    :param read_deleted: Filtering based on delete status. Default value is
+                         "no".
+    :param filters: Field based filters in the form of key/value.
+    :param name_match_level: 'pool', 'backend', or 'host' for name filter (as
+                             defined in _filter_host method)
+    :raise ClusterNotFound: If cluster doesn't exist.
+    """
+    query = _cluster_query(context, is_up, get_services, services_summary,
+                           read_deleted, name_match_level, id=id, **filters)
+    cluster = None if not query else query.first()
+    if not cluster:
+        cluster_id = id or six.text_type(filters)
+        raise exception.ClusterNotFound(id=cluster_id)
+    return cluster
+
+
+@require_admin_context
+def cluster_get_all(context, is_up=None, get_services=False,
+                    services_summary=False, read_deleted='no',
+                    name_match_level=None, **filters):
+    """Get all clusters that match the criteria.
+
+    :param is_up: Boolean value to filter based on the cluster's up status.
+    :param get_services: If we want to load all services from this cluster.
+    :param services_summary: If we want to load num_hosts and
+                             num_down_hosts fields.
+    :param read_deleted: Filtering based on delete status. Default value is
+                         "no".
+    :param name_match_level: 'pool', 'backend', or 'host' for name filter (as
+                             defined in _filter_host method)
+    :param filters: Field based filters in the form of key/value.
+    """
+    query = _cluster_query(context, is_up, get_services, services_summary,
+                           read_deleted, name_match_level, **filters)
+    return [] if not query else query.all()
+
+
+@require_admin_context
+def cluster_create(context, values):
+    """Create a cluster from the values dictionary."""
+    cluster_ref = models.Cluster()
+    cluster_ref.update(values)
+    # Provided disabled value takes precedence
+    if values.get('disabled') is None:
+        cluster_ref.disabled = not CONF.enable_new_services
 
     session = get_session()
-    with session.begin():
-        service_ref = _service_get(context, service_id, session=session)
-        if ('disabled' in values):
-            service_ref['modified_at'] = timeutils.utcnow()
-            service_ref['updated_at'] = literal_column('updated_at')
-        service_ref.update(values)
-        return service_ref
+    try:
+        with session.begin():
+            cluster_ref.save(session)
+            # We mark that the newly created cluster has no hosts to prevent
+            # problems at the OVO level
+            cluster_ref.last_heartbeat = None
+            return cluster_ref
+    # If we had a race condition (another non-deleted cluster exists with the
+    # same name) raise a Duplicate exception.
+    except db_exc.DBDuplicateEntry:
+        raise exception.ClusterExists(name=values.get('name'))
+
+
+@require_admin_context
+@_retry_on_deadlock
+def cluster_update(context, id, values):
+    """Set the given properties on a cluster and update it.
+
+    Raises ClusterNotFound if cluster does not exist.
+    """
+    query = _cluster_query(context, id=id)
+    result = query.update(values)
+    if not result:
+        raise exception.ClusterNotFound(id=id)
+
+
+@require_admin_context
+def cluster_destroy(context, id):
+    """Destroy the cluster or raise if it does not exist or has hosts."""
+    query = _cluster_query(context, id=id)
+    query = query.filter(models.Cluster.num_hosts == 0)
+    # If the update doesn't succeed we don't know if it's because the
+    # cluster doesn't exist or because it has hosts.
+    result = query.update(models.Cluster.delete_values(),
+                          synchronize_session=False)
+
+    if not result:
+        # This will fail if the cluster doesn't exist, raising the right
+        # exception
+        cluster_get(context, id=id)
+        # If it doesn't fail, then the problem is that there are hosts
+        raise exception.ClusterHasHosts(id=id)
 
 
 ###################
 
@@ -534,6 +719,37 @@ def _dict_with_extra_specs_if_authorized(context, inst_type_query):
 ###################
 
 
+def _dict_with_group_specs_if_authorized(context, inst_type_query):
+    """Convert group type query result to a dict with group specs.
+
+    Takes a group type query returned by sqlalchemy and returns it
+    as a dictionary, converting the group_specs entry from a list
+    of dicts to a single dict. NOTE the contents of group specs are
+    admin readable only. If the context passed in for this request
+    is not admin then we will omit the group_specs entry rather than
+    providing the admin only details.
+
+    Example response with admin context:
+
+    'group_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
+    to a single dict:
+    'group_specs' : {'k1': 'v1'}
+
+    """
+
+    inst_type_dict = dict(inst_type_query)
+    if not is_admin_context(context):
+        del(inst_type_dict['group_specs'])
+    else:
+        group_specs = {x['key']: x['value']
+                       for x in inst_type_query['group_specs']}
+        inst_type_dict['group_specs'] = group_specs
+    return inst_type_dict
+
+
+###################
+
+
 @require_context
 def _quota_get(context, project_id, resource, session=None):
     result = model_query(context, models.Quota, session=session,
@@ -634,7 +850,7 @@ def quota_destroy(context, project_id, resource):
     session = get_session()
     with session.begin():
         quota_ref = _quota_get(context, project_id, resource, session=session)
-        quota_ref.delete(session=session)
+        return quota_ref.delete(session=session)
 
 
 ###################
 
@@ -737,7 +953,7 @@ def quota_class_destroy(context, class_name, resource):
     with session.begin():
         quota_class_ref = _quota_class_get(context, class_name, resource,
                                            session=session)
-        quota_class_ref.delete(session=session)
+        return quota_class_ref.delete(session=session)
 
 
 @require_admin_context
@@ -1307,13 +1523,14 @@ def volume_destroy(context, volume_id):
     session = get_session()
     now = timeutils.utcnow()
     with session.begin():
+        updated_values = {'status': 'deleted',
+                          'deleted': True,
+                          'deleted_at': now,
+                          'updated_at': literal_column('updated_at'),
+                          'migration_status': None}
         model_query(context, models.Volume, session=session).\
             filter_by(id=volume_id).\
-            update({'status': 'deleted',
-                    'deleted': True,
-                    'deleted_at': now,
-                    'updated_at': literal_column('updated_at'),
-                    'migration_status': None})
+            update(updated_values)
         model_query(context, models.VolumeMetadata, session=session).\
             filter_by(volume_id=volume_id).\
            update({'deleted': True,
@@ -1329,6 +1546,53 @@
            update({'deleted': True,
                    'deleted_at': now,
                    'updated_at': literal_column('updated_at')})
+    del updated_values['updated_at']
+    return updated_values
+
+
+def _include_in_cluster(context, cluster, model, partial_rename, filters):
+    """Generic include in cluster method.
+
+    When we include resources in a cluster we have to be careful to preserve
+    the addressing sections that have not been provided. That's why we allow
+    partial renaming, so we can preserve the backend and pool if we are only
+    providing host/cluster level information, and preserve pool information
+    if we only provide backend level information.
+
+    For example, when we include a host in a cluster we receive calls with
+    filters like {'host': 'localhost@lvmdriver-1'} and a cluster value like
+    'mycluster@lvmdriver-1'. Since in the DB the resources will have the
+    host field set to something like 'localhost@lvmdriver-1#lvmdriver-1' we
+    want to include the original pool in the new cluster_name. So we want to
+    store in cluster_name the value 'mycluster@lvmdriver-1#lvmdriver-1'.
+    """
+    filters = _clean_filters(filters)
+    if filters and not is_valid_model_filters(model, filters):
+        return None
+    query = model_query(context, model)
+
+    # cluster_name and host are special filter cases
+    for field in {'cluster_name', 'host'}.intersection(filters):
+        value = filters.pop(field)
+        # We do a special backend filter
+        query = query.filter(_filter_host(getattr(model, field), value))
+        # If we want to do a partial rename and we haven't set the cluster
+        # already, the value we want to set is a SQL replace of the existing
+        # field value.
+        if partial_rename and isinstance(cluster, six.string_types):
+            cluster = func.replace(getattr(model, field), value, cluster)
+
+    query = query.filter_by(**filters)
+    result = query.update({'cluster_name': cluster}, synchronize_session=False)
+    return result
+
+
+@require_admin_context
+def volume_include_in_cluster(context, cluster, partial_rename=True,
+                              **filters):
+    """Include all volumes matching the filters into a cluster."""
+    return _include_in_cluster(context, cluster, models.Volume,
+                               partial_rename, filters)
 
 
 @require_admin_context
@@ -1408,14 +1672,16 @@ def _volume_get_query(context, session=None, project_only=False,
             options(joinedload('volume_admin_metadata')).\
             options(joinedload('volume_type')).\
             options(joinedload('volume_attachment')).\
-            options(joinedload('consistencygroup'))
+            options(joinedload('consistencygroup')).\
+            options(joinedload('group'))
     else:
         return model_query(context, models.Volume, session=session,
                            project_only=project_only).\
            options(joinedload('volume_metadata')).\
           options(joinedload('volume_type')).\
           options(joinedload('volume_attachment')).\
-          options(joinedload('consistencygroup'))
+          options(joinedload('consistencygroup')).\
+          options(joinedload('group'))
 
 
 @require_context
@@ -1521,6 +1787,23 @@ def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None,
     return query.all()
 
 
+@require_admin_context
+def get_volume_summary_all(context):
+    """Retrieves a summary of all volumes.
+
+    :param context: context to query under
+    :returns: volume summary of all projects
+    """
+    query = model_query(context, func.count(models.Volume.id),
+                        func.sum(models.Volume.size), read_deleted="no")
+
+    if query is None:
+        return []
+
+    result = query.first()
+    return (result[0] or 0, result[1] or 0)
+
+
 @require_admin_context
 def volume_get_all_by_host(context, host, filters=None):
     """Retrieves all volumes hosted on a host.
@@ -1561,7 +1844,7 @@ def volume_get_all_by_group(context, group_id, filters=None):
     """Retrieves all volumes associated with the group_id.
:param context: context to query under - :param group_id: group ID for all volumes being retrieved + :param group_id: consistency group ID for all volumes being retrieved :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters @@ -1577,6 +1860,27 @@ def volume_get_all_by_group(context, group_id, filters=None): return query.all() +@require_context +def volume_get_all_by_generic_group(context, group_id, filters=None): + """Retrieves all volumes associated with the group_id. + + :param context: context to query under + :param group_id: group ID for all volumes being retrieved + :param filters: dictionary of filters; values that are in lists, tuples, + or sets cause an 'IN' operation, while exact matching + is used for other values, see _process_volume_filters + function for more information + :returns: list of matching volumes + """ + query = _volume_get_query(context).filter_by(group_id=group_id) + if filters: + query = _process_volume_filters(query, filters) + # No volumes would match, return empty list + if query is None: + return [] + return query.all() + + @require_context def volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, @@ -1823,6 +2127,25 @@ def process_sort_params(sort_keys, sort_dirs, default_keys=None, return result_keys, result_dirs +@require_context +def get_volume_summary_by_project(context, project_id): + """Retrieves all volumes summary in a project. + + :param context: context to query under + :param project_id: project for all volumes being retrieved + :returns: volume summary of a project + """ + query = model_query(context, func.count(models.Volume.id), + func.sum(models.Volume.size), read_deleted="no").\ + filter_by(project_id=project_id) + + if query is None: + return [] + + result = query.first() + return (result[0] or 0, result[1] or 0) + + @handle_db_data_error @require_context def volume_update(context, volume_id, values): @@ -1850,6 +2173,38 @@ def volume_update(context, volume_id, values): return volume_ref +@handle_db_data_error +@require_context +def volumes_update(context, values_list): + session = get_session() + with session.begin(): + volume_refs = [] + for values in values_list: + volume_id = values['id'] + values.pop('id') + metadata = values.get('metadata') + if metadata is not None: + _volume_user_metadata_update(context, + volume_id, + values.pop('metadata'), + delete=True, + session=session) + + admin_metadata = values.get('admin_metadata') + if is_admin_context(context) and admin_metadata is not None: + _volume_admin_metadata_update(context, + volume_id, + values.pop('admin_metadata'), + delete=True, + session=session) + + volume_ref = _volume_get(context, volume_id, session=session) + volume_ref.update(values) + volume_refs.append(volume_ref) + + return volume_refs + + @require_context def volume_attachment_update(context, attachment_id, values): session = get_session() @@ -1898,6 +2253,8 @@ def volume_has_undeletable_snapshots_filter(): and_(models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted, or_(models.Snapshot.cgsnapshot_id != None, # noqa: != None + models.Snapshot.status.notin_(deletable_statuses)), + or_(models.Snapshot.group_snapshot_id != None, # noqa: != None models.Snapshot.status.notin_(deletable_statuses)))) @@ -1908,29 +2265,6 @@ def volume_has_attachments_filter(): ~models.VolumeAttachment.deleted)) -def 
volume_has_same_encryption_type(new_vol_type): - """Filter to check that encryption matches with new volume type. - - They match if both don't have encryption or both have the same Encryption. - """ - # Query for the encryption in the new volume type - encryption_alias = aliased(models.Encryption) - new_enc = sql.select((encryption_alias.encryption_id,)).where(and_( - ~encryption_alias.deleted, - encryption_alias.volume_type_id == new_vol_type)) - - # Query for the encryption in the old volume type - old_enc = sql.select((models.Encryption.encryption_id,)).where(and_( - ~models.Encryption.deleted, - models.Encryption.volume_type_id == models.Volume.volume_type_id)) - - # NOTE(geguileo): This query is optimizable, but at this moment I can't - # figure out how. - return or_(and_(new_enc.as_scalar().is_(None), - old_enc.as_scalar().is_(None)), - new_enc.as_scalar() == old_enc.as_scalar()) - - def volume_qos_allows_retype(new_vol_type): """Filter to check that qos allows retyping the volume to new_vol_type. @@ -2228,19 +2562,23 @@ def snapshot_create(context, values): @require_admin_context @_retry_on_deadlock def snapshot_destroy(context, snapshot_id): + utcnow = timeutils.utcnow() session = get_session() with session.begin(): + updated_values = {'status': 'deleted', + 'deleted': True, + 'deleted_at': utcnow, + 'updated_at': literal_column('updated_at')} model_query(context, models.Snapshot, session=session).\ filter_by(id=snapshot_id).\ - update({'status': 'deleted', - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + update(updated_values) model_query(context, models.SnapshotMetadata, session=session).\ filter_by(snapshot_id=snapshot_id).\ update({'deleted': True, - 'deleted_at': timeutils.utcnow(), + 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) + del updated_values['updated_at'] + return updated_values @require_context @@ -2361,6 +2699,16 @@ def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): all() +@require_context +def snapshot_get_all_for_group_snapshot(context, group_snapshot_id): + return model_query(context, models.Snapshot, read_deleted='no', + project_only=True).\ + filter_by(group_snapshot_id=group_snapshot_id).\ + options(joinedload('volume')).\ + options(joinedload('snapshot_metadata')).\ + all() + + @require_context def snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, @@ -2592,6 +2940,48 @@ def volume_type_create(context, values, projects=None): return volume_type_ref +@handle_db_data_error +@require_admin_context +def group_type_create(context, values, projects=None): + """Create a new group type. 
+ + In order to pass in group specs, the values dict should contain a + 'group_specs' key/value pair: + {'group_specs' : {'k1': 'v1', 'k2': 'v2', ...}} + """ + if not values.get('id'): + values['id'] = six.text_type(uuid.uuid4()) + + projects = projects or [] + + session = get_session() + with session.begin(): + try: + _group_type_get_by_name(context, values['name'], session) + raise exception.GroupTypeExists(id=values['name']) + except exception.GroupTypeNotFoundByName: + pass + try: + _group_type_get(context, values['id'], session) + raise exception.GroupTypeExists(id=values['id']) + except exception.GroupTypeNotFound: + pass + try: + values['group_specs'] = _metadata_refs(values.get('group_specs'), + models.GroupTypeSpecs) + group_type_ref = models.GroupTypes() + group_type_ref.update(values) + session.add(group_type_ref) + except Exception as e: + raise db_exc.DBError(e) + for project in set(projects): + access_ref = models.GroupTypeProjects() + access_ref.update({"group_type_id": group_type_ref.id, + "project_id": project}) + access_ref.save(session=session) + return group_type_ref + + def _volume_type_get_query(context, session=None, read_deleted='no', expected_fields=None): expected_fields = expected_fields or [] @@ -2615,6 +3005,29 @@ def _volume_type_get_query(context, session=None, read_deleted='no', return query +def _group_type_get_query(context, session=None, read_deleted='no', + expected_fields=None): + expected_fields = expected_fields or [] + query = model_query(context, + models.GroupTypes, + session=session, + read_deleted=read_deleted).\ + options(joinedload('group_specs')) + + if 'projects' in expected_fields: + query = query.options(joinedload('projects')) + + if not context.is_admin: + the_filter = [models.GroupTypes.is_public == true()] + projects_attr = models.GroupTypes.projects + the_filter.extend([ + projects_attr.any(project_id=context.project_id) + ]) + query = query.filter(or_(*the_filter)) + + return query + + def _process_volume_types_filters(query, filters): context = filters.pop('context', None) if 'is_public' in filters and filters['is_public'] is not None: @@ -2650,6 +3063,41 @@ def _process_volume_types_filters(query, filters): return query +def _process_group_types_filters(query, filters): + context = filters.pop('context', None) + if 'is_public' in filters and filters['is_public'] is not None: + the_filter = [models.GroupTypes.is_public == filters['is_public']] + if filters['is_public'] and context.project_id is not None: + projects_attr = getattr(models.GroupTypes, 'projects') + the_filter.extend([ + projects_attr.any(project_id=context.project_id, deleted=0) + ]) + if len(the_filter) > 1: + query = query.filter(or_(*the_filter)) + else: + query = query.filter(the_filter[0]) + if 'is_public' in filters: + del filters['is_public'] + if filters: + # Ensure that filters' keys exist on the model + if not is_valid_model_filters(models.GroupTypes, filters): + return + if filters.get('group_specs') is not None: + the_filter = [] + searchdict = filters.get('group_specs') + group_specs = getattr(models.GroupTypes, 'group_specs') + for k, v in searchdict.items(): + the_filter.extend([group_specs.any(key=k, value=v, + deleted=False)]) + if len(the_filter) > 1: + query = query.filter(and_(*the_filter)) + else: + query = query.filter(the_filter[0]) + del filters['group_specs'] + query = query.filter_by(**filters) + return query + + @handle_db_data_error @require_admin_context def volume_type_update(context, volume_type_id, values): @@ -2694,6 +3142,50 @@ def 
volume_type_update(context, volume_type_id, values):
     return volume_type_ref
 
 
+@handle_db_data_error
+@require_admin_context
+def group_type_update(context, group_type_id, values):
+    session = get_session()
+    with session.begin():
+        # Check it exists
+        group_type_ref = _group_type_ref_get(context,
+                                             group_type_id,
+                                             session)
+        if not group_type_ref:
+            raise exception.GroupTypeNotFound(type_id=group_type_id)
+
+        # No description change
+        if values['description'] is None:
+            del values['description']
+
+        # No is_public change
+        if values['is_public'] is None:
+            del values['is_public']
+
+        # No name change
+        if values['name'] is None:
+            del values['name']
+        else:
+            # Group type name is unique. Changing the name to one that
+            # belongs to a different group_type must be prevented.
+            check_grp_type = None
+            try:
+                check_grp_type = \
+                    _group_type_get_by_name(context,
+                                            values['name'],
+                                            session=session)
+            except exception.GroupTypeNotFoundByName:
+                pass
+            else:
+                if check_grp_type.get('id') != group_type_id:
+                    raise exception.GroupTypeExists(id=values['name'])
+
+        group_type_ref.update(values)
+        group_type_ref.save(session=session)
+
+    return group_type_ref
+
+
 @require_context
 def volume_type_get_all(context, inactive=False, filters=None, marker=None,
                         limit=None, sort_keys=None, sort_dirs=None,
@@ -2746,6 +3238,58 @@ def volume_type_get_all(context, inactive=False, filters=None, marker=None,
     return result
 
 
+@require_context
+def group_type_get_all(context, inactive=False, filters=None, marker=None,
+                       limit=None, sort_keys=None, sort_dirs=None,
+                       offset=None, list_result=False):
+    """Returns a dict describing all group_types with name as key.
+
+    If no sort parameters are specified then the returned group types are
+    sorted first by the 'created_at' key and then by the 'id' key in
+    descending order.
+
+    :param context: context to query under
+    :param marker: the last item of the previous page, used to determine the
+                   next page of results to return
+    :param limit: maximum number of items to return
+    :param sort_keys: list of attributes by which results should be sorted,
+                      paired with corresponding item in sort_dirs
+    :param sort_dirs: list of directions in which results should be sorted,
+                      paired with corresponding item in sort_keys
+    :param filters: dictionary of filters; values that are in lists, tuples,
+                    or sets cause an 'IN' operation, while exact matching
+                    is used for other values, see _process_group_types_filters
+                    function for more information
+    :param list_result: For compatibility, if list_result = True, return a
+                        list instead of dict.
+ :returns: list/dict of matching group types + """ + session = get_session() + with session.begin(): + # Add context for _process_group_types_filters + filters = filters or {} + filters['context'] = context + # Generate the query + query = _generate_paginate_query(context, session, marker, limit, + sort_keys, sort_dirs, filters, offset, + models.GroupTypes) + # No group types would match, return empty dict or list + if query is None: + if list_result: + return [] + return {} + + rows = query.all() + if list_result: + result = [_dict_with_group_specs_if_authorized(context, row) + for row in rows] + return result + result = {row['name']: _dict_with_group_specs_if_authorized(context, + row) + for row in rows} + return result + + def _volume_type_get_id_from_volume_type_query(context, id, session=None): return model_query( context, models.VolumeTypes.id, read_deleted="no", @@ -2753,6 +3297,13 @@ def _volume_type_get_id_from_volume_type_query(context, id, session=None): filter_by(id=id) +def _group_type_get_id_from_group_type_query(context, id, session=None): + return model_query( + context, models.GroupTypes.id, read_deleted="no", + session=session, base_model=models.GroupTypes).\ + filter_by(id=id) + + def _volume_type_get_id_from_volume_type(context, id, session=None): result = _volume_type_get_id_from_volume_type_query( context, id, session=session).first() @@ -2761,6 +3312,14 @@ def _volume_type_get_id_from_volume_type(context, id, session=None): return result[0] +def _group_type_get_id_from_group_type(context, id, session=None): + result = _group_type_get_id_from_group_type_query( + context, id, session=session).first() + if not result: + raise exception.GroupTypeNotFound(group_type_id=id) + return result[0] + + def _volume_type_get_db_object(context, id, session=None, inactive=False, expected_fields=None): read_deleted = "yes" if inactive else "no" @@ -2771,6 +3330,16 @@ def _volume_type_get_db_object(context, id, session=None, inactive=False, return result +def _group_type_get_db_object(context, id, session=None, inactive=False, + expected_fields=None): + read_deleted = "yes" if inactive else "no" + result = _group_type_get_query( + context, session, read_deleted, expected_fields).\ + filter_by(id=id).\ + first() + return result + + @require_context def _volume_type_get(context, id, session=None, inactive=False, expected_fields=None): @@ -2788,6 +3357,23 @@ def _volume_type_get(context, id, session=None, inactive=False, return vtype +@require_context +def _group_type_get(context, id, session=None, inactive=False, + expected_fields=None): + expected_fields = expected_fields or [] + result = _group_type_get_db_object(context, id, session, inactive, + expected_fields) + if not result: + raise exception.GroupTypeNotFound(group_type_id=id) + + gtype = _dict_with_group_specs_if_authorized(context, result) + + if 'projects' in expected_fields: + gtype['projects'] = [p['project_id'] for p in result['projects']] + + return gtype + + @require_context def volume_type_get(context, id, inactive=False, expected_fields=None): """Return a dict describing specific volume_type.""" @@ -2798,12 +3384,28 @@ def volume_type_get(context, id, inactive=False, expected_fields=None): expected_fields=expected_fields) +@require_context +def group_type_get(context, id, inactive=False, expected_fields=None): + """Return a dict describing specific group_type.""" + + return _group_type_get(context, id, + session=None, + inactive=inactive, + expected_fields=expected_fields) + + def _volume_type_get_full(context, id): 
"""Return dict for a specific volume_type with extra_specs and projects.""" return _volume_type_get(context, id, session=None, inactive=False, expected_fields=('extra_specs', 'projects')) +def _group_type_get_full(context, id): + """Return dict for a specific group_type with group_specs and projects.""" + return _group_type_get(context, id, session=None, inactive=False, + expected_fields=('group_specs', 'projects')) + + @require_context def _volume_type_ref_get(context, id, session=None, inactive=False): read_deleted = "yes" if inactive else "no" @@ -2821,6 +3423,23 @@ def _volume_type_ref_get(context, id, session=None, inactive=False): return result +@require_context +def _group_type_ref_get(context, id, session=None, inactive=False): + read_deleted = "yes" if inactive else "no" + result = model_query(context, + models.GroupTypes, + session=session, + read_deleted=read_deleted).\ + options(joinedload('group_specs')).\ + filter_by(id=id).\ + first() + + if not result: + raise exception.GroupTypeNotFound(group_type_id=id) + + return result + + @require_context def _volume_type_get_by_name(context, name, session=None): result = model_query(context, models.VolumeTypes, session=session).\ @@ -2834,6 +3453,19 @@ def _volume_type_get_by_name(context, name, session=None): return _dict_with_extra_specs_if_authorized(context, result) +@require_context +def _group_type_get_by_name(context, name, session=None): + result = model_query(context, models.GroupTypes, session=session).\ + options(joinedload('group_specs')).\ + filter_by(name=name).\ + first() + + if not result: + raise exception.GroupTypeNotFoundByName(group_type_name=name) + + return _dict_with_group_specs_if_authorized(context, result) + + @require_context def volume_type_get_by_name(context, name): """Return a dict describing specific volume_type.""" @@ -2841,6 +3473,13 @@ def volume_type_get_by_name(context, name): return _volume_type_get_by_name(context, name) +@require_context +def group_type_get_by_name(context, name): + """Return a dict describing specific group_type.""" + + return _group_type_get_by_name(context, name) + + @require_context def volume_types_get_by_name_or_id(context, volume_type_list): """Return a dict describing specific volume_type.""" @@ -2854,12 +3493,32 @@ def volume_types_get_by_name_or_id(context, volume_type_list): return req_volume_types +@require_context +def group_types_get_by_name_or_id(context, group_type_list): + """Return a dict describing specific group_type.""" + req_group_types = [] + for grp_t in group_type_list: + if not uuidutils.is_uuid_like(grp_t): + grp_type = _group_type_get_by_name(context, grp_t) + else: + grp_type = _group_type_get(context, grp_t) + req_group_types.append(grp_type) + return req_group_types + + @require_admin_context def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): read_deleted = "yes" if inactive else "no" - return model_query(context, models.VolumeTypes, - read_deleted=read_deleted). \ - filter_by(qos_specs_id=qos_specs_id).all() + # Raise QoSSpecsNotFound if no specs found + if not resource_exists(context, + models.QualityOfServiceSpecs, + qos_specs_id): + raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) + vts = (model_query(context, models.VolumeTypes, read_deleted=read_deleted). + options(joinedload('extra_specs')). + options(joinedload('projects')). 
+ filter_by(qos_specs_id=qos_specs_id).all()) + return vts @require_admin_context @@ -2943,28 +3602,62 @@ def volume_type_qos_specs_get(context, type_id): @require_admin_context @_retry_on_deadlock def volume_type_destroy(context, id): + utcnow = timeutils.utcnow() session = get_session() with session.begin(): _volume_type_get(context, id, session) results = model_query(context, models.Volume, session=session). \ filter_by(volume_type_id=id).all() - if results: + group_count = model_query(context, + models.GroupVolumeTypeMapping, + read_deleted="no", + session=session).\ + filter_by(volume_type_id=id).count() + if results or group_count: LOG.error(_LE('VolumeType %s deletion failed, ' 'VolumeType in use.'), id) raise exception.VolumeTypeInUse(volume_type_id=id) + updated_values = {'deleted': True, + 'deleted_at': utcnow, + 'updated_at': literal_column('updated_at')} model_query(context, models.VolumeTypes, session=session).\ filter_by(id=id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + update(updated_values) model_query(context, models.VolumeTypeExtraSpecs, session=session).\ filter_by(volume_type_id=id).\ update({'deleted': True, - 'deleted_at': timeutils.utcnow(), + 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) model_query(context, models.VolumeTypeProjects, session=session, read_deleted="int_no").filter_by( volume_type_id=id).soft_delete(synchronize_session=False) + del updated_values['updated_at'] + return updated_values + + +@require_admin_context +@_retry_on_deadlock +def group_type_destroy(context, id): + session = get_session() + with session.begin(): + _group_type_get(context, id, session) + # TODO(xyang): Uncomment the following after groups table is added. + # results = model_query(context, models.Group, session=session). \ + # filter_by(group_type_id=id).all() + # if results: + # LOG.error(_LE('GroupType %s deletion failed, ' + # 'GroupType in use.'), id) + # raise exception.GroupTypeInUse(group_type_id=id) + model_query(context, models.GroupTypes, session=session).\ + filter_by(id=id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + model_query(context, models.GroupTypeSpecs, session=session).\ + filter_by(group_type_id=id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_context @@ -2984,7 +3677,8 @@ def volume_get_active_by_window(context, query = (query.options(joinedload('volume_metadata')). options(joinedload('volume_type')). options(joinedload('volume_attachment')). - options(joinedload('consistencygroup'))) + options(joinedload('consistencygroup')). 
+ options(joinedload('group'))) if is_admin_context(context): query = query.options(joinedload('volume_admin_metadata')) @@ -2997,6 +3691,11 @@ def _volume_type_access_query(context, session=None): read_deleted="int_no") +def _group_type_access_query(context, session=None): + return model_query(context, models.GroupTypeProjects, session=session, + read_deleted="int_no") + + @require_admin_context def volume_type_access_get_all(context, type_id): volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) @@ -3004,6 +3703,44 @@ def volume_type_access_get_all(context, type_id): filter_by(volume_type_id=volume_type_id).all() +@require_admin_context +def group_type_access_get_all(context, type_id): + group_type_id = _group_type_get_id_from_group_type(context, type_id) + return _group_type_access_query(context).\ + filter_by(group_type_id=group_type_id).all() + + +def _group_volume_type_mapping_query(context, session=None): + return model_query(context, models.GroupVolumeTypeMapping, session=session, + read_deleted="no") + + +@require_admin_context +def volume_type_get_all_by_group(context, group_id): + # Generic volume group + mappings = (_group_volume_type_mapping_query(context). + filter_by(group_id=group_id).all()) + session = get_session() + with session.begin(): + volume_type_ids = [mapping.volume_type_id for mapping in mappings] + query = (model_query(context, + models.VolumeTypes, + session=session, + read_deleted='no'). + filter(models.VolumeTypes.id.in_(volume_type_ids)). + options(joinedload('extra_specs')). + all()) + return query + + +def _group_volume_type_mapping_get_all_by_group_volume_type(context, group_id, + volume_type_id): + mappings = _group_volume_type_mapping_query(context).\ + filter_by(group_id=group_id).\ + filter_by(volume_type_id=volume_type_id).all() + return mappings + + @require_admin_context def volume_type_access_add(context, type_id, project_id): """Add given tenant to the volume type access list.""" @@ -3023,6 +3760,25 @@ def volume_type_access_add(context, type_id, project_id): return access_ref +@require_admin_context +def group_type_access_add(context, type_id, project_id): + """Add given tenant to the group type access list.""" + group_type_id = _group_type_get_id_from_group_type(context, type_id) + + access_ref = models.GroupTypeProjects() + access_ref.update({"group_type_id": group_type_id, + "project_id": project_id}) + + session = get_session() + with session.begin(): + try: + access_ref.save(session=session) + except db_exc.DBDuplicateEntry: + raise exception.GroupTypeAccessExists(group_type_id=type_id, + project_id=project_id) + return access_ref + + @require_admin_context def volume_type_access_remove(context, type_id, project_id): """Remove given tenant from the volume type access list.""" @@ -3037,6 +3793,20 @@ def volume_type_access_remove(context, type_id, project_id): volume_type_id=type_id, project_id=project_id) +@require_admin_context +def group_type_access_remove(context, type_id, project_id): + """Remove given tenant from the group type access list.""" + group_type_id = _group_type_get_id_from_group_type(context, type_id) + + count = (_group_type_access_query(context). + filter_by(group_type_id=group_type_id). + filter_by(project_id=project_id). 
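+             # soft_delete() returns the number of rows marked as deleted;
+             # zero means the project was not on the access list.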
+ soft_delete(synchronize_session=False)) + if count == 0: + raise exception.GroupTypeAccessNotFound( + group_type_id=type_id, project_id=project_id) + + #################### @@ -3111,21 +3881,91 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, #################### +def _group_type_specs_query(context, group_type_id, session=None): + return model_query(context, models.GroupTypeSpecs, session=session, + read_deleted="no").\ + filter_by(group_type_id=group_type_id) + + +@require_context +def group_type_specs_get(context, group_type_id): + rows = _group_type_specs_query(context, group_type_id).\ + all() + + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +def group_type_specs_delete(context, group_type_id, key): + session = get_session() + with session.begin(): + _group_type_specs_get_item(context, group_type_id, key, + session) + _group_type_specs_query(context, group_type_id, session).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def _group_type_specs_get_item(context, group_type_id, key, + session=None): + result = _group_type_specs_query( + context, group_type_id, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.GroupTypeSpecsNotFound( + group_specs_key=key, + group_type_id=group_type_id) + + return result + + +@handle_db_data_error +@require_context +def group_type_specs_update_or_create(context, group_type_id, + specs): + session = get_session() + with session.begin(): + spec_ref = None + for key, value in specs.items(): + try: + spec_ref = _group_type_specs_get_item( + context, group_type_id, key, session) + except exception.GroupTypeSpecsNotFound: + spec_ref = models.GroupTypeSpecs() + spec_ref.update({"key": key, "value": value, + "group_type_id": group_type_id, + "deleted": False}) + spec_ref.save(session=session) + + return specs + + +#################### + + @require_admin_context def qos_specs_create(context, values): """Create a new QoS specs. :param values dictionary that contains specifications for QoS e.g. 
{'name': 'Name', - 'qos_specs': { - 'consumer': 'front-end', + 'consumer': 'front-end', + 'specs': { 'total_iops_sec': 1000, 'total_bytes_sec': 1024000 } } """ specs_id = str(uuid.uuid4()) - session = get_session() with session.begin(): try: @@ -3145,8 +3985,18 @@ def qos_specs_create(context, values): specs_root.update(root) specs_root.save(session=session) + # Save 'consumer' value directly as it will not be in + # values['specs'] and so we avoid modifying/copying passed in dict + consumer = {'key': 'consumer', + 'value': values['consumer'], + 'specs_id': specs_id, + 'id': six.text_type(uuid.uuid4())} + cons_entry = models.QualityOfServiceSpecs() + cons_entry.update(consumer) + cons_entry.save(session=session) + # Insert all specification entries for QoS specs - for k, v in values['qos_specs'].items(): + for k, v in values.get('specs', {}).items(): item = dict(key=k, value=v, specs_id=specs_id) item['id'] = str(uuid.uuid4()) spec_entry = models.QualityOfServiceSpecs() @@ -3213,12 +4063,10 @@ def _dict_with_qos_specs(rows): result = [] for row in rows: if row['key'] == 'QoS_Specs_Name': - member = {} - member['name'] = row['value'] - member.update(dict(id=row['id'])) + member = {'name': row['value'], 'id': row['id']} if row.specs: spec_dict = _dict_with_children_specs(row.specs) - member.update(dict(consumer=spec_dict['consumer'])) + member['consumer'] = spec_dict['consumer'] del spec_dict['consumer'] member.update(dict(specs=spec_dict)) result.append(member) @@ -3228,7 +4076,6 @@ def _dict_with_qos_specs(rows): @require_admin_context def qos_specs_get(context, qos_specs_id, inactive=False): rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive) - return _dict_with_qos_specs(rows)[0] @@ -3321,8 +4168,6 @@ def qos_specs_associations_get(context, qos_specs_id): extend qos specs association to other entities, such as volumes, sometime in future. """ - # Raise QoSSpecsNotFound if no specs found - _qos_specs_get_ref(context, qos_specs_id, None) return volume_type_qos_associations_get(context, qos_specs_id) @@ -3355,7 +4200,6 @@ def qos_specs_disassociate_all(context, qos_specs_id): def qos_specs_item_delete(context, qos_specs_id, key): session = get_session() with session.begin(): - _qos_specs_get_item(context, qos_specs_id, key) session.query(models.QualityOfServiceSpecs). \ filter(models.QualityOfServiceSpecs.key == key). \ filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \ @@ -3369,13 +4213,16 @@ def qos_specs_delete(context, qos_specs_id): session = get_session() with session.begin(): _qos_specs_get_ref(context, qos_specs_id, session) + updated_values = {'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')} session.query(models.QualityOfServiceSpecs).\ filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id, models.QualityOfServiceSpecs.specs_id == qos_specs_id)).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + update(updated_values) + del updated_values['updated_at'] + return updated_values @require_admin_context @@ -3396,7 +4243,7 @@ def _qos_specs_get_item(context, qos_specs_id, key, session=None): @handle_db_data_error @require_admin_context -def qos_specs_update(context, qos_specs_id, specs): +def qos_specs_update(context, qos_specs_id, updates): """Make updates to an existing qos specs. Perform add, update or delete key/values to a qos specs. 
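For illustration, a minimal usage sketch of the values layout that qos_specs_create() expects after the change above (mirroring the docstring example; the name and numbers are placeholders): 'consumer' now sits at the top level of the dict and the measurable key/value pairs go under 'specs'.

    values = {'name': 'Name',
              'consumer': 'front-end',
              'specs': {'total_iops_sec': 1000,
                        'total_bytes_sec': 1024000}}
    # Creates the QoS specs root row plus one row per spec key/value
    qos_specs_create(context, values)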
@@ -3406,6 +4253,13 @@ def qos_specs_update(context, qos_specs_id, specs): with session.begin(): # make sure qos specs exists _qos_specs_get_ref(context, qos_specs_id, session) + specs = updates.get('specs', {}) + + if 'consumer' in updates: + # Massage consumer to the right place for DB and copy specs + # before updating so we don't modify dict for caller + specs = specs.copy() + specs['consumer'] = updates['consumer'] spec_ref = None for key in specs.keys(): try: @@ -3873,12 +4727,15 @@ def backup_update(context, backup_id, values): @require_admin_context def backup_destroy(context, backup_id): + updated_values = {'status': fields.BackupStatus.DELETED, + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')} model_query(context, models.Backup).\ filter_by(id=backup_id).\ - update({'status': fields.BackupStatus.DELETED, - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + update(updated_values) + del updated_values['updated_at'] + return updated_values ############################### @@ -3964,6 +4821,7 @@ def transfer_create(context, values): @require_context @_retry_on_deadlock def transfer_destroy(context, transfer_id): + utcnow = timeutils.utcnow() session = get_session() with session.begin(): transfer_ref = _transfer_get(context, @@ -3981,11 +4839,16 @@ def transfer_destroy(context, transfer_id): volume_ref['status'] = 'available' volume_ref.update(volume_ref) volume_ref.save(session=session) + updated_values = {'deleted': True, + 'deleted_at': utcnow, + 'updated_at': literal_column('updated_at')} model_query(context, models.Transfer, session=session).\ filter_by(id=transfer_id).\ update({'deleted': True, - 'deleted_at': timeutils.utcnow(), + 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) + del updated_values['updated_at'] + return updated_values @require_context @@ -4209,14 +5072,21 @@ def consistencygroup_update(context, consistencygroup_id, values): @require_admin_context def consistencygroup_destroy(context, consistencygroup_id): + utcnow = timeutils.utcnow() session = get_session() with session.begin(): + updated_values = {'status': fields.ConsistencyGroupStatus.DELETED, + 'deleted': True, + 'deleted_at': utcnow, + 'updated_at': literal_column('updated_at')} model_query(context, models.ConsistencyGroup, session=session).\ filter_by(id=consistencygroup_id).\ update({'status': fields.ConsistencyGroupStatus.DELETED, 'deleted': True, - 'deleted_at': timeutils.utcnow(), + 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) + del updated_values['updated_at'] + return updated_values def cg_has_cgsnapshot_filter(): @@ -4277,6 +5147,304 @@ def cg_creating_from_src(cg_id=None, cgsnapshot_id=None): return sql.exists([subq]).where(match_id) +@require_admin_context +def consistencygroup_include_in_cluster(context, cluster, + partial_rename=True, **filters): + """Include all consistency groups matching the filters into a cluster.""" + return _include_in_cluster(context, cluster, models.ConsistencyGroup, + partial_rename, filters) + + +############################### + + +@require_admin_context +def _group_data_get_for_project(context, project_id, + session=None): + query = model_query(context, + func.count(models.Group.id), + read_deleted="no", + session=session).\ + filter_by(project_id=project_id) + + result = query.first() + + return (0, result[0] or 0) + + +@require_context +def _group_get(context, group_id, session=None): + result = (model_query(context, 
models.Group, session=session, + project_only=True). + filter_by(id=group_id). + first()) + + if not result: + raise exception.GroupNotFound(group_id=group_id) + + return result + + +@require_context +def group_get(context, group_id): + return _group_get(context, group_id) + + +def _groups_get_query(context, session=None, project_only=False): + return model_query(context, models.Group, session=session, + project_only=project_only) + + +def _process_groups_filters(query, filters): + if filters: + # Ensure that filters' keys exist on the model + if not is_valid_model_filters(models.Group, filters): + return + query = query.filter_by(**filters) + return query + + +def _group_get_all(context, filters=None, marker=None, limit=None, + offset=None, sort_keys=None, sort_dirs=None): + if filters and not is_valid_model_filters(models.Group, + filters): + return [] + + session = get_session() + with session.begin(): + # Generate the paginate query + query = _generate_paginate_query(context, session, marker, + limit, sort_keys, sort_dirs, filters, + offset, models.Group) + + return query.all() if query else [] + + +@require_admin_context +def group_get_all(context, filters=None, marker=None, limit=None, + offset=None, sort_keys=None, sort_dirs=None): + """Retrieves all groups. + + If no sort parameters are specified then the returned groups are sorted + first by the 'created_at' key and then by the 'id' key in descending + order. + + :param context: context to query under + :param marker: the last item of the previous page, used to determine the + next page of results to return + :param limit: maximum number of items to return + :param sort_keys: list of attributes by which results should be sorted, + paired with corresponding item in sort_dirs + :param sort_dirs: list of directions in which results should be sorted, + paired with corresponding item in sort_keys + :param filters: Filters for the query in the form of key/value. + :returns: list of matching groups + """ + return _group_get_all(context, filters, marker, limit, offset, + sort_keys, sort_dirs) + + +@require_context +def group_get_all_by_project(context, project_id, filters=None, + marker=None, limit=None, offset=None, + sort_keys=None, sort_dirs=None): + """Retrieves all groups in a project. + + If no sort parameters are specified then the returned groups are sorted + first by the 'created_at' key and then by the 'id' key in descending + order. + + :param context: context to query under + :param marker: the last item of the previous page, used to determine the + next page of results to return + :param limit: maximum number of items to return + :param sort_keys: list of attributes by which results should be sorted, + paired with corresponding item in sort_dirs + :param sort_dirs: list of directions in which results should be sorted, + paired with corresponding item in sort_keys + :param filters: Filters for the query in the form of key/value.
+ :returns: list of matching groups + """ + authorize_project_context(context, project_id) + if not filters: + filters = {} + else: + filters = filters.copy() + + filters['project_id'] = project_id + return _group_get_all(context, filters, marker, limit, offset, + sort_keys, sort_dirs) + + +@handle_db_data_error +@require_context +def group_create(context, values, group_snapshot_id=None, + source_group_id=None): + group_model = models.Group + + values = values.copy() + if not values.get('id'): + values['id'] = six.text_type(uuid.uuid4()) + + session = get_session() + with session.begin(): + if group_snapshot_id: + conditions = [group_model.id == models.GroupSnapshot.group_id, + models.GroupSnapshot.id == group_snapshot_id] + elif source_group_id: + conditions = [group_model.id == source_group_id] + else: + conditions = None + + if conditions: + # We don't want duplicated field values + values.pop('group_type_id', None) + values.pop('availability_zone', None) + values.pop('host', None) + + sel = session.query(group_model.group_type_id, + group_model.availability_zone, + group_model.host, + *(bindparam(k, v) for k, v in values.items()) + ).filter(*conditions) + names = ['group_type_id', 'availability_zone', 'host'] + names.extend(values.keys()) + insert_stmt = group_model.__table__.insert().from_select( + names, sel) + result = session.execute(insert_stmt) + # If we couldn't insert the row because of the conditions raise + # the right exception + if not result.rowcount: + if source_group_id: + raise exception.GroupNotFound( + group_id=source_group_id) + raise exception.GroupSnapshotNotFound( + group_snapshot_id=group_snapshot_id) + else: + mappings = [] + for item in values.get('volume_type_ids') or []: + mapping = models.GroupVolumeTypeMapping() + mapping['volume_type_id'] = item + mapping['group_id'] = values['id'] + mappings.append(mapping) + + values['volume_types'] = mappings + + group = group_model() + group.update(values) + session.add(group) + + return _group_get(context, values['id'], session=session) + + +@handle_db_data_error +@require_context +def group_volume_type_mapping_create(context, group_id, volume_type_id): + """Add group volume_type mapping entry.""" + # Verify group exists + _group_get(context, group_id) + # Verify volume type exists + _volume_type_get_id_from_volume_type(context, volume_type_id) + + existing = _group_volume_type_mapping_get_all_by_group_volume_type( + context, group_id, volume_type_id) + if existing: + raise exception.GroupVolumeTypeMappingExists( + group_id=group_id, + volume_type_id=volume_type_id) + + mapping = models.GroupVolumeTypeMapping() + mapping.update({"group_id": group_id, + "volume_type_id": volume_type_id}) + + session = get_session() + with session.begin(): + try: + mapping.save(session=session) + except db_exc.DBDuplicateEntry: + raise exception.GroupVolumeTypeMappingExists( + group_id=group_id, + volume_type_id=volume_type_id) + return mapping + + +@handle_db_data_error +@require_context +def group_update(context, group_id, values): + session = get_session() + with session.begin(): + result = (model_query(context, models.Group, + project_only=True). + filter_by(id=group_id). + first()) + + if not result: + raise exception.GroupNotFound( + _("No group with id %s") % group_id) + + result.update(values) + result.save(session=session) + return result + + +@require_admin_context +def group_destroy(context, group_id): + session = get_session() + with session.begin(): + (model_query(context, models.Group, session=session). 
+ filter_by(id=group_id). + update({'status': fields.GroupStatus.DELETED, + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')})) + + (session.query(models.GroupVolumeTypeMapping). + filter_by(group_id=group_id). + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')})) + + +def group_has_group_snapshot_filter(): + return sql.exists().where(and_( + models.GroupSnapshot.group_id == models.Group.id, + ~models.GroupSnapshot.deleted)) + + +def group_has_volumes_filter(attached_or_with_snapshots=False): + query = sql.exists().where( + and_(models.Volume.group_id == models.Group.id, + ~models.Volume.deleted)) + + if attached_or_with_snapshots: + query = query.where(or_( + models.Volume.attach_status == 'attached', + sql.exists().where( + and_(models.Volume.id == models.Snapshot.volume_id, + ~models.Snapshot.deleted)))) + return query + + +def group_creating_from_src(group_id=None, group_snapshot_id=None): + # NOTE(geguileo): As explained in devref api_conditional_updates we use a + # subquery to trick MySQL into using the same table in the update and the + # where clause. + subq = sql.select([models.Group]).where( + and_(~models.Group.deleted, + models.Group.status == 'creating')).alias('group2') + + if group_id: + match_id = subq.c.source_group_id == group_id + elif group_snapshot_id: + match_id = subq.c.group_snapshot_id == group_snapshot_id + else: + msg = _('group_creating_from_src must be called with group_id or ' + 'group_snapshot_id parameter.') + raise exception.ProgrammingError(reason=msg) + + return sql.exists([subq]).where(match_id) + + ############################### @@ -4411,12 +5579,15 @@ def cgsnapshot_update(context, cgsnapshot_id, values): def cgsnapshot_destroy(context, cgsnapshot_id): session = get_session() with session.begin(): + updated_values = {'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')} model_query(context, models.Cgsnapshot, session=session).\ filter_by(id=cgsnapshot_id).\ - update({'status': 'deleted', - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + update(updated_values) + del updated_values['updated_at'] + return updated_values def cgsnapshot_creating_from_src(): @@ -4430,6 +5601,148 @@ def cgsnapshot_creating_from_src(): ############################### +@require_context +def _group_snapshot_get(context, group_snapshot_id, session=None): + result = model_query(context, models.GroupSnapshot, session=session, + project_only=True).\ + filter_by(id=group_snapshot_id).\ + first() + + if not result: + raise exception.GroupSnapshotNotFound( + group_snapshot_id=group_snapshot_id) + + return result + + +@require_context +def group_snapshot_get(context, group_snapshot_id): + return _group_snapshot_get(context, group_snapshot_id) + + +def _group_snapshot_get_all(context, project_id=None, group_id=None, + filters=None): + query = model_query(context, models.GroupSnapshot) + + if filters: + if not is_valid_model_filters(models.GroupSnapshot, filters): + return [] + query = query.filter_by(**filters) + + if project_id: + query = query.filter_by(project_id=project_id) + + if group_id: + query = query.filter_by(group_id=group_id) + + return query.all() + + +@require_admin_context +def group_snapshot_get_all(context, filters=None): + return _group_snapshot_get_all(context, filters=filters) + + +@require_admin_context +def 
group_snapshot_get_all_by_group(context, group_id, filters=None): + return _group_snapshot_get_all(context, group_id=group_id, filters=filters) + + +@require_context +def group_snapshot_get_all_by_project(context, project_id, filters=None): + authorize_project_context(context, project_id) + return _group_snapshot_get_all(context, project_id=project_id, + filters=filters) + + +@handle_db_data_error +@require_context +def group_snapshot_create(context, values): + if not values.get('id'): + values['id'] = six.text_type(uuid.uuid4()) + + group_id = values.get('group_id') + session = get_session() + model = models.GroupSnapshot + with session.begin(): + if group_id: + # There has to exist at least 1 volume in the group and the group + # cannot be updating the composing volumes or being created. + conditions = [ + sql.exists().where(and_( + ~models.Volume.deleted, + models.Volume.group_id == group_id)), + ~models.Group.deleted, + models.Group.id == group_id, + ~models.Group.status.in_(('creating', 'updating'))] + + # NOTE(geguileo): We build a "fake" from_select clause instead of + # using transaction isolation on the session because we would need + # SERIALIZABLE level and that would have a considerable performance + # penalty. + binds = (bindparam(k, v) for k, v in values.items()) + sel = session.query(*binds).filter(*conditions) + insert_stmt = model.__table__.insert().from_select(values.keys(), + sel) + result = session.execute(insert_stmt) + # If we couldn't insert the row because of the conditions raise + # the right exception + if not result.rowcount: + msg = _("Source group cannot be empty or in 'creating' or " + "'updating' state. No group snapshot will be created.") + raise exception.InvalidGroup(reason=msg) + else: + group_snapshot = model() + group_snapshot.update(values) + session.add(group_snapshot) + return _group_snapshot_get(context, values['id'], session=session) + + +@require_context +@handle_db_data_error +def group_snapshot_update(context, group_snapshot_id, values): + session = get_session() + with session.begin(): + result = model_query(context, models.GroupSnapshot, + project_only=True).\ + filter_by(id=group_snapshot_id).\ + first() + + if not result: + raise exception.GroupSnapshotNotFound( + _("No group snapshot with id %s") % group_snapshot_id) + + result.update(values) + result.save(session=session) + return result + + +@require_admin_context +def group_snapshot_destroy(context, group_snapshot_id): + session = get_session() + with session.begin(): + updated_values = {'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')} + model_query(context, models.GroupSnapshot, session=session).\ + filter_by(id=group_snapshot_id).\ + update(updated_values) + del updated_values['updated_at'] + return updated_values + + +def group_snapshot_creating_from_src(): + """Get a filter to check if a grp snapshot is being created from a grp.""" + return sql.exists().where(and_( + models.GroupSnapshot.group_id == models.Group.id, + ~models.GroupSnapshot.deleted, + models.GroupSnapshot.status == 'creating')) + + +############################### + + @require_admin_context def purge_deleted_rows(context, age_in_days): """Purge deleted rows older than age from cinder tables.""" @@ -4439,10 +5752,6 @@ def purge_deleted_rows(context, age_in_days): msg = _('Invalid value for age, %(age)s') % {'age': age_in_days} LOG.exception(msg) raise exception.InvalidParameterValue(msg) - if age_in_days <= 0: - msg = _('Must supply a positive 
value for age') - LOG.error(msg) - raise exception.InvalidParameterValue(msg) engine = get_engine() session = get_session() @@ -4457,7 +5766,8 @@ def purge_deleted_rows(context, age_in_days): # Reorder the list so the volumes and volume_types tables are last # to avoid FK constraints - for table in ("volume_types", "snapshots", "volumes"): + for table in ("volume_types", "quality_of_service_specs", + "snapshots", "volumes", "clusters"): tables.remove(table) tables.append(table) @@ -4469,6 +5779,14 @@ def purge_deleted_rows(context, age_in_days): deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days) try: with session.begin(): + # Delete child records first from quality_of_service_specs + # table to avoid FK constraints + if table == "quality_of_service_specs": + session.query(models.QualityOfServiceSpecs).filter( + and_(models.QualityOfServiceSpecs.specs_id.isnot( + None), models.QualityOfServiceSpecs.deleted == 1, + models.QualityOfServiceSpecs.deleted_at < + deleted_age)).delete() result = session.execute( t.delete() .where(t.c.deleted_at < deleted_age)) @@ -4592,11 +5910,14 @@ def message_destroy(context, message): session = get_session() now = timeutils.utcnow() with session.begin(): + updated_values = {'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')} (model_query(context, models.Message, session=session). filter_by(id=message.get('id')). - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')})) + update(updated_values)) + del updated_values['updated_at'] + return updated_values ############################### @@ -4644,7 +5965,12 @@ PAGINATION_HELPERS = { _process_consistencygroups_filters, _consistencygroup_get), models.Message: (_messages_get_query, _process_messages_filters, - _message_get) + _message_get), + models.GroupTypes: (_group_type_get_query, _process_group_types_filters, + _group_type_get_db_object), + models.Group: (_groups_get_query, + _process_groups_filters, + _group_get), } @@ -4710,6 +6036,107 @@ def image_volume_cache_get_all_for_host(context, host): all() +################### + + +def _worker_query(context, session=None, until=None, db_filters=None, + **filters): + # Remove all filters based on the workers table that are set to None + filters = _clean_filters(filters) + + if filters and not is_valid_model_filters(models.Worker, filters): + return None + + query = model_query(context, models.Worker, session=session) + + if until: + db_filters = list(db_filters) if db_filters else [] + # Since we set updated_at at creation time we don't need to check + # created_at field. 
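+ # A row whose updated_at is still at or before 'until' has not been + # touched since that time, so checking updated_at alone also covers rows + # that were only just created.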
+ db_filters.append(models.Worker.updated_at <= until) + + if db_filters: + query = query.filter(and_(*db_filters)) + + if filters: + query = query.filter_by(**filters) + + return query + + +def worker_create(context, **values): + """Create a worker entry from optional arguments.""" + worker = models.Worker(**values) + session = get_session() + try: + with session.begin(): + worker.save(session) + except db_exc.DBDuplicateEntry: + raise exception.WorkerExists(type=values.get('resource_type'), + id=values.get('resource_id')) + return worker + + +def worker_get(context, **filters): + """Get a worker or raise exception if it does not exist.""" + query = _worker_query(context, **filters) + worker = query.first() if query else None + if not worker: + raise exception.WorkerNotFound(**filters) + return worker + + +def worker_get_all(context, **filters): + """Get all workers that match given criteria.""" + query = _worker_query(context, **filters) + return query.all() if query else [] + + +def _orm_worker_update(worker, values): + if not worker: + return + for key, value in values.items(): + setattr(worker, key, value) + + +def worker_update(context, id, filters=None, orm_worker=None, **values): + """Update a worker with given values.""" + filters = filters or {} + query = _worker_query(context, id=id, **filters) + result = query.update(values) + if not result: + raise exception.WorkerNotFound(id=id, **filters) + _orm_worker_update(orm_worker, values) + return result + + +def worker_claim_for_cleanup(context, claimer_id, orm_worker): + """Claim a worker entry for cleanup.""" + # We set updated_at value so we are sure we update the DB entry even if the + # service_id is the same in the DB, thus flagging the claim. + values = {'service_id': claimer_id, + 'updated_at': timeutils.utcnow()} + + # We only update the worker entry if it hasn't been claimed by other host + # or thread + query = _worker_query(context, + status=orm_worker.status, + service_id=orm_worker.service_id, + until=orm_worker.updated_at, + id=orm_worker.id) + + result = query.update(values, synchronize_session=False) + if result: + _orm_worker_update(orm_worker, values) + return result + + +def worker_destroy(context, **filters): + """Delete a worker (no soft delete).""" + query = _worker_query(context, **filters) + return query.delete() + + ############################### @@ -4731,9 +6158,14 @@ def get_model_for_versioned_object(versioned_object): 'BackupImport': models.Backup, 'VolumeType': models.VolumeTypes, 'CGSnapshot': models.Cgsnapshot, + 'GroupType': models.GroupTypes, + 'GroupSnapshot': models.GroupSnapshot, } - model_name = versioned_object.obj_name() + if isinstance(versioned_object, six.string_types): + model_name = versioned_object + else: + model_name = versioned_object.obj_name() return (VO_TO_MODEL_EXCEPTIONS.get(model_name) or getattr(models, model_name)) @@ -4745,6 +6177,8 @@ def _get_get_method(model): GET_EXCEPTIONS = { models.ConsistencyGroup: consistencygroup_get, models.VolumeTypes: _volume_type_get_full, + models.QualityOfServiceSpecs: qos_specs_get, + models.GroupTypes: _group_type_get_full, } if model in GET_EXCEPTIONS: diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/075_add_cluster_and_fields.py b/cinder/db/sqlalchemy/migrate_repo/versions/075_add_cluster_and_fields.py new file mode 100644 index 000000000..ceb0a414b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/075_add_cluster_and_fields.py @@ -0,0 +1,58 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table, UniqueConstraint + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New cluster table + cluster = Table( + 'clusters', meta, + # Inherited fields from CinderBase + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(), default=False), + + # Cluster specific fields + Column('id', Integer, primary_key=True, nullable=False), + Column('name', String(255), nullable=False), + Column('binary', String(255), nullable=False), + Column('disabled', Boolean(), default=False), + Column('disabled_reason', String(255)), + Column('race_preventer', Integer, nullable=False, default=0), + + # To remove potential races on creation we have a constraint set on + # name and race_preventer fields, and we set value on creation to 0, so + # 2 clusters with the same name will fail this constraint. On deletion + # we change this field to the same value as the id which will be unique + # and will not conflict with the creation of another cluster with the + # same name. + UniqueConstraint('name', 'binary', 'race_preventer'), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + cluster.create() + + # Add the cluster flag to Service, ConsistencyGroup, and Volume tables. + for table_name in ('services', 'consistencygroups', 'volumes'): + table = Table(table_name, meta, autoload=True) + cluster_name = Column('cluster_name', String(255), nullable=True) + table.create_column(cluster_name) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/076_add_workers_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/076_add_workers_table.py new file mode 100644 index 000000000..3480346b5 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/076_add_workers_table.py @@ -0,0 +1,52 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
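+# NOTE: The (resource_type, resource_id) unique constraint below guarantees +# at most one cleanup entry per resource, and service_id is given a foreign +# key to services.id so that a worker row can be claimed by a specific +# service.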
+ +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table, UniqueConstraint +from migrate.changeset.constraint import ForeignKeyConstraint + + +def upgrade(migrate_engine): + """Add workers table.""" + meta = MetaData() + meta.bind = migrate_engine + + workers = Table( + 'workers', meta, + # Inherited fields from CinderBase + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(), default=False), + + # Workers table specific fields + Column('id', Integer, primary_key=True), + Column('resource_type', String(40), nullable=False), + Column('resource_id', String(36), nullable=False), + Column('status', String(255), nullable=False), + Column('service_id', Integer, nullable=True), + UniqueConstraint('resource_type', 'resource_id'), + + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + workers.create() + + services = Table('services', meta, autoload=True) + + ForeignKeyConstraint( + columns=[workers.c.service_id], + refcolumns=[services.c.id]).create() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/077_add_group_types_and_group_specs_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/077_add_group_types_and_group_specs_table.py new file mode 100644 index 000000000..2357dbf3a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/077_add_group_types_and_group_specs_table.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
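+# NOTE: This schema mirrors the existing volume type tables: group_types is +# the parent table, group_type_specs holds per-type key/value pairs, and +# group_type_projects restricts private group types to specific projects.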
+ +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import ForeignKey, MetaData, String, Table, UniqueConstraint + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New table + group_types = Table( + 'group_types', + meta, + Column('id', String(36), primary_key=True, nullable=False), + Column('name', String(255), nullable=False), + Column('description', String(255)), + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean), + Column('is_public', Boolean), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + group_types.create() + + # New table + group_type_specs = Table( + 'group_type_specs', + meta, + Column('id', Integer, primary_key=True, nullable=False), + Column('key', String(255)), + Column('value', String(255)), + Column('group_type_id', String(36), + ForeignKey('group_types.id'), + nullable=False), + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + group_type_specs.create() + + # New table + group_type_projects = Table( + 'group_type_projects', meta, + Column('id', Integer, primary_key=True, nullable=False), + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('group_type_id', String(36), + ForeignKey('group_types.id')), + Column('project_id', String(length=255)), + Column('deleted', Boolean(create_constraint=True, name=None)), + UniqueConstraint('group_type_id', 'project_id', 'deleted'), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + group_type_projects.create() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/078_add_groups_and_group_volume_type_mapping_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/078_add_groups_and_group_volume_type_mapping_table.py new file mode 100644 index 000000000..7acb83094 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/078_add_groups_and_group_volume_type_mapping_table.py @@ -0,0 +1,97 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import ForeignKey, MetaData, String, Table + +# Default number of quota groups. We should not read from config file. 
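+# (Hard-coding keeps the migration deterministic: reading cinder.conf here +# would make the inserted quota default depend on local configuration.)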
+DEFAULT_QUOTA_GROUPS = 10 + +CLASS_NAME = 'default' +CREATED_AT = datetime.datetime.now() # noqa + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New table + groups = Table( + 'groups', + meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean), + Column('id', String(36), primary_key=True, nullable=False), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('cluster_name', String(255)), + Column('host', String(length=255)), + Column('availability_zone', String(length=255)), + Column('name', String(length=255)), + Column('description', String(length=255)), + Column('group_type_id', String(length=36)), + Column('status', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + groups.create() + + # Add column to volumes table + volumes = Table('volumes', meta, autoload=True) + group_id = Column('group_id', String(36), + ForeignKey('groups.id')) + volumes.create_column(group_id) + volumes.update().values(group_id=None).execute() + + # New group_volume_type_mapping table + Table('volume_types', meta, autoload=True) + + grp_vt_mapping = Table( + 'group_volume_type_mapping', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('volume_type_id', String(36), ForeignKey('volume_types.id'), + nullable=False), + Column('group_id', String(36), + ForeignKey('groups.id'), nullable=False), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + grp_vt_mapping.create() + + # Add group quota data into DB. + quota_classes = Table('quota_classes', meta, autoload=True) + + rows = (quota_classes.count(). + where(quota_classes.c.resource == 'groups'). + execute().scalar()) + + # Do not add entries if there are already 'groups' entries. + if rows: + return + + # Set groups + qci = quota_classes.insert() + qci.execute({'created_at': CREATED_AT, + 'class_name': CLASS_NAME, + 'resource': 'groups', + 'hard_limit': DEFAULT_QUOTA_GROUPS, + 'deleted': False, }) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/079_add_group_snapshots.py b/cinder/db/sqlalchemy/migrate_repo/versions/079_add_group_snapshots.py new file mode 100644 index 000000000..5c52d425a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/079_add_group_snapshots.py @@ -0,0 +1,63 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
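+# NOTE: Besides creating the group_snapshots table, this migration links the +# existing tables to it: snapshots gain a group_snapshot_id foreign key, and +# groups gain group_snapshot_id and source_group_id columns for groups +# created from a snapshot or from another group.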
+ +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import ForeignKey, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + groups = Table('groups', meta, autoload=True) + + # New table + group_snapshots = Table( + 'group_snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', String(36), primary_key=True), + Column('group_id', String(36), + ForeignKey('groups.id'), + nullable=False), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('name', String(length=255)), + Column('description', String(length=255)), + Column('status', String(length=255)), + Column('group_type_id', String(length=36)), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + group_snapshots.create() + + # Add group_snapshot_id column to snapshots table + snapshots = Table('snapshots', meta, autoload=True) + group_snapshot_id = Column('group_snapshot_id', String(36), + ForeignKey('group_snapshots.id')) + + snapshots.create_column(group_snapshot_id) + snapshots.update().values(group_snapshot_id=None).execute() + + # Add group_snapshot_id column to groups table + group_snapshot_id = Column('group_snapshot_id', String(36)) + groups.create_column(group_snapshot_id) + + # Add source_group_id column to groups table + source_group_id = Column('source_group_id', String(36)) + groups.create_column(source_group_id) diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py index d7879cf98..1cbfd5477 100644 --- a/cinder/db/sqlalchemy/models.py +++ b/cinder/db/sqlalchemy/models.py @@ -23,10 +23,12 @@ SQLAlchemy models for cinder data. 
from oslo_config import cfg from oslo_db.sqlalchemy import models from oslo_utils import timeutils +from sqlalchemy import and_, func, select +from sqlalchemy import bindparam from sqlalchemy import Column, Integer, String, Text, schema from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import ForeignKey, DateTime, Boolean -from sqlalchemy.orm import relationship, backref, validates +from sqlalchemy import ForeignKey, DateTime, Boolean, UniqueConstraint +from sqlalchemy.orm import backref, column_property, relationship, validates CONF = cfg.CONF @@ -45,11 +47,17 @@ class CinderBase(models.TimestampMixin, deleted = Column(Boolean, default=False) metadata = None + @staticmethod + def delete_values(): + return {'deleted': True, + 'deleted_at': timeutils.utcnow()} + def delete(self, session): """Delete this object.""" - self.deleted = True - self.deleted_at = timeutils.utcnow() + updated_values = self.delete_values() + self.update(updated_values) self.save(session=session) + return updated_values class Service(BASE, CinderBase): @@ -57,8 +65,13 @@ class Service(BASE, CinderBase): __tablename__ = 'services' id = Column(Integer, primary_key=True) + cluster_name = Column(String(255), nullable=True) host = Column(String(255)) # , ForeignKey('hosts.id')) binary = Column(String(255)) + # We want to overwrite the default updated_at definition so we timestamp + # at creation as well; that way we only need to check updated_at for the + # heartbeat + updated_at = Column(DateTime, default=timeutils.utcnow, + onupdate=timeutils.utcnow) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) @@ -81,6 +94,63 @@ class Service(BASE, CinderBase): active_backend_id = Column(String(255)) frozen = Column(Boolean, nullable=False, default=False) + cluster = relationship('Cluster', + backref='services', + foreign_keys=cluster_name, + primaryjoin='and_(' + 'Service.cluster_name == Cluster.name,' + 'Service.deleted == False)') + + +class Cluster(BASE, CinderBase): + """Represents a cluster of hosts.""" + __tablename__ = 'clusters' + # To remove potential races on creation we have a constraint set on name + # and race_preventer fields, and we set value on creation to 0, so 2 + # clusters with the same name will fail this constraint. On deletion we + # change this field to the same value as the id which will be unique and + # will not conflict with the creation of another cluster with the same + # name. + __table_args__ = (UniqueConstraint('name', 'binary', 'race_preventer'),) + + id = Column(Integer, primary_key=True) + # NOTE(geguileo): Name is constructed in the same way as Service.host, but + # using the cluster configuration option instead of host. + name = Column(String(255), nullable=False) + binary = Column(String(255), nullable=False) + disabled = Column(Boolean, default=False) + disabled_reason = Column(String(255)) + race_preventer = Column(Integer, nullable=False, default=0) + + # Last heartbeat reported by any of the services of this cluster. This is + # not deferred since we always want to load this field. + last_heartbeat = column_property( + select([func.max(Service.updated_at)]). + where(and_(Service.cluster_name == name, ~Service.deleted)). + correlate_except(Service), deferred=False) + + # Number of existing services for this cluster + num_hosts = column_property( + select([func.count(Service.id)]). + where(and_(Service.cluster_name == name, ~Service.deleted)).
+ correlate_except(Service), + group='services_summary', deferred=True) + + # Number of services that are down for this cluster + num_down_hosts = column_property( + select([func.count(Service.id)]). + where(and_(Service.cluster_name == name, + ~Service.deleted, + Service.updated_at < bindparam('expired'))). + correlate_except(Service), + group='services_summary', deferred=True) + + @staticmethod + def delete_values(): + return {'race_preventer': Cluster.id, + 'deleted': True, + 'deleted_at': timeutils.utcnow()} + class ConsistencyGroup(BASE, CinderBase): """Represents a consistencygroup.""" @@ -90,6 +160,7 @@ class ConsistencyGroup(BASE, CinderBase): user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) + cluster_name = Column(String(255), nullable=True) host = Column(String(255)) availability_zone = Column(String(255)) name = Column(String(255)) @@ -100,6 +171,25 @@ class ConsistencyGroup(BASE, CinderBase): source_cgid = Column(String(36)) +class Group(BASE, CinderBase): + """Represents a generic volume group.""" + __tablename__ = 'groups' + id = Column(String(36), primary_key=True) + + user_id = Column(String(255), nullable=False) + project_id = Column(String(255), nullable=False) + + cluster_name = Column(String(255)) + host = Column(String(255)) + availability_zone = Column(String(255)) + name = Column(String(255)) + description = Column(String(255)) + status = Column(String(255)) + group_type_id = Column(String(36)) + group_snapshot_id = Column(String(36)) + source_group_id = Column(String(36)) + + class Cgsnapshot(BASE, CinderBase): """Represents a cgsnapshot.""" __tablename__ = 'cgsnapshots' @@ -120,6 +210,27 @@ class Cgsnapshot(BASE, CinderBase): primaryjoin='Cgsnapshot.consistencygroup_id == ConsistencyGroup.id') +class GroupSnapshot(BASE, CinderBase): + """Represents a group snapshot.""" + __tablename__ = 'group_snapshots' + id = Column(String(36), primary_key=True) + + group_id = Column(String(36), nullable=False) + user_id = Column(String(255)) + project_id = Column(String(255)) + + name = Column(String(255)) + description = Column(String(255)) + status = Column(String(255)) + group_type_id = Column(String(36)) + + group = relationship( + Group, + backref="group_snapshots", + foreign_keys=group_id, + primaryjoin='GroupSnapshot.group_id == Group.id') + + class Volume(BASE, CinderBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' @@ -144,6 +255,7 @@ class Volume(BASE, CinderBase): snapshot_id = Column(String(36)) + cluster_name = Column(String(255), nullable=True) host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
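As an illustration of the Cluster properties above: num_hosts and num_down_hosts are deferred into the 'services_summary' group, and num_down_hosts leaves 'expired' as a bind parameter, so a caller must opt in to loading them and supply the staleness cutoff at query time. A sketch under those assumptions (the session, query pattern, and 60-second window are illustrative, not part of the patch):

    import datetime

    from oslo_utils import timeutils
    from sqlalchemy.orm import undefer_group

    # Load the deferred summary columns and bind the cutoff used by the
    # num_down_hosts subquery.
    expired = timeutils.utcnow() - datetime.timedelta(seconds=60)
    clusters = (session.query(Cluster).
                options(undefer_group('services_summary')).
                params(expired=expired).
                all())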
@@ -168,6 +280,7 @@ class Volume(BASE, CinderBase): encryption_key_id = Column(String(36)) consistencygroup_id = Column(String(36)) + group_id = Column(String(36)) bootable = Column(Boolean, default=False) multiattach = Column(Boolean, default=False) @@ -184,6 +297,12 @@ class Volume(BASE, CinderBase): foreign_keys=consistencygroup_id, primaryjoin='Volume.consistencygroup_id == ConsistencyGroup.id') + group = relationship( + Group, + backref="volumes", + foreign_keys=group_id, + primaryjoin='Volume.group_id == Group.id') + class VolumeMetadata(BASE, CinderBase): """Represents a metadata key/value pair for a volume.""" @@ -251,6 +370,42 @@ class VolumeTypes(BASE, CinderBase): 'VolumeTypes.deleted == False)') +class GroupTypes(BASE, CinderBase): + """Represent possible group_types of groups offered.""" + __tablename__ = "group_types" + id = Column(String(36), primary_key=True) + name = Column(String(255)) + description = Column(String(255)) + is_public = Column(Boolean, default=True) + groups = relationship(Group, + backref=backref('group_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(' + 'Group.group_type_id == GroupTypes.id, ' + 'GroupTypes.deleted == False)') + + +class GroupVolumeTypeMapping(BASE, CinderBase): + """Represent mapping between groups and volume_types.""" + __tablename__ = "group_volume_type_mapping" + id = Column(Integer, primary_key=True, nullable=False) + volume_type_id = Column(String(36), + ForeignKey('volume_types.id'), + nullable=False) + group_id = Column(String(36), + ForeignKey('groups.id'), + nullable=False) + + group = relationship( + Group, + backref="volume_types", + foreign_keys=group_id, + primaryjoin='and_(' + 'GroupVolumeTypeMapping.group_id == Group.id,' + 'GroupVolumeTypeMapping.deleted == False)' + ) + + class VolumeTypeProjects(BASE, CinderBase): """Represent projects associated volume_types.""" __tablename__ = "volume_type_projects" @@ -273,6 +428,28 @@ class VolumeTypeProjects(BASE, CinderBase): 'VolumeTypeProjects.deleted == 0)') +class GroupTypeProjects(BASE, CinderBase): + """Represent projects associated group_types.""" + __tablename__ = "group_type_projects" + __table_args__ = (schema.UniqueConstraint( + "group_type_id", "project_id", "deleted", + name="uniq_group_type_projects0group_type_id0project_id0deleted"), + ) + id = Column(Integer, primary_key=True) + group_type_id = Column(Integer, ForeignKey('group_types.id'), + nullable=False) + project_id = Column(String(255)) + deleted = Column(Integer, default=0) + + group_type = relationship( + GroupTypes, + backref="projects", + foreign_keys=group_type_id, + primaryjoin='and_(' + 'GroupTypeProjects.group_type_id == GroupTypes.id,' + 'GroupTypeProjects.deleted == 0)') + + class VolumeTypeExtraSpecs(BASE, CinderBase): """Represents additional specs as key/value pairs for a volume_type.""" __tablename__ = 'volume_type_extra_specs' @@ -292,6 +469,25 @@ class VolumeTypeExtraSpecs(BASE, CinderBase): ) +class GroupTypeSpecs(BASE, CinderBase): + """Represents additional specs as key/value pairs for a group_type.""" + __tablename__ = 'group_type_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + group_type_id = Column(String(36), + ForeignKey('group_types.id'), + nullable=False) + group_type = relationship( + GroupTypes, + backref="group_specs", + foreign_keys=group_type_id, + primaryjoin='and_(' + 'GroupTypeSpecs.group_type_id == GroupTypes.id,' + 'GroupTypeSpecs.deleted == False)' + ) + + class QualityOfServiceSpecs(BASE, 
CinderBase): """Represents QoS specs as key/value pairs. @@ -467,6 +663,7 @@ class Snapshot(BASE, CinderBase): volume_id = Column(String(36)) cgsnapshot_id = Column(String(36)) + group_snapshot_id = Column(String(36)) status = Column(String(255)) progress = Column(String(255)) volume_size = Column(Integer) @@ -491,6 +688,12 @@ class Snapshot(BASE, CinderBase): foreign_keys=cgsnapshot_id, primaryjoin='Snapshot.cgsnapshot_id == Cgsnapshot.id') + group_snapshot = relationship( + GroupSnapshot, + backref="snapshots", + foreign_keys=group_snapshot_id, + primaryjoin='Snapshot.group_snapshot_id == GroupSnapshot.id') + class SnapshotMetadata(BASE, CinderBase): """Represents a metadata key/value pair for a snapshot.""" @@ -628,28 +831,38 @@ class ImageVolumeCacheEntry(BASE, models.ModelBase): last_used = Column(DateTime, default=lambda: timeutils.utcnow()) -def register_models(): - """Register Models and create metadata. +class Worker(BASE, CinderBase): + """Represents all resources that are being worked on by a node.""" + __tablename__ = 'workers' + __table_args__ = (schema.UniqueConstraint('resource_type', 'resource_id'), + {'mysql_engine': 'InnoDB'}) - Called from cinder.db.sqlalchemy.__init__ as part of loading the driver, - it will never need to be called explicitly elsewhere unless the - connection is lost and needs to be reestablished. - """ - from sqlalchemy import create_engine - models = (Backup, - Service, - Volume, - VolumeMetadata, - VolumeAdminMetadata, - VolumeAttachment, - SnapshotMetadata, - Transfer, - VolumeTypeExtraSpecs, - VolumeTypes, - VolumeGlanceMetadata, - ConsistencyGroup, - Cgsnapshot - ) - engine = create_engine(CONF.database.connection, echo=False) - for model in models: - model.metadata.create_all(engine) + # We want to overwrite the default updated_at definition so we timestamp + # at creation as well + updated_at = Column(DateTime, default=timeutils.utcnow, + onupdate=timeutils.utcnow) + + # Id added for convenience and speed on some operations + id = Column(Integer, primary_key=True) + + # Type of the resource we are working on (Volume, Snapshot, Backup); it + # must match the Versioned Object class name.
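+ # For example, a worker cleaning up a volume deletion would store + # resource_type='Volume' and the volume's UUID as resource_id.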
+ resource_type = Column(String(40), primary_key=True, nullable=False) + # UUID of the resource we are working on + resource_id = Column(String(36), primary_key=True, nullable=False) + + # Status that should be cleaned on service failure + status = Column(String(255), nullable=False) + + # Service that is currently processing the operation + service_id = Column(Integer, nullable=True) + + # This is a flag we don't need to store in the DB as it is only used when + # we are doing the cleanup to let decorators know + cleaning = False + + service = relationship( + 'Service', + backref="workers", + foreign_keys=service_id, + primaryjoin='Worker.service_id == Service.id') diff --git a/cinder/exception.py b/cinder/exception.py index 246ab6459..79a763bdc 100644 --- a/cinder/exception.py +++ b/cinder/exception.py @@ -204,6 +204,10 @@ class InvalidVolumeType(Invalid): message = _("Invalid volume type: %(reason)s") +class InvalidGroupType(Invalid): + message = _("Invalid group type: %(reason)s") + + class InvalidVolume(Invalid): message = _("Invalid volume: %(reason)s") @@ -265,6 +269,10 @@ class InvalidGlobalAPIVersion(Invalid): "is %(min_ver)s and maximum is %(max_ver)s.") +class MissingRequired(Invalid): + message = _("Missing required element '%(element)s' in request body.") + + class APIException(CinderException): message = _("Error while requesting %(service)s API.") @@ -284,6 +292,10 @@ class RPCTimeout(CinderException): code = 502 +class Duplicate(CinderException): + pass + + class NotFound(CinderException): message = _("Resource could not be found.") code = 404 @@ -350,6 +362,30 @@ class VolumeTypeInUse(CinderException): "volumes present with the type.") +class GroupTypeNotFound(NotFound): + message = _("Group type %(group_type_id)s could not be found.") + + +class GroupTypeNotFoundByName(GroupTypeNotFound): + message = _("Group type with name %(group_type_name)s " + "could not be found.") + + +class GroupTypeAccessNotFound(NotFound): + message = _("Group type access not found for %(group_type_id)s / " + "%(project_id)s combination.") + + +class GroupTypeSpecsNotFound(NotFound): + message = _("Group Type %(group_type_id)s has no specs with " + "key %(group_specs_key)s.") + + +class GroupTypeInUse(CinderException): + message = _("Group Type %(group_type_id)s deletion is not allowed with " + "groups present with the type.") + + class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") @@ -394,6 +430,32 @@ class ServiceTooOld(Invalid): message = _("Service is too old to fulfil this request.") +class WorkerNotFound(NotFound): + message = _("Worker with %s could not be found.") + + def __init__(self, message=None, **kwargs): + keys_list = ('{0}=%({0})s'.format(key) for key in kwargs) + placeholder = ', '.join(keys_list) + self.message = self.message % placeholder + super(WorkerNotFound, self).__init__(message, **kwargs) + + +class WorkerExists(Duplicate): + message = _("Worker for %(type)s %(id)s already exists.") + + +class ClusterNotFound(NotFound): + message = _('Cluster %(id)s could not be found.') + + +class ClusterHasHosts(Invalid): + message = _("Cluster %(id)s still has hosts.") + + +class ClusterExists(Duplicate): + message = _("Cluster %(name)s already exists.") + + class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") @@ -452,10 +514,6 @@ class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") -class Duplicate(CinderException): - pass - - class VolumeTypeExists(Duplicate): message = _("Volume Type 
%(id)s already exists.") @@ -473,6 +531,28 @@ class VolumeTypeEncryptionNotFound(NotFound): message = _("Volume type encryption for type %(type_id)s does not exist.") +class GroupTypeExists(Duplicate): + message = _("Group Type %(id)s already exists.") + + +class GroupTypeAccessExists(Duplicate): + message = _("Group type access for %(group_type_id)s / " + "%(project_id)s combination already exists.") + + +class GroupVolumeTypeMappingExists(Duplicate): + message = _("Group volume type mapping for %(group_id)s / " + "%(volume_type_id)s combination already exists.") + + +class GroupTypeEncryptionExists(Invalid): + message = _("Group type encryption for type %(type_id)s already exists.") + + +class GroupTypeEncryptionNotFound(NotFound): + message = _("Group type encryption for type %(type_id)s does not exist.") + + class MalformedRequestBody(CinderException): message = _("Malformed message body: %(reason)s") @@ -565,6 +645,15 @@ class VolumeTypeUpdateFailed(CinderException): message = _("Cannot update volume_type %(id)s") +class GroupTypeCreateFailed(CinderException): + message = _("Cannot create group_type with " + "name %(name)s and specs %(group_specs)s") + + +class GroupTypeUpdateFailed(CinderException): + message = _("Cannot update group_type %(id)s") + + class UnknownCmd(VolumeDriverException): message = _("Unknown or unsupported command %(cmd)s") @@ -791,6 +880,10 @@ class VolumeGroupCreationFailed(CinderException): message = _('Failed to create Volume Group: %(vg_name)s') +class VolumeNotDeactivated(CinderException): + message = _('Volume %(name)s was not deactivated in time.') + + class VolumeDeviceNotFound(CinderException): message = _('Volume device not found at %(device)s.') @@ -956,6 +1049,15 @@ class InvalidConsistencyGroup(Invalid): message = _("Invalid ConsistencyGroup: %(reason)s") +# Group +class GroupNotFound(NotFound): + message = _("Group %(group_id)s could not be found.") + + +class InvalidGroup(Invalid): + message = _("Invalid Group: %(reason)s") + + # CgSnapshot class CgSnapshotNotFound(NotFound): message = _("CgSnapshot %(cgsnapshot_id)s could not be found.") @@ -965,6 +1067,15 @@ class InvalidCgSnapshot(Invalid): message = _("Invalid CgSnapshot: %(reason)s") +# GroupSnapshot +class GroupSnapshotNotFound(NotFound): + message = _("GroupSnapshot %(group_snapshot_id)s could not be found.") + + +class InvalidGroupSnapshot(Invalid): + message = _("Invalid GroupSnapshot: %(reason)s") + + # Hitachi Block Storage Driver class HBSDError(CinderException): message = _("HBSD error occurs.") @@ -1187,13 +1298,17 @@ class KaminarioCinderDriverException(VolumeDriverException): message = _("KaminarioCinderDriver failure: %(reason)s") +class KaminarioRetryableException(VolumeDriverException): + message = _("Kaminario retryable exception: %(reason)s") + + # Synology driver class SynoAPIHTTPError(CinderException): message = _("HTTP exit code: [%(code)s]") class SynoAuthError(CinderException): - pass + message = _("Synology driver authentication failed: %(reason)s.") class SynoLUNNotExist(CinderException): diff --git a/cinder/group/__init__.py b/cinder/group/__init__.py new file mode 100644 index 000000000..ff7c58ed3 --- /dev/null +++ b/cinder/group/__init__.py @@ -0,0 +1,27 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Importing full names to not pollute the namespace and cause possible +# collisions with use of 'from cinder.group import ' elsewhere. + +from oslo_utils import importutils + +from cinder.common import config + + +CONF = config.CONF + +API = importutils.import_class( + CONF.group_api_class) diff --git a/cinder/group/api.py b/cinder/group/api.py new file mode 100644 index 000000000..eac10495a --- /dev/null +++ b/cinder/group/api.py @@ -0,0 +1,838 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to groups. +""" + + +import functools + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import timeutils + +from cinder.common import constants +from cinder import db +from cinder.db import base +from cinder import exception +from cinder.i18n import _, _LE, _LI, _LW +from cinder import objects +from cinder.objects import base as objects_base +from cinder.objects import fields as c_fields +import cinder.policy +from cinder import quota +from cinder.scheduler import rpcapi as scheduler_rpcapi +from cinder.volume import api as volume_api +from cinder.volume import rpcapi as volume_rpcapi +from cinder.volume import utils as vol_utils +from cinder.volume import volume_types + + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) +GROUP_QUOTAS = quota.GROUP_QUOTAS +VALID_REMOVE_VOL_FROM_GROUP_STATUS = ( + 'available', + 'in-use', + 'error', + 'error_deleting') +VALID_ADD_VOL_TO_GROUP_STATUS = ( + 'available', + 'in-use') + + +def wrap_check_policy(func): + """Check policy corresponding to the wrapped methods prior to execution.
+ + This decorator requires the first 3 args of the wrapped function + to be (self, context, group) + """ + @functools.wraps(func) + def wrapped(self, context, target_obj, *args, **kwargs): + check_policy(context, func.__name__, target_obj) + return func(self, context, target_obj, *args, **kwargs) + + return wrapped + + +def check_policy(context, action, target_obj=None): + target = { + 'project_id': context.project_id, + 'user_id': context.user_id, + } + + if isinstance(target_obj, objects_base.CinderObject): + # Turn object into dict so target.update can work + target.update( + target_obj.obj_to_primitive()['versioned_object.data'] or {}) + else: + target.update(target_obj or {}) + + _action = 'group:%s' % action + cinder.policy.enforce(context, _action, target) + + +class API(base.Base): + """API for interacting with the volume manager for groups.""" + + def __init__(self, db_driver=None): + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self.volume_rpcapi = volume_rpcapi.VolumeAPI() + self.volume_api = volume_api.API() + + super(API, self).__init__(db_driver) + + def _extract_availability_zone(self, availability_zone): + raw_zones = self.volume_api.list_availability_zones(enable_cache=True) + availability_zones = set([az['name'] for az in raw_zones]) + if CONF.storage_availability_zone: + availability_zones.add(CONF.storage_availability_zone) + + if availability_zone is None: + if CONF.default_availability_zone: + availability_zone = CONF.default_availability_zone + else: + # For backwards compatibility use the storage_availability_zone + availability_zone = CONF.storage_availability_zone + + if availability_zone not in availability_zones: + if CONF.allow_availability_zone_fallback: + original_az = availability_zone + availability_zone = ( + CONF.default_availability_zone or + CONF.storage_availability_zone) + LOG.warning(_LW("Availability zone '%(s_az)s' " + "not found, falling back to " + "'%(s_fallback_az)s'."), + {'s_az': original_az, + 's_fallback_az': availability_zone}) + else: + msg = _("Availability zone '%(s_az)s' is invalid.") + msg = msg % {'s_az': availability_zone} + raise exception.InvalidInput(reason=msg) + + return availability_zone + + def create(self, context, name, description, group_type, + volume_types, availability_zone=None): + check_policy(context, 'create') + + req_volume_types = [] + # NOTE: Admin context is required to get extra_specs of volume_types. 
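
The availability-zone fallback implemented in _extract_availability_zone() above mirrors the volume API: a missing zone falls back to default_availability_zone and then storage_availability_zone, and an unknown zone either falls back (when allow_availability_zone_fallback is set) or raises InvalidInput. A minimal standalone sketch of that decision logic; the function and parameter names here are illustrative, not the actual config plumbing:

    def pick_zone(requested, known_zones, default_az=None,
                  storage_az='nova', allow_fallback=False):
        # None means "use the configured default, then the storage AZ".
        if requested is None:
            requested = default_az or storage_az
        if requested not in known_zones:
            if allow_fallback:
                return default_az or storage_az
            raise ValueError("Availability zone '%s' is invalid." % requested)
        return requested

    assert pick_zone(None, {'nova'}) == 'nova'
    assert pick_zone('az9', {'nova'}, allow_fallback=True) == 'nova'
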
+ req_volume_types = (self.db.volume_types_get_by_name_or_id( + context.elevated(), volume_types)) + + req_group_type = self.db.group_type_get(context, group_type) + + availability_zone = self._extract_availability_zone(availability_zone) + kwargs = {'user_id': context.user_id, + 'project_id': context.project_id, + 'availability_zone': availability_zone, + 'status': c_fields.GroupStatus.CREATING, + 'name': name, + 'description': description, + 'volume_type_ids': volume_types, + 'group_type_id': group_type} + group = None + try: + group = objects.Group(context=context, **kwargs) + group.create() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error occurred when creating group" + " %s."), name) + + request_spec_list = [] + filter_properties_list = [] + for req_volume_type in req_volume_types: + request_spec = {'volume_type': req_volume_type.copy(), + 'group_id': group.id} + filter_properties = {} + request_spec_list.append(request_spec) + filter_properties_list.append(filter_properties) + + group_spec = {'group_type': req_group_type.copy(), + 'group_id': group.id} + group_filter_properties = {} + + # Update quota for groups + self.update_quota(context, group, 1) + + self._cast_create_group(context, group, + group_spec, + request_spec_list, + group_filter_properties, + filter_properties_list) + + return group + + def create_from_src(self, context, name, description=None, + group_snapshot_id=None, source_group_id=None): + check_policy(context, 'create') + + kwargs = { + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': c_fields.GroupStatus.CREATING, + 'name': name, + 'description': description, + 'group_snapshot_id': group_snapshot_id, + 'source_group_id': source_group_id, + } + + group = None + try: + group = objects.Group(context=context, **kwargs) + group.create(group_snapshot_id=group_snapshot_id, + source_group_id=source_group_id) + except exception.GroupNotFound: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Source Group %(source_group)s not found when " + "creating group %(group)s from " + "source."), + {'group': name, 'source_group': source_group_id}) + except exception.GroupSnapshotNotFound: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Group snapshot %(group_snap)s not found when " + "creating group %(group)s from source."), + {'group': name, 'group_snap': group_snapshot_id}) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error occurred when creating group" + " %(group)s from group_snapshot %(grp_snap)s."), + {'group': name, 'grp_snap': group_snapshot_id}) + + # Update quota for groups + self.update_quota(context, group, 1) + + if not group.host: + msg = _("No host to create group %s.") % group.id + LOG.error(msg) + raise exception.InvalidGroup(reason=msg) + + if group_snapshot_id: + self._create_group_from_group_snapshot(context, group, + group_snapshot_id) + elif source_group_id: + self._create_group_from_source_group(context, group, + source_group_id) + + return group + + def _create_group_from_group_snapshot(self, context, group, + group_snapshot_id): + try: + group_snapshot = objects.GroupSnapshot.get_by_id( + context, group_snapshot_id) + snapshots = objects.SnapshotList.get_all_for_group_snapshot( + context, group_snapshot.id) + + if not snapshots: + msg = _("Group snapshot is empty. 
No group will be created.") + raise exception.InvalidGroup(reason=msg) + + for snapshot in snapshots: + kwargs = {} + kwargs['availability_zone'] = group.availability_zone + kwargs['group_snapshot'] = group_snapshot + kwargs['group'] = group + kwargs['snapshot'] = snapshot + volume_type_id = snapshot.volume_type_id + if volume_type_id: + kwargs['volume_type'] = volume_types.get_volume_type( + context, volume_type_id) + # Create group volume_type mapping entries + try: + db.group_volume_type_mapping_create(context, group.id, + volume_type_id) + except exception.GroupVolumeTypeMappingExists: + # Only need to create one group volume_type mapping + # entry for the same combination, skipping. + LOG.info(_LI("A mapping entry already exists for group" + " %(grp)s and volume type %(vol_type)s. " + "Do not need to create again."), + {'grp': group.id, + 'vol_type': volume_type_id}) + pass + + # Since group snapshot is passed in, the following call will + # create a db entry for the volume, but will not call the + # volume manager to create a real volume in the backend yet. + # If error happens, taskflow will handle rollback of quota + # and removal of volume entry in the db. + try: + self.volume_api.create(context, + snapshot.volume_size, + None, + None, + **kwargs) + except exception.CinderException: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error occurred when creating volume " + "entry from snapshot in the process of " + "creating group %(group)s " + "from group snapshot %(group_snap)s."), + {'group': group.id, + 'group_snap': group_snapshot.id}) + except Exception: + with excutils.save_and_reraise_exception(): + try: + group.destroy() + finally: + LOG.error(_LE("Error occurred when creating group " + "%(group)s from group snapshot " + "%(group_snap)s."), + {'group': group.id, + 'group_snap': group_snapshot.id}) + + volumes = objects.VolumeList.get_all_by_generic_group(context, + group.id) + for vol in volumes: + # Update the host field for the volume. + vol.host = group.host + vol.save() + + self.volume_rpcapi.create_group_from_src( + context, group, group_snapshot) + + def _create_group_from_source_group(self, context, group, + source_group_id): + try: + source_group = objects.Group.get_by_id(context, + source_group_id) + source_vols = objects.VolumeList.get_all_by_generic_group( + context, source_group.id) + + if not source_vols: + msg = _("Source Group is empty. No group " + "will be created.") + raise exception.InvalidGroup(reason=msg) + + for source_vol in source_vols: + kwargs = {} + kwargs['availability_zone'] = group.availability_zone + kwargs['source_group'] = source_group + kwargs['group'] = group + kwargs['source_volume'] = source_vol + volume_type_id = source_vol.volume_type_id + if volume_type_id: + kwargs['volume_type'] = volume_types.get_volume_type( + context, volume_type_id) + # Create group volume_type mapping entries + try: + db.group_volume_type_mapping_create(context, group.id, + volume_type_id) + except exception.GroupVolumeTypeMappingExists: + # Only need to create one group volume_type mapping + # entry for the same combination, skipping. + LOG.info(_LI("A mapping entry already exists for group" + " %(grp)s and volume type %(vol_type)s. " + "Do not need to create again."), + {'grp': group.id, + 'vol_type': volume_type_id}) + pass + + # Since source_group is passed in, the following call will + # create a db entry for the volume, but will not call the + # volume manager to create a real volume in the backend yet. 
+ # If error happens, taskflow will handle rollback of quota + # and removal of volume entry in the db. + try: + self.volume_api.create(context, + source_vol.size, + None, + None, + **kwargs) + except exception.CinderException: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error occurred when creating cloned " + "volume in the process of creating " + "group %(group)s from " + "source group %(source_group)s."), + {'group': group.id, + 'source_group': source_group.id}) + except Exception: + with excutils.save_and_reraise_exception(): + try: + group.destroy() + finally: + LOG.error(_LE("Error occurred when creating " + "group %(group)s from source group " + "%(source_group)s."), + {'group': group.id, + 'source_group': source_group.id}) + + volumes = objects.VolumeList.get_all_by_generic_group(context, + group.id) + for vol in volumes: + # Update the host field for the volume. + vol.host = group.host + vol.save() + + self.volume_rpcapi.create_group_from_src(context, group, + None, source_group) + + def _cast_create_group(self, context, group, + group_spec, + request_spec_list, + group_filter_properties, + filter_properties_list): + + try: + for request_spec in request_spec_list: + volume_type = request_spec.get('volume_type') + volume_type_id = None + if volume_type: + volume_type_id = volume_type.get('id') + + specs = {} + if volume_type_id: + qos_specs = volume_types.get_volume_type_qos_specs( + volume_type_id) + specs = qos_specs['qos_specs'] + if not specs: + # to make sure we don't pass empty dict + specs = None + + volume_properties = { + 'size': 0, # Need to populate size for the scheduler + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': 'creating', + 'attach_status': 'detached', + 'encryption_key_id': request_spec.get('encryption_key_id'), + 'display_description': request_spec.get('description'), + 'display_name': request_spec.get('name'), + 'volume_type_id': volume_type_id, + 'group_type_id': group.group_type_id, + } + + request_spec['volume_properties'] = volume_properties + request_spec['qos_specs'] = specs + + group_properties = { + 'size': 0, # Need to populate size for the scheduler + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': 'creating', + 'display_description': group_spec.get('description'), + 'display_name': group_spec.get('name'), + 'group_type_id': group.group_type_id, + } + + group_spec['volume_properties'] = group_properties + group_spec['qos_specs'] = None + + except Exception: + with excutils.save_and_reraise_exception(): + try: + group.destroy() + finally: + LOG.error(_LE("Error occurred when building " + "request spec list for group " + "%s."), group.id) + + # Cast to the scheduler and let it handle whatever is needed + # to select the target host for this group. 
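
Concretely, the cast that follows carries one group_spec plus one request_spec per volume type in the group. Shaped roughly as below, with illustrative values and most volume_properties keys omitted; only the keys assembled in _cast_create_group above are real:

    group_spec = {
        'group_type': {'id': 'gt-uuid'},      # copy of the group type
        'group_id': 'group-uuid',
        'volume_properties': {'size': 0,      # scheduler requires a size
                              'status': 'creating',
                              'group_type_id': 'gt-uuid'},
        'qos_specs': None,
    }
    request_spec_list = [{
        'volume_type': {'id': 'vt-uuid'},     # one entry per volume type
        'group_id': 'group-uuid',
        'volume_properties': {'size': 0,
                              'status': 'creating',
                              'attach_status': 'detached',
                              'volume_type_id': 'vt-uuid'},
        'qos_specs': None,
    }]
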
+ self.scheduler_rpcapi.create_group( + context, + constants.VOLUME_TOPIC, + group, + group_spec=group_spec, + request_spec_list=request_spec_list, + group_filter_properties=group_filter_properties, + filter_properties_list=filter_properties_list) + + def update_quota(self, context, group, num, project_id=None): + reserve_opts = {'groups': num} + try: + reservations = GROUP_QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) + if reservations: + GROUP_QUOTAS.commit(context, reservations) + except Exception: + with excutils.save_and_reraise_exception(): + try: + group.destroy() + finally: + LOG.error(_LE("Failed to update quota for " + "group %s."), group.id) + + @wrap_check_policy + def delete(self, context, group, delete_volumes=False): + if not group.host: + self.update_quota(context, group, -1, group.project_id) + + LOG.debug("No host for group %s. Deleting from " + "the database.", group.id) + group.destroy() + + return + + if not delete_volumes and group.status not in ( + [c_fields.GroupStatus.AVAILABLE, + c_fields.GroupStatus.ERROR]): + msg = _("Group status must be available or error, " + "but current status is: %s") % group.status + raise exception.InvalidGroup(reason=msg) + + volumes = self.db.volume_get_all_by_generic_group(context.elevated(), + group.id) + if volumes and not delete_volumes: + msg = (_("Group %s still contains volumes. " + "The delete-volumes flag is required to delete it.") + % group.id) + LOG.error(msg) + raise exception.InvalidGroup(reason=msg) + + volumes_model_update = [] + for volume in volumes: + if volume['attach_status'] == "attached": + msg = _("Volume in group %s is attached. " + "Need to detach first.") % group.id + LOG.error(msg) + raise exception.InvalidGroup(reason=msg) + + snapshots = objects.SnapshotList.get_all_for_volume(context, + volume['id']) + if snapshots: + msg = _("Volume in group still has " + "dependent snapshots.") + LOG.error(msg) + raise exception.InvalidGroup(reason=msg) + + volumes_model_update.append({'id': volume['id'], + 'status': 'deleting'}) + + self.db.volumes_update(context, volumes_model_update) + + group.status = c_fields.GroupStatus.DELETING + group.terminated_at = timeutils.utcnow() + group.save() + + self.volume_rpcapi.delete_group(context, group) + + def update(self, context, group, name, description, + add_volumes, remove_volumes): + """Update group.""" + if group.status != c_fields.GroupStatus.AVAILABLE: + msg = _("Group status must be available, " + "but current status is: %s.") % group.status + raise exception.InvalidGroup(reason=msg) + + add_volumes_list = [] + remove_volumes_list = [] + if add_volumes: + add_volumes = add_volumes.strip(',') + add_volumes_list = add_volumes.split(',') + if remove_volumes: + remove_volumes = remove_volumes.strip(',') + remove_volumes_list = remove_volumes.split(',') + + invalid_uuids = [] + for uuid in add_volumes_list: + if uuid in remove_volumes_list: + invalid_uuids.append(uuid) + if invalid_uuids: + msg = _("UUIDs %s are in both add and remove volume " + "list.") % invalid_uuids + raise exception.InvalidVolume(reason=msg) + + volumes = self.db.volume_get_all_by_generic_group(context, group.id) + + # Validate name. + if name == group.name: + name = None + + # Validate description. + if description == group.description: + description = None + + # Validate volumes in add_volumes and remove_volumes. 
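
The add_volumes/remove_volumes arguments validated below are plain comma-separated UUID strings. A quick sketch of the parsing and overlap check performed earlier in update(), with made-up UUIDs:

    add_volumes = "uuid-1,uuid-2,"            # trailing comma is tolerated
    remove_volumes = "uuid-2"
    add_list = add_volumes.strip(',').split(',')
    remove_list = remove_volumes.strip(',').split(',')
    # A UUID appearing in both lists is rejected with InvalidVolume.
    invalid = [u for u in add_list if u in remove_list]
    assert invalid == ['uuid-2']
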
+        add_volumes_new = ""
+        remove_volumes_new = ""
+        if add_volumes_list:
+            add_volumes_new = self._validate_add_volumes(
+                context, volumes, add_volumes_list, group)
+        if remove_volumes_list:
+            remove_volumes_new = self._validate_remove_volumes(
+                volumes, remove_volumes_list, group)
+
+        if (name is None and description is None and not add_volumes_new and
+                not remove_volumes_new):
+            msg = (_("Cannot update group %(group_id)s "
+                     "because no valid name, description, add_volumes, "
+                     "or remove_volumes were provided.") %
+                   {'group_id': group.id})
+            raise exception.InvalidGroup(reason=msg)
+
+        fields = {'updated_at': timeutils.utcnow()}
+
+        # Update name and description in db now. No need to
+        # send them over through an RPC call.
+        if name is not None:
+            fields['name'] = name
+        if description is not None:
+            fields['description'] = description
+        if not add_volumes_new and not remove_volumes_new:
+            # Only update name or description. Set status to available.
+            fields['status'] = 'available'
+        else:
+            fields['status'] = 'updating'
+
+        group.update(fields)
+        group.save()
+
+        # Do an RPC call only if the update request includes
+        # adding/removing volumes. add_volumes_new and remove_volumes_new
+        # are strings of volume UUIDs separated by commas with no spaces
+        # in between.
+        if add_volumes_new or remove_volumes_new:
+            self.volume_rpcapi.update_group(
+                context, group,
+                add_volumes=add_volumes_new,
+                remove_volumes=remove_volumes_new)
+
+    def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
+        # Validate volumes in remove_volumes.
+        remove_volumes_new = ""
+        for volume in volumes:
+            if volume['id'] in remove_volumes_list:
+                if volume['status'] not in VALID_REMOVE_VOL_FROM_GROUP_STATUS:
+                    msg = (_("Cannot remove volume %(volume_id)s from "
+                             "group %(group_id)s because volume "
+                             "is in an invalid state: %(status)s. Valid "
+                             "states are: %(valid)s.") %
+                           {'volume_id': volume['id'],
+                            'group_id': group.id,
+                            'status': volume['status'],
+                            'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
+                    raise exception.InvalidVolume(reason=msg)
+                # Volume currently in group. It will be removed from group.
+                if remove_volumes_new:
+                    remove_volumes_new += ","
+                remove_volumes_new += volume['id']
+
+        for rem_vol in remove_volumes_list:
+            if rem_vol not in remove_volumes_new:
+                msg = (_("Cannot remove volume %(volume_id)s from "
+                         "group %(group_id)s because it "
+                         "is not in the group.") %
+                       {'volume_id': rem_vol,
+                        'group_id': group.id})
+                raise exception.InvalidVolume(reason=msg)
+
+        return remove_volumes_new
+
+    def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
+        add_volumes_new = ""
+        for volume in volumes:
+            if volume['id'] in add_volumes_list:
+                # Volume already in group. Remove from add_volumes.
+                add_volumes_list.remove(volume['id'])
+
+        for add_vol in add_volumes_list:
+            try:
+                add_vol_ref = self.db.volume_get(context, add_vol)
+            except exception.VolumeNotFound:
+                msg = (_("Cannot add volume %(volume_id)s to "
+                         "group %(group_id)s because volume cannot be "
+                         "found.") %
+                       {'volume_id': add_vol,
+                        'group_id': group.id})
+                raise exception.InvalidVolume(reason=msg)
+            orig_group = add_vol_ref.get('group_id', None)
+            if orig_group:
+                # If volume to be added is already in the group to be updated,
+                # it should have been removed from the add_volumes_list in the
+                # beginning of this function. If we are here, it means it is
+                # in a different group.
+                msg = (_("Cannot add volume %(volume_id)s to group "
+                         "%(group_id)s because it is already in "
+                         "group %(orig_group)s.") %
+                       {'volume_id': add_vol_ref['id'],
+                        'group_id': group.id,
+                        'orig_group': orig_group})
+                raise exception.InvalidVolume(reason=msg)
+            if add_vol_ref:
+                add_vol_type_id = add_vol_ref.get('volume_type_id', None)
+                if not add_vol_type_id:
+                    msg = (_("Cannot add volume %(volume_id)s to group "
+                             "%(group_id)s because it has no volume "
+                             "type.") %
+                           {'volume_id': add_vol_ref['id'],
+                            'group_id': group.id})
+                    raise exception.InvalidVolume(reason=msg)
+                vol_type_ids = [v_type.id for v_type in group.volume_types]
+                if add_vol_type_id not in vol_type_ids:
+                    msg = (_("Cannot add volume %(volume_id)s to group "
+                             "%(group_id)s because volume type "
+                             "%(volume_type)s is not supported by the "
+                             "group.") %
+                           {'volume_id': add_vol_ref['id'],
+                            'group_id': group.id,
+                            'volume_type': add_vol_type_id})
+                    raise exception.InvalidVolume(reason=msg)
+                if (add_vol_ref['status'] not in
+                        VALID_ADD_VOL_TO_GROUP_STATUS):
+                    msg = (_("Cannot add volume %(volume_id)s to group "
+                             "%(group_id)s because volume is in an "
+                             "invalid state: %(status)s. Valid states are: "
+                             "%(valid)s.") %
+                           {'volume_id': add_vol_ref['id'],
+                            'group_id': group.id,
+                            'status': add_vol_ref['status'],
+                            'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
+                    raise exception.InvalidVolume(reason=msg)
+
+                # group.host and add_vol_ref['host'] are in this format:
+                # 'host@backend#pool'. Extract host (host@backend) before
+                # doing comparison.
+                vol_host = vol_utils.extract_host(add_vol_ref['host'])
+                group_host = vol_utils.extract_host(group.host)
+                if group_host != vol_host:
+                    raise exception.InvalidVolume(
+                        reason=_("Volume is not local to this node."))
+
+                # Volume exists. It will be added to the group.
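
The host strings compared just above take the form 'host@backend#pool', and only the 'host@backend' prefix matters for the locality check. A standalone sketch of that normalization; vol_utils.extract_host implements the real logic, including pool defaults:

    def extract_backend(host):
        # Drop the '#pool' suffix, keeping 'host@backend'.
        return host.split('#')[0]

    assert extract_backend('node1@lvm#pool1') == 'node1@lvm'
    assert extract_backend('node1@lvm') == 'node1@lvm'
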
+ if add_volumes_new: + add_volumes_new += "," + add_volumes_new += add_vol_ref['id'] + + else: + msg = (_("Cannot add volume %(volume_id)s to group " + "%(group_id)s because volume does not exist.") % + {'volume_id': add_vol_ref['id'], + 'group_id': group.id}) + raise exception.InvalidVolume(reason=msg) + + return add_volumes_new + + def get(self, context, group_id): + group = objects.Group.get_by_id(context, group_id) + check_policy(context, 'get', group) + return group + + def get_all(self, context, filters=None, marker=None, limit=None, + offset=None, sort_keys=None, sort_dirs=None): + check_policy(context, 'get_all') + if filters is None: + filters = {} + + if filters: + LOG.debug("Searching by: %s", filters) + + if (context.is_admin and 'all_tenants' in filters): + del filters['all_tenants'] + groups = objects.GroupList.get_all( + context, filters=filters, marker=marker, limit=limit, + offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) + else: + groups = objects.GroupList.get_all_by_project( + context, context.project_id, filters=filters, marker=marker, + limit=limit, offset=offset, sort_keys=sort_keys, + sort_dirs=sort_dirs) + return groups + + def create_group_snapshot(self, context, group, name, description): + options = {'group_id': group.id, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'name': name, + 'description': description} + + group_snapshot = None + group_snapshot_id = None + try: + group_snapshot = objects.GroupSnapshot(context, **options) + group_snapshot.create() + group_snapshot_id = group_snapshot.id + + snap_name = group_snapshot.name + snap_desc = group_snapshot.description + with group.obj_as_admin(): + self.volume_api.create_snapshots_in_db( + context, group.volumes, snap_name, snap_desc, + None, group_snapshot_id) + + except Exception: + with excutils.save_and_reraise_exception(): + try: + # If the group_snapshot has been created + if group_snapshot.obj_attr_is_set('id'): + group_snapshot.destroy() + finally: + LOG.error(_LE("Error occurred when creating group_snapshot" + " %s."), group_snapshot_id) + + self.volume_rpcapi.create_group_snapshot(context, group_snapshot) + + return group_snapshot + + def delete_group_snapshot(self, context, group_snapshot, force=False): + check_policy(context, 'delete_group_snapshot') + values = {'status': 'deleting'} + expected = {'status': ('available', 'error')} + filters = [~db.group_creating_from_src( + group_snapshot_id=group_snapshot.id)] + res = group_snapshot.conditional_update(values, expected, filters) + + if not res: + msg = _('GroupSnapshot status must be available or error, and no ' + 'Group can be currently using it as source for its ' + 'creation.') + raise exception.InvalidGroupSnapshot(reason=msg) + + snapshots = objects.SnapshotList.get_all_for_group_snapshot( + context, group_snapshot.id) + + # TODO(xyang): Add a new db API to update all snapshots statuses + # in one db API call. 
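
The TODO above asks for a single DB call instead of the per-snapshot loop that follows. A hypothetical helper, not an existing Cinder API, illustrating the intent:

    # Hypothetical bulk helper; Cinder has no such API yet, per the TODO.
    def snapshots_update_status(snapshots, status):
        for snap in snapshots:    # a real implementation would issue one
            snap.status = status  # UPDATE ... WHERE group_snapshot_id = ?
            snap.save()           # rather than N round trips
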
+ for snap in snapshots: + snap.status = c_fields.SnapshotStatus.DELETING + snap.save() + + self.volume_rpcapi.delete_group_snapshot(context.elevated(), + group_snapshot) + + def update_group_snapshot(self, context, group_snapshot, fields): + check_policy(context, 'update_group_snapshot') + group_snapshot.update(fields) + group_snapshot.save() + + def get_group_snapshot(self, context, group_snapshot_id): + check_policy(context, 'get_group_snapshot') + group_snapshots = objects.GroupSnapshot.get_by_id(context, + group_snapshot_id) + return group_snapshots + + def get_all_group_snapshots(self, context, search_opts=None): + check_policy(context, 'get_all_group_snapshots') + search_opts = search_opts or {} + + if context.is_admin and 'all_tenants' in search_opts: + # Need to remove all_tenants to pass the filtering below. + del search_opts['all_tenants'] + group_snapshots = objects.GroupSnapshotList.get_all(context, + search_opts) + else: + group_snapshots = objects.GroupSnapshotList.get_all_by_project( + context.elevated(), context.project_id, search_opts) + return group_snapshots diff --git a/cinder/image/glance.py b/cinder/image/glance.py index 1041a45c3..8acb81d44 100644 --- a/cinder/image/glance.py +++ b/cinder/image/glance.py @@ -36,7 +36,7 @@ from six.moves import range from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _, _LE glance_opts = [ @@ -157,13 +157,6 @@ class GlanceClientWrapper(object): self.api_servers = None self.version = version - if CONF.glance_num_retries < 0: - LOG.warning(_LW( - "glance_num_retries shouldn't be a negative value. " - "The number of retries will be set to 0 until this is" - "corrected in the cinder.conf.")) - CONF.set_override('glance_num_retries', 0) - def _create_static_client(self, context, netloc, use_ssl, version): """Create a client that we'll use for every call.""" self.netloc = netloc diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py index c51179ba3..fac5cd927 100644 --- a/cinder/image/image_utils.py +++ b/cinder/image/image_utils.py @@ -56,10 +56,10 @@ CONF.register_opts(image_helper_opts) # NOTE(abhishekk): qemu-img convert command supports raw, qcow2, qed, -# vdi, vmdk and vhd disk-formats but glance doesn't support qed and -# vhd(vpc) disk-formats. +# vdi, vmdk, vhd and vhdx disk-formats but glance doesn't support qed +# disk-format. 
 # Ref: http://docs.openstack.org/image-guide/convert-images.html
-VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2')
+VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2', 'vhd', 'vhdx')
 
 
 def validate_disk_format(disk_format):
@@ -395,15 +395,20 @@ def upload_volume(context, image_service, image_meta, volume_path,
                 reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
                 % {'fmt': fmt, 'backing_file': backing_file})
 
-    convert_image(volume_path, tmp, image_meta['disk_format'],
+    out_format = image_meta['disk_format']
+    # qemu-img accepts 'vpc' as argument for vhd format
+    if out_format == 'vhd':
+        out_format = 'vpc'
+
+    convert_image(volume_path, tmp, out_format,
                   run_as_root=run_as_root)
 
     data = qemu_img_info(tmp, run_as_root=run_as_root)
-    if data.file_format != image_meta['disk_format']:
+    if data.file_format != out_format:
         raise exception.ImageUnacceptable(
             image_id=image_id,
             reason=_("Converted to %(f1)s, but format is now %(f2)s") %
-            {'f1': image_meta['disk_format'], 'f2': data.file_format})
+            {'f1': out_format, 'f2': data.file_format})
 
     with open(tmp, 'rb') as image_file:
         image_service.update(context, image_id, {}, image_file)
diff --git a/cinder/interface/backup_chunked_driver.py b/cinder/interface/backup_chunked_driver.py
index 3f32cc293..03f0a8bf8 100644
--- a/cinder/interface/backup_chunked_driver.py
+++ b/cinder/interface/backup_chunked_driver.py
@@ -21,7 +21,7 @@ Backup driver with 'chunked' backup operations.
 from cinder.interface import backup_driver
 
 
-class BackupDriverWithVerify(backup_driver.BackupDriver):
+class BackupChunkedDriver(backup_driver.BackupDriver):
     """Backup driver that supports 'chunked' backups."""
 
     def put_container(self, container):
diff --git a/cinder/interface/util.py b/cinder/interface/util.py
index 32adb0222..8bab173b8 100644
--- a/cinder/interface/util.py
+++ b/cinder/interface/util.py
@@ -67,6 +67,7 @@ class DriverInfo(object):
         self.class_fqn = '{}.{}'.format(inspect.getmodule(cls).__name__,
                                         self.class_name)
         self.version = getattr(cls, 'VERSION', None)
+        self.ci_wiki_name = getattr(cls, 'CI_WIKI_NAME', None)
 
     def __str__(self):
         return self.class_name
diff --git a/cinder/interface/volume_driver.py b/cinder/interface/volume_driver.py
index 05bd8f21a..564ec581b 100644
--- a/cinder/interface/volume_driver.py
+++ b/cinder/interface/volume_driver.py
@@ -78,9 +78,18 @@ class VolumeDriverCore(base.CinderInterface):
           string such as: "iSCSI", "FC", "nfs", "ceph", etc.
         * total_capacity_gb
           The total capacity in gigabytes (GiB) of the storage backend being
-          used to store Cinder volumes.
+          used to store Cinder volumes. Use keyword 'unknown' if the backend
+          cannot report the value or 'infinite' if there is no upper limit.
+          However, it is recommended to report real values, as the Cinder
+          scheduler assigns the lowest weight to any storage backend
+          reporting 'unknown' or 'infinite'.
+
         * free_capacity_gb
-          The free capacity in gigabytes (GiB).
+          The free capacity in gigabytes (GiB). Use keyword 'unknown' if the
+          backend cannot report the value or 'infinite' if there is no upper
+          limit. However, it is recommended to report real values, as the
+          Cinder scheduler assigns the lowest weight to any storage backend
+          reporting 'unknown' or 'infinite'.
And the following optional fields: diff --git a/cinder/interface/volume_management_driver.py b/cinder/interface/volume_management_driver.py index 966cb50bc..fac607445 100644 --- a/cinder/interface/volume_management_driver.py +++ b/cinder/interface/volume_management_driver.py @@ -53,8 +53,15 @@ class VolumeManagementDriver(base.CinderInterface): ManageExistingVolumeTypeMismatch, specifying a reason for the failure. :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume + :param existing_ref: Dictionary with keys 'source-id', 'source-name' + with driver-specific values to identify a backend + storage object. + :raises: ManageExistingInvalidReference If the existing_ref doesn't + make sense, or doesn't refer to an existing backend storage + object. + :raises: ManageExistingVolumeTypeMismatch If there is a mismatch + between the volume type and the properties of the existing + backend storage object. """ def manage_existing_get_size(self, volume, existing_ref): @@ -63,8 +70,12 @@ class VolumeManagementDriver(base.CinderInterface): When calculating the size, round up to the next GB. :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume + :param existing_ref: Dictionary with keys 'source-id', 'source-name' + with driver-specific values to identify a backend + storage object. + :raises: ManageExistingInvalidReference If the existing_ref doesn't + make sense, or doesn't refer to an existing backend storage + object. """ def unmanage(self, volume): diff --git a/cinder/interface/volume_snapshotmanagement_driver.py b/cinder/interface/volume_snapshotmanagement_driver.py index 8bce8949c..0eb3475d3 100644 --- a/cinder/interface/volume_snapshotmanagement_driver.py +++ b/cinder/interface/volume_snapshotmanagement_driver.py @@ -44,7 +44,9 @@ class VolumeSnapshotManagementDriver(base.CinderInterface): backend storage object when required. :param snapshot: The snapshot to manage. - :param existing_ref: A reference to the existing snap. + :param existing_ref: Dictionary with keys 'source-id', 'source-name' + with driver-specific values to identify a backend + storage object. :raises: ManageExistingInvalidReference If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object. @@ -55,8 +57,13 @@ class VolumeSnapshotManagementDriver(base.CinderInterface): When calculating the size, round up to the next GB. - :param snapshot: The snapshot. - :param existing_ref: A reference to the existing snap. + :param snapshot: The snapshot to manage. + :param existing_ref: Dictionary with keys 'source-id', 'source-name' + with driver-specific values to identify a backend + storage object. + :raises: ManageExistingInvalidReference If the existing_ref doesn't + make sense, or doesn't refer to an existing backend storage + object. """ def unmanage_snapshot(self, snapshot): diff --git a/cinder/keymgr/__init__.py b/cinder/keymgr/__init__.py index 846e74691..ac705dc0f 100644 --- a/cinder/keymgr/__init__.py +++ b/cinder/keymgr/__init__.py @@ -13,19 +13,64 @@ # License for the specific language governing permissions and limitations # under the License. 
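
The capacity-reporting guidance added to volume_driver.py above translates into a stats dict like the following. This is a hypothetical backend, and real numbers are preferred over the 'unknown'/'infinite' keywords, since either earns the lowest scheduler weight:

    # Illustrative get_volume_stats() return value for a made-up backend.
    stats = {
        'volume_backend_name': 'example_backend',  # hypothetical name
        'vendor_name': 'Example Vendor',           # hypothetical
        'driver_version': '1.0.0',
        'storage_protocol': 'iSCSI',
        'total_capacity_gb': 500,        # real value, preferred
        'free_capacity_gb': 'unknown',   # backend cannot report it
    }
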
+from castellan import options as castellan_opts from oslo_config import cfg +from oslo_log import log as logging +from oslo_log import versionutils from oslo_utils import importutils -keymgr_opts = [ - cfg.StrOpt('api_class', - default='cinder.keymgr.conf_key_mgr.ConfKeyManager', - help='The full class name of the key manager API class'), -] +from cinder.i18n import _LW + +LOG = logging.getLogger(__name__) CONF = cfg.CONF -CONF.register_opts(keymgr_opts, group='keymgr') + +castellan_opts.set_defaults(cfg.CONF) + +# NOTE(kfarr): This line can be removed when a value is assigned in DevStack +CONF.set_default('api_class', 'cinder.keymgr.conf_key_mgr.ConfKeyManager', + group='key_manager') + +# NOTE(kfarr): For backwards compatibility, everything below this comment +# is deprecated for removal +api_class = None +try: + api_class = CONF.key_manager.api_class +except cfg.NoSuchOptError: + LOG.warning(_LW("key_manager.api_class is not set, will use deprecated " + "option keymgr.api_class if set")) + try: + api_class = CONF.keymgr.api_class + except cfg.NoSuchOptError: + LOG.warning(_LW("keymgr.api_class is not set")) + +deprecated_barbican = 'cinder.keymgr.barbican.BarbicanKeyManager' +barbican = 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager' +deprecated_mock = 'cinder.tests.unit.keymgr.mock_key_mgr.MockKeyManager' +castellan_mock = ('castellan.tests.unit.key_manager.mock_key_manager.' + 'MockKeyManager') -def API(): - cls = importutils.import_class(CONF.keymgr.api_class) - return cls() +def log_deprecated_warning(deprecated, castellan): + versionutils.deprecation_warning(deprecated, versionutils.NEWTON, + in_favor_of=castellan, logger=LOG) + +if api_class == deprecated_barbican: + log_deprecated_warning(deprecated_barbican, barbican) + api_class = barbican +elif api_class == deprecated_mock: + log_deprecated_warning(deprecated_mock, castellan_mock) + api_class = castellan_mock +elif api_class is None: + # TODO(kfarr): key_manager.api_class should be set in DevStack, and this + # block can be removed + LOG.warning(_LW("key manager not set, using insecure default %s"), + castellan_mock) + api_class = castellan_mock + +CONF.set_override('api_class', api_class, 'key_manager') + + +def API(conf=CONF): + cls = importutils.import_class(conf.key_manager.api_class) + return cls(conf) diff --git a/cinder/keymgr/barbican.py b/cinder/keymgr/barbican.py deleted file mode 100644 index ee59cbc5a..000000000 --- a/cinder/keymgr/barbican.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
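
With the castellan migration above, callers go through the module-level API() factory, which resolves CONF.key_manager.api_class (honoring the deprecated keymgr.api_class spelling). A minimal usage sketch inside Cinder, assuming ConfKeyManager is the configured backend:

    from cinder import keymgr

    # Resolves CONF.key_manager.api_class and instantiates it with CONF.
    key_mgr = keymgr.API()
    # Castellan-style interface: get()/store()/delete() take a request
    # context plus, where relevant, a managed-object id, e.g.:
    # key = key_mgr.get(context, key_id)
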
-
-"""
-Key manager implementation for Barbican
-"""
-
-import array
-import base64
-import binascii
-import re
-
-from barbicanclient import client as barbican_client
-from keystoneclient.auth import identity
-from keystoneclient import session
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import excutils
-
-from cinder import exception
-from cinder.i18n import _, _LE
-from cinder.keymgr import key as keymgr_key
-from cinder.keymgr import key_mgr
-
-CONF = cfg.CONF
-CONF.import_opt('encryption_auth_url', 'cinder.keymgr.key_mgr', group='keymgr')
-CONF.import_opt('encryption_api_url', 'cinder.keymgr.key_mgr', group='keymgr')
-LOG = logging.getLogger(__name__)
-URL_PATTERN = re.compile(
-    "(?P<url_base>http[s]?://[^/]*)[/]?(?P<url_version>(v[0-9.]+)?).*")
-
-
-class BarbicanKeyManager(key_mgr.KeyManager):
-    """Key Manager Interface that wraps the Barbican client API."""
-
-    def __init__(self):
-        self._base_url = CONF.keymgr.encryption_api_url
-        self._parse_barbican_api_url()
-        self._barbican_client = None
-        self._current_context = None
-
-    def _parse_barbican_api_url(self):
-        """Setup member variables to reference the Barbican URL.
-
-        The key manipulation functions in this module need to use the
-        barbican URL with the version appended. But the barbicanclient
-        Client() class needs the URL without the version appended.
-        So set up a member variables here for each case.
-        """
-        m = URL_PATTERN.search(self._base_url)
-        if m is None:
-            raise exception.KeyManagerError(_(
-                "Invalid url: must be in the form "
-                "'http[s]://<ipaddr>|<fqdn>[:port]/<url_version>', "
-                "url specified is: %s"), self._base_url)
-        url_info = dict(m.groupdict())
-        if 'url_version' not in url_info or url_info['url_version'] == "":
-            raise exception.KeyManagerError(_(
-                "Invalid barbican api url: version is required, "
-                "e.g. 'http[s]://<ipaddr>|<fqdn>[:port]/<url_version>' "
-                "url specified is: %s") % self._base_url)
-        # We will also need the barbican API URL without the '/v1'.
-        # So save that now.
-        self._barbican_endpoint = url_info['url_base']
-
-    def _get_barbican_client(self, ctxt):
-        """Creates a client to connect to the Barbican service.
-
-        :param ctxt: the user context for authentication
-        :return: a Barbican Client object
-        :throws NotAuthorized: if the ctxt is None
-        :throws KeyManagerError: if ctxt is missing project_id
-                                 or project_id is None
-        """
-
-        # Confirm context is provided, if not raise not authorized
-        if not ctxt:
-            msg = _("User is not authorized to use key manager.")
-            LOG.error(msg)
-            raise exception.NotAuthorized(msg)
-
-        if not hasattr(ctxt, 'project_id') or ctxt.project_id is None:
-            msg = _("Unable to create Barbican Client without project_id.")
-            LOG.error(msg)
-            raise exception.KeyManagerError(msg)
-
-        # If same context, return cached barbican client
-        if self._barbican_client and self._current_context == ctxt:
-            return self._barbican_client
-
-        try:
-            auth = identity.v3.Token(
-                auth_url=CONF.keymgr.encryption_auth_url,
-                token=ctxt.auth_token,
-                project_id=ctxt.project_id)
-            sess = session.Session(auth=auth)
-            self._barbican_client = barbican_client.Client(
-                session=sess,
-                endpoint=self._barbican_endpoint)
-            self._current_context = ctxt
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Error creating Barbican client."))
-
-        return self._barbican_client
-
-    def create_key(self, ctxt, expiration=None, name='Cinder Volume Key',
-                   payload_content_type='application/octet-stream', mode='CBC',
-                   algorithm='AES', length=256):
-        """Creates a key.
- - :param ctxt: contains information of the user and the environment - for the request (cinder/context.py) - :param expiration: the date the key will expire - :param name: a friendly name for the secret - :param payload_content_type: the format/type of the secret data - :param mode: the algorithm mode (e.g. CBC or CTR mode) - :param algorithm: the algorithm associated with the secret - :param length: the bit length of the secret - - :return: the UUID of the new key - :throws Exception: if key creation fails - """ - barbican_client = self._get_barbican_client(ctxt) - - try: - key_order = barbican_client.orders.create_key( - name, - algorithm, - length, - mode, - payload_content_type, - expiration) - order_ref = key_order.submit() - order = barbican_client.orders.get(order_ref) - secret_uuid = order.secret_ref.rpartition('/')[2] - return secret_uuid - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error creating key.")) - - def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key', - payload_content_type='application/octet-stream', - payload_content_encoding='base64', algorithm='AES', - bit_length=256, mode='CBC', from_copy=False): - """Stores (i.e., registers) a key with the key manager. - - :param ctxt: contains information of the user and the environment for - the request (cinder/context.py) - :param key: the unencrypted secret data. Known as "payload" to the - barbicanclient api - :param expiration: the expiration time of the secret in ISO 8601 - format - :param name: a friendly name for the key - :param payload_content_type: the format/type of the secret data - :param payload_content_encoding: the encoding of the secret data - :param algorithm: the algorithm associated with this secret key - :param bit_length: the bit length of this secret key - :param mode: the algorithm mode used with this secret key - :param from_copy: establishes whether the function is being used - to copy a key. In case of the latter, it does not - try to decode the key - - :returns: the UUID of the stored key - :throws Exception: if key storage fails - """ - barbican_client = self._get_barbican_client(ctxt) - - try: - if key.get_algorithm(): - algorithm = key.get_algorithm() - if payload_content_type == 'text/plain': - payload_content_encoding = None - encoded_key = key.get_encoded() - elif (payload_content_type == 'application/octet-stream' and - not from_copy): - key_list = key.get_encoded() - string_key = ''.join(map(lambda byte: "%02x" % byte, key_list)) - encoded_key = base64.b64encode(binascii.unhexlify(string_key)) - else: - encoded_key = key.get_encoded() - secret = barbican_client.secrets.create(name, - encoded_key, - payload_content_type, - payload_content_encoding, - algorithm, - bit_length, - None, - mode, - expiration) - secret_ref = secret.store() - secret_uuid = secret_ref.rpartition('/')[2] - return secret_uuid - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error storing key.")) - - def copy_key(self, ctxt, key_id): - """Copies (i.e., clones) a key stored by barbican. 
- - :param ctxt: contains information of the user and the environment for - the request (cinder/context.py) - :param key_id: the UUID of the key to copy - :return: the UUID of the key copy - :throws Exception: if key copying fails - """ - barbican_client = self._get_barbican_client(ctxt) - - try: - secret_ref = self._create_secret_ref(key_id, barbican_client) - secret = self._get_secret(ctxt, secret_ref) - con_type = secret.content_types['default'] - secret_data = self._get_secret_data(secret, - payload_content_type=con_type) - key = keymgr_key.SymmetricKey(secret.algorithm, secret_data) - copy_uuid = self.store_key(ctxt, key, secret.expiration, - secret.name, con_type, - 'base64', - secret.algorithm, secret.bit_length, - secret.mode, True) - return copy_uuid - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error copying key.")) - - def _create_secret_ref(self, key_id, barbican_client): - """Creates the URL required for accessing a secret. - - :param key_id: the UUID of the key to copy - :param barbican_client: barbican key manager object - - :return: the URL of the requested secret - """ - if not key_id: - msg = "Key ID is None" - raise exception.KeyManagerError(msg) - return self._base_url + "/secrets/" + key_id - - def _get_secret_data(self, - secret, - payload_content_type='application/octet-stream'): - """Retrieves the secret data given a secret_ref and content_type. - - :param ctxt: contains information of the user and the environment for - the request (cinder/context.py) - :param secret_ref: URL to access the secret - :param payload_content_type: the format/type of the secret data - - :returns: the secret data - :throws Exception: if data cannot be retrieved - """ - try: - generated_data = secret.payload - if payload_content_type == 'application/octet-stream': - secret_data = base64.b64encode(generated_data) - else: - secret_data = generated_data - return secret_data - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error getting secret data.")) - - def _get_secret(self, ctxt, secret_ref): - """Creates the URL required for accessing a secret's metadata. - - :param ctxt: contains information of the user and the environment for - the request (cinder/context.py) - :param secret_ref: URL to access the secret - - :return: the secret's metadata - :throws Exception: if there is an error retrieving the data - """ - - barbican_client = self._get_barbican_client(ctxt) - - try: - return barbican_client.secrets.get(secret_ref) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error getting secret metadata.")) - - def get_key(self, ctxt, key_id, - payload_content_type='application/octet-stream'): - """Retrieves the specified key. 
- - :param ctxt: contains information of the user and the environment for - the request (cinder/context.py) - :param key_id: the UUID of the key to retrieve - :param payload_content_type: The format/type of the secret data - - :return: SymmetricKey representation of the key - :throws Exception: if key retrieval fails - """ - try: - secret_ref = self._create_secret_ref(key_id, barbican_client) - secret = self._get_secret(ctxt, secret_ref) - secret_data = self._get_secret_data(secret, - payload_content_type) - if payload_content_type == 'application/octet-stream': - # convert decoded string to list of unsigned ints for each byte - key_data = array.array('B', - base64.b64decode(secret_data)).tolist() - else: - key_data = secret_data - key = keymgr_key.SymmetricKey(secret.algorithm, key_data) - return key - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error getting key.")) - - def delete_key(self, ctxt, key_id): - """Deletes the specified key. - - :param ctxt: contains information of the user and the environment for - the request (cinder/context.py) - :param key_id: the UUID of the key to delete - :throws Exception: if key deletion fails - """ - barbican_client = self._get_barbican_client(ctxt) - - try: - secret_ref = self._create_secret_ref(key_id, barbican_client) - barbican_client.secrets.delete(secret_ref) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error deleting key.")) diff --git a/cinder/keymgr/conf_key_mgr.py b/cinder/keymgr/conf_key_mgr.py index aeea6767d..9fd50ccb0 100644 --- a/cinder/keymgr/conf_key_mgr.py +++ b/cinder/keymgr/conf_key_mgr.py @@ -31,31 +31,30 @@ encrypted with a key provided by this key manager actually share the same encryption key so *any* volume can be decrypted once the fixed key is known. """ -import array import binascii +from castellan.common.objects import symmetric_key +from castellan.key_manager import key_manager from oslo_config import cfg from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LW -from cinder.keymgr import key -from cinder.keymgr import key_mgr key_mgr_opts = [ cfg.StrOpt('fixed_key', - help='Fixed key returned by key manager, specified in hex'), + help='Fixed key returned by key manager, specified in hex', + deprecated_group='keymgr'), ] CONF = cfg.CONF -CONF.register_opts(key_mgr_opts, group='keymgr') - +CONF.register_opts(key_mgr_opts, group='key_manager') LOG = logging.getLogger(__name__) -class ConfKeyManager(key_mgr.KeyManager): +class ConfKeyManager(key_manager.KeyManager): """Key Manager that supports one key defined by the fixed_key conf option. This key manager implementation supports all the methods specified by the @@ -64,73 +63,79 @@ class ConfKeyManager(key_mgr.KeyManager): for each method are handled as specified by the key manager interface. 
""" - def __init__(self): - super(ConfKeyManager, self).__init__() + def __init__(self, configuration): + LOG.warning(_LW('This key manager is insecure and is not recommended ' + 'for production deployments')) + super(ConfKeyManager, self).__init__(configuration) + self.conf = configuration + self.conf.register_opts(key_mgr_opts, group='key_manager') self.key_id = '00000000-0000-0000-0000-000000000000' - def _generate_key(self, **kwargs): - _hex = self._generate_hex_key(**kwargs) - key_list = array.array('B', binascii.unhexlify(_hex)).tolist() - return key.SymmetricKey('AES', key_list) + def _get_key(self): + if self.conf.key_manager.fixed_key is None: + raise ValueError(_('config option key_manager.fixed_key is not ' + 'defined')) + hex_key = self.conf.key_manager.fixed_key + key_bytes = bytes(binascii.unhexlify(hex_key)) + return symmetric_key.SymmetricKey('AES', + len(key_bytes) * 8, + key_bytes) - def _generate_hex_key(self, **kwargs): - if CONF.keymgr.fixed_key is None: - LOG.warning( - _LW('config option keymgr.fixed_key has not been defined:' - ' some operations may fail unexpectedly')) - raise ValueError(_('keymgr.fixed_key not defined')) - return CONF.keymgr.fixed_key + def create_key(self, context, **kwargs): + """Creates a symmetric key. - def create_key(self, ctxt, **kwargs): - """Creates a key. - - This implementation returns a UUID for the created key. A - NotAuthorized exception is raised if the specified context is None. + This implementation returns a UUID for the key read from the + configuration file. A NotAuthorized exception is raised if the + specified context is None. """ - if ctxt is None: + if context is None: raise exception.NotAuthorized() return self.key_id - def store_key(self, ctxt, key, **kwargs): + def create_key_pair(self, context, **kwargs): + raise NotImplementedError( + "ConfKeyManager does not support asymmetric keys") + + def store(self, context, managed_object, **kwargs): """Stores (i.e., registers) a key with the key manager.""" - if ctxt is None: + if context is None: raise exception.NotAuthorized() - if key != self._generate_key(): + if managed_object != self._get_key(): raise exception.KeyManagerError( reason="cannot store arbitrary keys") return self.key_id - def copy_key(self, ctxt, key_id, **kwargs): - if ctxt is None: - raise exception.NotAuthorized() - - return self.key_id - - def get_key(self, ctxt, key_id, **kwargs): + def get(self, context, managed_object_id): """Retrieves the key identified by the specified id. This implementation returns the key that is associated with the specified UUID. A NotAuthorized exception is raised if the specified context is None; a KeyError is raised if the UUID is invalid. """ - if ctxt is None: + if context is None: raise exception.NotAuthorized() - if key_id != self.key_id: - raise KeyError(key_id) + if managed_object_id != self.key_id: + raise KeyError(str(managed_object_id) + " != " + str(self.key_id)) - return self._generate_key() + return self._get_key() - def delete_key(self, ctxt, key_id, **kwargs): - if ctxt is None: + def delete(self, context, managed_object_id): + """Represents deleting the key. + + Because the ConfKeyManager has only one key, which is read from the + configuration file, the key is not actually deleted when this is + called. 
+ """ + if context is None: raise exception.NotAuthorized() - if key_id != self.key_id: + if managed_object_id != self.key_id: raise exception.KeyManagerError( reason="cannot delete non-existent key") - LOG.warning(_LW("Not deleting key %s"), key_id) + LOG.warning(_LW("Not deleting key %s"), managed_object_id) diff --git a/cinder/keymgr/key.py b/cinder/keymgr/key.py deleted file mode 100644 index 54080ab64..000000000 --- a/cinder/keymgr/key.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Base Key and SymmetricKey Classes - -This module defines the Key and SymmetricKey classes. The Key class is the base -class to represent all encryption keys. The basis for this class was copied -from Java. -""" - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class Key(object): - """Base class to represent all keys.""" - - @abc.abstractmethod - def get_algorithm(self): - """Returns the key's algorithm. - - Returns the key's algorithm. For example, "DSA" indicates that this key - is a DSA key and "AES" indicates that this key is an AES key. - """ - pass - - @abc.abstractmethod - def get_format(self): - """Returns the encoding format. - - Returns the key's encoding format or None if this key is not encoded. - """ - pass - - @abc.abstractmethod - def get_encoded(self): - """Returns the key in the format specified by its encoding.""" - pass - - -class SymmetricKey(Key): - """This class represents symmetric keys.""" - - def __init__(self, alg, key): - """Create a new SymmetricKey object. - - The arguments specify the algorithm for the symmetric encryption and - the bytes for the key. - """ - self.alg = alg - self.key = key - - def get_algorithm(self): - """Returns the algorithm for symmetric encryption.""" - return self.alg - - def get_format(self): - """This method returns 'RAW'.""" - return "RAW" - - def get_encoded(self): - """Returns the key in its encoded format.""" - return self.key - - def __eq__(self, other): - if isinstance(other, SymmetricKey): - return (self.alg == other.alg and - self.key == other.key) - return NotImplemented - - def __ne__(self, other): - result = self.__eq__(other) - if result is NotImplemented: - return result - return not result diff --git a/cinder/keymgr/key_mgr.py b/cinder/keymgr/key_mgr.py deleted file mode 100644 index ccf3f38cf..000000000 --- a/cinder/keymgr/key_mgr.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Key manager API
-"""
-
-import abc
-
-from oslo_config import cfg
-import six
-
-encryption_opts = [
-    cfg.StrOpt('encryption_auth_url',
-               default='http://localhost:5000/v3',
-               help='Authentication url for encryption service.'),
-    cfg.StrOpt('encryption_api_url',
-               default='http://localhost:9311/v1',
-               help='Url for encryption service.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(encryption_opts, group='keymgr')
-
-
-@six.add_metaclass(abc.ABCMeta)
-class KeyManager(object):
-    """Base Key Manager Interface
-
-    A Key Manager is responsible for managing encryption keys for volumes. A
-    Key Manager is responsible for creating, reading, and deleting keys.
-    """
-
-    @abc.abstractmethod
-    def create_key(self, ctxt, algorithm='AES', length=256, expiration=None,
-                   **kwargs):
-        """Creates a key.
-
-        This method creates a key and returns the key's UUID. If the specified
-        context does not permit the creation of keys, then a NotAuthorized
-        exception should be raised.
-        """
-        pass
-
-    @abc.abstractmethod
-    def store_key(self, ctxt, key, expiration=None, **kwargs):
-        """Stores (i.e., registers) a key with the key manager.
-
-        This method stores the specified key and returns its UUID that
-        identifies it within the key manager. If the specified context does
-        not permit the creation of keys, then a NotAuthorized exception should
-        be raised.
-        """
-        pass
-
-    @abc.abstractmethod
-    def copy_key(self, ctxt, key_id, **kwargs):
-        """Copies (i.e., clones) a key stored by the key manager.
-
-        This method copies the specified key and returns the copy's UUID. If
-        the specified context does not permit copying keys, then a
-        NotAuthorized error should be raised.
-
-        Implementation note: This method should behave identically to
-
-        .. code-block:: python
-
-            store_key(context, get_key(context, <key_id>))
-
-        although it is preferable to perform this operation within the key
-        manager to avoid unnecessary handling of the key material.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_key(self, ctxt, key_id, **kwargs):
-        """Retrieves the specified key.
-
-        Implementations should verify that the caller has permissions to
-        retrieve the key by checking the context object passed in as ctxt. If
-        the user lacks permission then a NotAuthorized exception is raised.
-
-        If the specified key does not exist, then a KeyError should be raised.
-        Implementations should preclude users from discerning the UUIDs of
-        keys that belong to other users by repeatedly calling this method.
diff --git a/cinder/keymgr/not_implemented_key_mgr.py b/cinder/keymgr/not_implemented_key_mgr.py
deleted file mode 100644
index e8d2a607d..000000000
--- a/cinder/keymgr/not_implemented_key_mgr.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Key manager implementation that raises NotImplementedError
-"""
-
-from cinder.keymgr import key_mgr
-
-
-class NotImplementedKeyManager(key_mgr.KeyManager):
-    """Key Manager interface that raises NotImplementedError"""
-
-    def create_key(self, ctxt, algorithm='AES', length=256, expiration=None,
-                   **kwargs):
-        raise NotImplementedError()
-
-    def store_key(self, ctxt, key, expiration=None, **kwargs):
-        raise NotImplementedError()
-
-    def copy_key(self, ctxt, key_id, **kwargs):
-        raise NotImplementedError()
-
-    def get_key(self, ctxt, key_id, **kwargs):
-        raise NotImplementedError()
-
-    def delete_key(self, ctxt, key_id, **kwargs):
-        raise NotImplementedError()
diff --git a/cinder/locale/cs/LC_MESSAGES/cinder-log-error.po b/cinder/locale/cs/LC_MESSAGES/cinder-log-error.po
index 926843e0c..f8c39aa41 100644
--- a/cinder/locale/cs/LC_MESSAGES/cinder-log-error.po
+++ b/cinder/locale/cs/LC_MESSAGES/cinder-log-error.po
@@ -9,9 +9,9 @@
 # Andreas Jaeger , 2016. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: cinder 9.0.0.0b2.dev1\n"
+"Project-Id-Version: cinder 9.0.0.0b3.dev487\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-06-01 22:30+0000\n"
+"POT-Creation-Date: 2016-08-30 03:17+0000\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -102,10 +102,6 @@ msgstr ""
 msgid "Array Serial Number must be in the file %(fileName)s."
 msgstr "Sériové číslo pole musí být v souboru %(fileName)s."

-#, python-format
-msgid "Array mismatch %(myid)s vs %(arid)s"
-msgstr "Neshoda pole: %(myid)s proti %(arid)s"
-
 #, python-format
 msgid "Array query failed - No response (%d)!"
 msgstr "Dotaz na pole selhal - Žádná odpověď (%d)!"
@@ -177,10 +173,6 @@ msgstr "Volání Nova pro vytvoření snímku selhalo"
 msgid "Call to json.loads() raised an exception: %s."
 msgstr "Volání json.loads() vyvolalo výjimku: %s."

-#, python-format
-msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s."
-msgstr "Nelze přidat lun %(lun)s do skupiny jednotnosti %(cg_name)s."
-
 #, python-format
 msgid "Can not discovery in %(target_ip)s with %(target_iqn)s."
 msgstr "Nelze zjišťovat v %(target_ip)s pomocí %(target_iqn)s."
@@ -188,14 +180,6 @@ msgstr "Nelze zjišťovat v %(target_ip)s pomocí %(target_iqn)s."
 msgid "Can not open the recent url, login again."
 msgstr "Nelze otevřít nedávnou adresu url, probíhá znovu přihlašování."

-#, python-format
-msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s."
-msgstr "Nelze umístit nové LUN %(luns)s do skupiny jednotnosti %(cg_name)s." - -#, python-format -msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." -msgstr "Nelze odebrat lun %(luns)s ze skupiny jednotnosti %(cg_name)s." - #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "Nelze najít svazek k mapování %(key)s, %(msg)s" @@ -358,10 +342,6 @@ msgstr "" msgid "Connection to %s failed and no secondary!" msgstr "Připojení k %s selhalo a žádné druhotné připojení není nastaveno!" -#, python-format -msgid "Consistency group %s: create failed" -msgstr "Skupina jednotnosti %s: Vytvoření selhalo" - #, python-format msgid "Controller GET failed (%d)" msgstr "Získání kontroléru selhalo (%d)!" @@ -398,22 +378,6 @@ msgstr "Nelze smazat nezdařený svazek obrazu %(id)s." msgid "Could not delete the image volume %(id)s." msgstr "Nelze smazat svazek obrazu %(id)s." -#, python-format -msgid "" -"Could not do delete of snapshot %s on filer, falling back to exec of \"rm\" " -"command." -msgstr "" -"Nelze smazat snímek %s ve správci souborů, smazání bude provedeno spuštěním " -"příkazu \"rm\"." - -#, python-format -msgid "" -"Could not do delete of volume %s on filer, falling back to exec of \"rm\" " -"command." -msgstr "" -"Nelze smazat svazek %s ve správci souborů, smazání bude provedeno spuštěním " -"příkazu \"rm\"." - #, python-format msgid "Could not find a host for consistency group %(group_id)s." msgstr "Nelze najít hostitele pro skupinu jednotnosti %(group_id)s." @@ -447,10 +411,6 @@ msgstr "Nelze vyhodnotit soubor voleb plánovače %(filename)s." msgid "Could not validate device %s" msgstr "Nelze ověřit zařízení %s" -#, python-format -msgid "Create cg snapshot %s failed." -msgstr "Vytvoření snímku skupiny jednotnosti %s selhalo." - #, python-format msgid "" "Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " @@ -459,10 +419,6 @@ msgstr "" "Vytvoření klonu svazku obrazu: %(volume_id)s pro obraz %(image_id)s selhalo " "(Výjimka: %(except)s)" -#, python-format -msgid "Create consistency group %s failed." -msgstr "Vytvoření skupiny jednotnosti %s selhalo." - #, python-format msgid "" "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." @@ -533,14 +489,6 @@ msgstr "" "Výchozí typ svazku nenalezen. Prosím zkontrolujte nastavení " "default_volume_type:" -#, python-format -msgid "Delete cgsnapshot %s failed." -msgstr "Smazání snímku skupiny jednotnosti %s selhalo." - -#, python-format -msgid "Delete consistency group %s failed." -msgstr "Smazání skupiny jednotnosti %s selhalo." - msgid "Delete consistency group failed to update usages." msgstr "Nelze aktualizovat využití při mazání skupin jednotnosti." @@ -644,9 +592,6 @@ msgstr "" msgid "Error activating LV" msgstr "Chyba při aktivaci logického svazku" -msgid "Error adding HBA to server" -msgstr "Chyba při přidávání HBA k serveru" - #, python-format msgid "Error cleaning up failed volume creation. Msg - %s." msgstr "Chyba při čištění selhaného vytváření svazku. Zpráva - %s" @@ -664,12 +609,6 @@ msgstr "" "Chyba při kontaktování serveru glance '%(netloc)s' pro '%(method)s', " "%(extra)s." -msgid "Error copying key." -msgstr "Chyba při kopírování klíče." - -msgid "Error creating Barbican client." -msgstr "Chyba při vytváření klienta Barbican." - #, python-format msgid "Error creating QOS rule %s" msgstr "Chyba při vytváření pravidla QOS %s" @@ -686,9 +625,6 @@ msgstr "Chyba při vytváření záznamu chap." 
msgid "Error creating cloned volume" msgstr "Chyba při vytváření klonovaného svazku" -msgid "Error creating key." -msgstr "Chyba při vytváření klíče." - msgid "Error creating snapshot" msgstr "Chyba při vytváření snímku" @@ -702,9 +638,6 @@ msgstr "Chyba při vytváření svazku. Zpráva - %s." msgid "Error deactivating LV" msgstr "Chyba při deaktivaci logického svazku" -msgid "Error deleting key." -msgstr "Chyba při mazání klíče." - msgid "Error deleting snapshot" msgstr "Chyba při mazání snímku" @@ -776,32 +709,15 @@ msgstr "Chyba při získávání pole, zásobu, SLO a vytížení." msgid "Error getting chap record." msgstr "Chyba při získávání záznamu chap." -#, python-format -msgid "Error getting iSCSI target info from EVS %(evs)s." -msgstr "Chyba při získávání informací o cíli iSCSI z EVS %(evs)s." - -msgid "Error getting key." -msgstr "Chyba při získávání klíče." - msgid "Error getting name server info." msgstr "Při získávání informací o jmenném serveru nastala chyba." -msgid "Error getting secret data." -msgstr "Chyba při získávání tajných dat." - -msgid "Error getting secret metadata." -msgstr "Chyba při získávání tajných popisných dat." - msgid "Error getting show fcns database info." msgstr "Při získávání informací o zobrazení databáze fcns nastala chyba." msgid "Error getting target pool name and array." msgstr "Chyba při získávání názvu cílového pole a zásoby." -#, python-format -msgid "Error happened during storage pool querying, %s." -msgstr "Při dotazu na zásobu úložiště se stala chyba, %s." - #, python-format msgid "Error in copying volume: %s" msgstr "Chyba při kopírování svazku %s" @@ -939,14 +855,6 @@ msgstr "Při vytváření dočasné zálohy nastala chyba." msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." msgstr "Při vytváření svazku nastala chyba: %(id)s z obrazu %(image_id)s." -#, python-format -msgid "Error on adding lun to consistency group. %s" -msgstr "Chyba při přidávání lun do skupiny jednotnosti. %s" - -#, python-format -msgid "Error on enable compression on lun %s." -msgstr "Chyba při povolení komprimace v lun %s." - #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" @@ -1019,9 +927,6 @@ msgstr "Chyba při nastavování zásady mezipaměti Flash na %s - došlo k výj msgid "Error starting coordination backend." msgstr "Chyba při spouštění podpůrné vrstvy pro koordinaci." -msgid "Error storing key." -msgstr "Chyba při ukládání klíče." - #, python-format msgid "Error unmapping volume: %s" msgstr "Chyba při zrušení mapování svazku: %s" @@ -1530,10 +1435,6 @@ msgstr "Nelze rozšířit svazek %(name)s z %(current_size)sGB na %(new_size)sGB msgid "Failed to find %(s)s. Result %(r)s" msgstr "Nelze najít %(s)s. Výsledek %(r)s" -#, python-format -msgid "Failed to find available iSCSI targets for %s." -msgstr "Nelze získat dostupné cíle iSCSI pro %s." - msgid "Failed to get IQN!" msgstr "Nelze získat IQN!" @@ -1657,9 +1558,6 @@ msgstr "Nelze otevřít svazek z %(path)s." msgid "Failed to present volume %(name)s (%(status)d)!" msgstr "Nelze darovat svazek %(name)s (%(status)d)!" -msgid "Failed to query migration status of LUN." -msgstr "Dotaz na stav přesunu LUN selhal." - msgid "Failed to re-export volume, setting to ERROR." msgstr "Nelze znovu exportovat svazek, je nastavován na ERROR." @@ -1823,14 +1721,6 @@ msgstr "" "Nelze aktualizovat popisná data %(volume_id)s pomocí popisných dat zadaného " "snímku %(snapshot_id)s." 

-#, python-format
-msgid ""
-"Failed to update initiator data for initiator %(initiator)s and backend "
-"%(backend)s"
-msgstr ""
-"Nelze aktualizovat data zavedení ovladače pro zavaděč %(initiator)s a "
-"podpůrnou vrstvu %(backend)s"
-
 #, python-format
 msgid "Failed to update quota donating volume transfer id %s"
 msgstr "Nelze aktualizovat id přenosu svazku dodávajícího kvótu %s."
@@ -1878,10 +1768,6 @@ msgstr "Nelze zapisovat do/etc/scst.conf."
 msgid "Failed to write persistence file: %(path)s."
 msgstr "Nelze zapsat soubor přetrvání: %(path)s."

-#, python-format
-msgid "Failed updating %(object_type)s %(object_id)s with %(update)s"
-msgstr "Nelze aktualizovat %(object_type)s %(object_id)s pomocí %(update)s"
-
 #, python-format
 msgid ""
 "Failed updating %(snapshot_id)s metadata using the provided volumes "
@@ -2000,10 +1886,6 @@ msgstr "Chyba získávání metody."
 msgid "Get replication status for volume failed."
 msgstr "Získání stavu replikace svazku selhalo."

-#, python-format
-msgid "HDP not found: %s"
-msgstr "HDP nenalezeno: %s"
-
 #, python-format
 msgid "Host PUT failed (%s)."
 msgstr "PUT hostitele selhal (%s)."
@@ -2042,10 +1924,6 @@ msgstr "Neplatné předání seznamu zpětného načtení: %s"
 msgid "Invalid hostname %(host)s"
 msgstr "Neplatný název hostitele %(host)s"

-#, python-format
-msgid "Invalid value for %(key)s, value is %(value)s."
-msgstr "Neplatná hodnota pro %(key)s, hodnota je %(value)s."
-
 msgid ""
 "Issuing a fail-over failed because replication is not properly configured."
 msgstr ""
@@ -2102,10 +1980,6 @@ msgstr "Vytvoření snímku Lun pro svazek %(vol)s a snímek %(snap)s selhalo!"
 msgid "Lun delete for %s failed!"
 msgstr "Smazání Lun pro %s selhalo!"

-#, python-format
-msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!"
-msgstr "Smazání snímku Lun pro svazek %(vol)s a snímek %(snap)s selhalo!"
-
 msgid "Lun mapping returned null!"
 msgstr "Mapování Lun vrátilo prázdný obsah!"
@@ -2140,10 +2014,6 @@ msgstr ""
 msgid "Message: %s"
 msgstr "Zpráva: %s"

-#, python-format
-msgid "Migration of LUN %s failed to complete."
-msgstr "Přesun LUN %s nelze dokončit."
-
 msgid "Model update failed."
 msgstr "Aktualizace modelu selhala."
@@ -2163,10 +2033,6 @@ msgstr "Selhání připojení pro %(share)s."
 msgid "Multiple replay profiles under name %s"
 msgstr "Nalezeno mnoho profilů rychlého načtení s názvem %s."

-#, python-format
-msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s"
-msgstr "Sdílení NFS %(share)s nemá žádnou položku o službě: %(svc)s -> %(hdp)s"
-
 msgid "No CLI output for firmware version check"
 msgstr ""
 "U kontroly verzi firmware nebyl žádný výstup v rozhraní příkazového řádku"
@@ -2190,14 +2056,6 @@ msgstr ""
 "Není vyžadována žádná činnost. Svazek: %(volumeName)s již je součástí "
 "kombinace slo/vytížení: %(targetCombination)s."

-#, python-format
-msgid "No configuration found for service: %s"
-msgstr "Pro službu nebylo nalezeno žádné nastavení: %s"
-
-#, python-format
-msgid "No configuration found for service: %s."
-msgstr "Pro službu nebylo nalezeno žádné nastavení: %s."
-
 #, python-format
 msgid ""
 "No snapshots found in database, but %(path)s has backing file "
@@ -2496,9 +2354,6 @@ msgid ""
 "The connector does not contain the required information: wwpns is missing"
 msgstr "Konektor neobsahuje požadované informace: wwpns chybí"

-msgid "The given extra_spec or valid_values is None."
-msgstr "Zadané dodatečné specifikace, nebo platné hodnoty jsou None."
- msgid "The list of iscsi_ip_addresses is empty" msgstr "Seznam ip adres iscsi je prázdný." @@ -3027,9 +2882,6 @@ msgstr "" "smazání: %(vol_id)s selhalo, standardní výstup: %(out)s.\n" "chybový výstup: %(err)s." -msgid "delete_vol: provider location empty." -msgstr "Smazání svazku: Umístění poskytovatele je prázdné." - #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "Zajištění exportu: Svazek %s nebyl nalezen v úložišti." @@ -3044,10 +2896,6 @@ msgstr "Při obnově statistik svazku došlo k chybě" msgid "horcm command timeout." msgstr "Příkazu horcm vypršel časový limit." -#, python-format -msgid "iSCSI portal not found for service: %s" -msgstr "Nenalezen žádný portál iSCSI pro službu: %s" - #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " diff --git a/cinder/locale/cs/LC_MESSAGES/cinder-log-info.po b/cinder/locale/cs/LC_MESSAGES/cinder-log-info.po index 5370c598a..c83cda8d2 100644 --- a/cinder/locale/cs/LC_MESSAGES/cinder-log-info.po +++ b/cinder/locale/cs/LC_MESSAGES/cinder-log-info.po @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev522\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-31 10:23+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -66,10 +66,6 @@ msgstr "" "Vrácená data:%(res)s\n" "\n" -#, python-format -msgid "%(element)s: %(val)s" -msgstr "%(element)s: %(val)s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" @@ -106,10 +102,6 @@ msgstr "" "3PAR vlun pro svazek '%(name)s' byl smazán, ale hostitel '%(host)s' nebyl, " "protože: %(reason)s" -#, python-format -msgid "AUTH properties: %(authProps)s" -msgstr "Vlastnosti ověření: %(authProps)s" - #, python-format msgid "AUTH properties: %s." msgstr "Vlastnosti ověření: %s." @@ -157,22 +149,10 @@ msgstr "Připojení svazku úspěšně dokončeno." msgid "Availability Zones retrieved successfully." msgstr "Zóny dostupnosti úspěšně získány." -#, python-format -msgid "Available services: %s" -msgstr "Dostupné služby: %s" - -#, python-format -msgid "Available services: %s." -msgstr "Dostupné služby: %s." - #, python-format msgid "Backend name is %s." msgstr "Název podpůrné vrstvy je %s." -#, python-format -msgid "Backend type: %s" -msgstr "Typ podpůrné vrstvy: %s" - #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "Záloha virtuálního stroje %(backing)s přejmenována na %(new_name)s." @@ -204,10 +184,6 @@ msgstr "" msgid "Backup service: %s." msgstr "Zálohovací služba: %s." -#, python-format -msgid "Bandwidth limit is: %s." -msgstr "Limit šířky pásma je: %s." - #, python-format msgid "Begin backup of volume %s." msgstr "Spuštění zálohování svazku %s." @@ -225,10 +201,6 @@ msgstr "Verze CONCERTO: %s" msgid "Calling os-brick to detach ScaleIO volume." msgstr "Volání os-brick pro odpojení svazku ScaleIO." -#, python-format -msgid "Cancelling Migration from LUN %s." -msgstr "Rušení přesunu z LUN %s." - #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because cluster " @@ -269,16 +241,6 @@ msgstr "" "Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože je " "z jiné podpůrné vrstvy." 

-#, python-format
-msgid ""
-"Capacity stats for SRP pool %(poolName)s on array %(arrayName)s "
-"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb="
-"%(free_capacity_gb)lu"
-msgstr ""
-"Statistiky kapacity pro zásobu SRP %(poolName)s v poli %(arrayName)s: "
-"celková kapacita v gb: %(total_capacity_gb)lu, volná kapacita v gb: "
-"%(free_capacity_gb)lu"
-
 #, python-format
 msgid "Cgsnapshot %s: creating."
 msgstr "Snímek skupiny jednotnosti %s: vytváření"
@@ -295,23 +257,11 @@ msgstr "Kontrolování klona obrazu %s ze sdílení glance."
 msgid "Checking origin %(origin)s of volume %(volume)s."
 msgstr "Kontrola původu %(origin)s svazku %(volume)s."

-#, python-format
-msgid ""
-"Cinder ISCSI volume with current path %(path)s is no longer being managed. "
-"The new name is %(unm)s."
-msgstr ""
-"Svazek ISCSI Cinder se současnou cestou %(path)s již není spravován. Nový "
-"název je %(unm)s."
-
 #, python-format
 msgid ""
 "Cinder NFS volume with current path \"%(cr)s\" is no longer being managed."
 msgstr "Svazek NFS Cinder se současnou cestou \"%(cr)s\" již není spravován."

-#, python-format
-msgid "Cinder NFS volume with current path %(cr)s is no longer being managed."
-msgstr "Svazek NFS Cinder se současnou cestou %(cr)s již není spravován."
-
 msgid "Cinder secure environment indicator file exists."
 msgstr "Soubor indikující bezpečné prostředí Cinder existuje."
@@ -366,13 +316,6 @@ msgstr ""
 "název zásady výkonu %(perfpol-name)s, šifrování %(encryption)s, šifra "
 "%(cipher)s, vícenásobný zavaděč %(multi-initiator)s"

-#, python-format
-msgid ""
-"Cloning with volume_name %(vname)s clone_name %(cname)s export_path %(epath)s"
-msgstr ""
-"Klonování svazku s názvem %(vname)s, název klonu %(cname)s, cesta pro export "
-"%(epath)s"
-
 #, python-format
 msgid "CloudByte API executed successfully for command [%s]."
 msgstr "CloudByte API úspěšně provedeno pro příkaz [%s]."
@@ -391,10 +334,6 @@ msgstr "Dokončení přenosu svazku úspěšně provedeno."
 msgid "Completed: convert_to_base_volume: id=%s."
 msgstr "Dokončeno: Převod na základní svazek: id=%s."

-#, python-format
-msgid "Configured pools: %s"
-msgstr "Nastavené zásoby: %s"
-
 #, python-format
 msgid ""
 "Connect initialization info: {driver_volume_type: fibre_channel, data: "
@@ -416,22 +355,6 @@ msgstr ""
 msgid "Connector returning fcnsinfo-%s"
 msgstr "Konektor vrací fcnsinfo-%s"

-#, python-format
-msgid "Consistency group %(cg)s is created successfully."
-msgstr "Skupina jednotnosti %(cg)s byla úspěšně vytvořena."
-
-#, python-format
-msgid "Consistency group %s was deleted successfully."
-msgstr "Skupina jednotnosti %s byla úspěšně smazána."
-
-#, python-format
-msgid "Consistency group %s: created successfully"
-msgstr "Skupina jednotnosti %s: úspěšně vytvořena"
-
-#, python-format
-msgid "Consistency group %s: creating"
-msgstr "Skupina jednotnosti %s: vytváření"
-
 #, python-format
 msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s"
 msgstr "Převeden obraz o velikosti %(sz).2f MB rychlostí %(mbps).2f MB/s"
@@ -539,14 +462,6 @@ msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed."
 msgstr ""
 "Vytváření svazku %(volume_id)s ze snímku %(snapshot_id)s bylo dokončeno."

-#, python-format
-msgid ""
-"Create Volume: %(volume)s Size: %(size)s pool: %(pool)s provisioning: "
-"%(provisioning)s tiering: %(tiering)s "
-msgstr ""
-"Vytvoření svazku: %(volume)s, velikost: %(size)s, zásoba: %(pool)s, "
-"poskytování: %(provisioning)s, vrstvení: %(tiering)s "
-
 #, python-format
 msgid ""
 "Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: "
@@ -564,9 +479,6 @@ msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s."
 msgstr ""
 "Vytváření zálohy bylo zahájeno: záloha: %(backup_id)s svazek: %(volume_id)s."

-msgid "Create consistency group completed successfully."
-msgstr "Vytvoření skupiny jednotnosti úspěšně dokončeno."
-
 #, python-format
 msgid "Create consistency group from source-%(source)s completed successfully."
 msgstr "Vytvoření skupiny jednotnosti ze zdroje %(source)s úspěšně dokončeno."
@@ -588,10 +500,6 @@ msgstr ""
 msgid "Create snapshot from volume %s"
 msgstr "Vytvořit snímek ze svazku %s"

-#, python-format
-msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
-msgstr "Vytvoření snímku: %(snapshot)s, svazek: %(volume)s"
-
 #, python-format
 msgid ""
 "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: "
@@ -784,10 +692,6 @@ msgstr "Smazat skupiny jednotnosti: %(group)s."
 msgid "Delete Snapshot %(snapshot_id)s completed."
 msgstr "Mazání snímku %(snapshot_id)s dokončeno."

-#, python-format
-msgid "Delete Snapshot: %(snapshot)s"
-msgstr "Smazat snímek: %(snapshot)s"
-
 #, python-format
 msgid "Delete Snapshot: %(snapshot)s."
 msgstr "Smazání snímku: %(snapshot)s."
@@ -914,10 +818,6 @@ msgstr "Mazání snímku: %s"
 msgid "Deleting stale snapshot: %s"
 msgstr "Mazání starého snímku: %s"

-#, python-format
-msgid "Deleting unneeded host %(host_name)r."
-msgstr "Mazání nepotřebného hostitele %(host_name)r."
-
 #, python-format
 msgid "Deleting volume %s "
 msgstr "Mazání svazku %s"
@@ -989,10 +889,6 @@ msgstr ""
 msgid "Driver initialization completed successfully."
 msgstr "Zavedení ovladače úspěšně dokončeno."

-#, python-format
-msgid "Driver stats: %s"
-msgstr "Statistiky ovladače: %s"
-
 #, python-format
 msgid ""
 "E-series proxy API version %(version)s does not support full set of SSC "
@@ -1017,10 +913,6 @@ msgstr "Ovladač EQL: spouštění \"%s\"."
 msgid "Editing Volume %(vol)s with mask %(mask)s"
 msgstr "Upravování svazku %(vol)s maskou %(mask)s"

-#, python-format
-msgid "Elapsed time for clear volume: %.2f sec"
-msgstr "Uplynulá doba čištění svazku: %.2f vteřin"
-
 msgid "Embedded mode detected."
 msgstr "Zjištěn režim vnoření."
@@ -1092,10 +984,6 @@ msgstr ""
 "%(arrayName)s. Celková kapacita v gb: %(total_capacity_gb)lu, volná kapacita "
 "v gb: %(free_capacity_gb)lu."

-#, python-format
-msgid "FC Initiators %(in)s of %(ins)s need registration"
-msgstr "Zavaděče FC %(in)s v %(ins)s je třeba registrovat"
-
 #, python-format
 msgid "Failed to create host: %(name)s. Check if it exists on the array."
 msgstr "Nelze vytvořit hostitele: %(name)s. Zkontrolujte zda existuje v poli."
@@ -1119,10 +1007,6 @@ msgstr "Vyvolána chyba: %s"
 msgid "Fetched vCenter server version: %s"
 msgstr "Získaná verze serveru vCenter: %s"

-#, python-format
-msgid "Filter %(cls_name)s returned %(obj_len)d host(s)"
-msgstr "Filtr %(cls_name)s vrátil %(obj_len)d hostitelů"
-
 #, python-format
 msgid "Filtered targets for SAN is: %s"
 msgstr "Filtrované cíle pro SAN jsou: %s"
@@ -1195,14 +1079,6 @@ msgstr ""
 msgid "Generating transfer record for volume %s"
 msgstr "Vytváření záznamu o přenosu pro svazek %s"

-#, python-format
-msgid "Get FC targets %(tg)s to register initiator %(in)s."
-msgstr "Získávání cílů FC %(tg)s pro registraci zavaděče %(in)s."
-
-#, python-format
-msgid "Get ISCSI targets %(tg)s to register initiator %(in)s."
-msgstr "Získávání cílů ISCSI %(tg)s pro registraci zavaděče %(in)s."
-
 msgid "Get all snapshots completed successfully."
 msgstr "Získání všech snímků úspěšně dokončeno."
@@ -1213,10 +1089,6 @@ msgstr "Získání všech svazků úspěšně dokončeno."
 msgid "Get domain by name response: %s"
 msgstr "Získání domény pomocí odpovědi názvem: %s"

-#, python-format
-msgid "Get service: %(lbl)s->%(svc)s"
-msgstr "Získání služby: %(lbl)s->%(svc)s"
-
 msgid "Get snapshot metadata completed successfully."
 msgstr "Získání popisných dat snímku úspěšně dokončeno."
@@ -1247,10 +1119,6 @@ msgstr "Získávání informací o svazku s názvem %s"
 msgid "Going to perform request again %s with valid token."
 msgstr "Žádost %s bude znovu vytvořena s novou známkou."

-#, python-format
-msgid "HDP list: %s"
-msgstr "Seznam HDP: %s"
-
 #, python-format
 msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s"
 msgstr "Společné části HPE3PAR %(common_ver)s, klient hp3par %(rest_ver)s"
@@ -1267,17 +1135,9 @@ msgstr "Vyvolána výjimka HTTP: %s"
 msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s."
 msgstr "ID hypermetra: %(metro_id)s. ID vzdáleného LUN: %(remote_lun_id)s."

-#, python-format
-msgid "ISCSI properties: %(properties)s"
-msgstr "Vlastnosti ISCSI: %(properties)s"
-
 msgid "ISCSI provider_location not stored, using discovery."
 msgstr "Umístění poskytovatele ISCSI neuloženo, bude se zjišťovat."

-#, python-format
-msgid "ISCSI volume is: %(volume)s"
-msgstr "Svazek ISCSI je: %(volume)s"
-
 #, python-format
 msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s."
 msgstr "Obraz %(pool)s/%(image)s závisí na snímku %(snap)s."
@@ -1343,14 +1203,6 @@ msgstr "Název skupiny zavaděče je %(grp)s pro zavaděč %(iname)s"
 msgid "LUN %(id)s extended to %(size)s GB."
 msgstr "LUN %(id)s rozšířen na %(size)s GB."

-#, python-format
-msgid "LUN %(lun)s extended to %(size)s GB."
-msgstr "LUN %(lun)s rozšířen na %(size)s GB."
-
-#, python-format
-msgid "LUN %(lun)s of size %(sz)s MB is created."
-msgstr "Vytvořen LUn %(lun)s o velikosti %(sz)s MB."
-
 #, python-format
 msgid "LUN with given ref %s need not be renamed during manage operation."
 msgstr ""
@@ -1482,13 +1334,6 @@ msgstr "Je třeba odstranit zónu FC, probíhá sestavování mapu cílů zavad
 msgid "Need to remove FC Zone, building initiator target map."
 msgstr "Je třeba odstranit zónu FC, probíhá sestavování mapu cílů zavaděče."

-msgid ""
-"Neither security file nor plain text credentials are specified. Security "
-"file under home directory will be used for authentication if present."
-msgstr ""
-"Není zadán bezpečnostní soubor ani ověřovací údaje v prostém textu. Pokud "
-"existuje bezpečnostní soubor v domovském adresáři, bude použit pro ověření."
-
 #, python-format
 msgid ""
 "NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s "
@@ -1558,10 +1403,6 @@ msgstr "Přepisování svazku %(volume_id)s obnovou zálohy %(backup_id)s"
 msgid "Params for add volume request: %s."
 msgstr "Parametry pro žádost o přidání svazku: %s."

-#, python-format
-msgid "Parse_loc: %s"
-msgstr "Zpracování umístění: %s"
-
 #, python-format
 msgid "Performing post clone for %s"
 msgstr "Provádění operací po klonování pro %s"
@@ -1570,9 +1411,6 @@ msgstr "Provádění operací po klonování pro %s"
 msgid "Performing secure delete on volume: %s"
 msgstr "Provádění bezpečného smazání svazku: %s"

-msgid "Plain text credentials are being used for authentication"
-msgstr "Pro ověření jsou použity přihlašovací údaje v prostém textu"
-
 #, python-format
 msgid "Pool id is %s."
 msgstr "ID zásoby je %s."
@@ -1793,14 +1631,6 @@ msgstr "Obnovení smazání svazku úspěšné dokončeno."
 msgid "Resuming delete on backup: %s."
 msgstr "Pokračování ve smazání zálohy: %s."

-#, python-format
-msgid "Retrieving secret for service: %s."
-msgstr "Získávání tajného klíče pro službu: %s."
-
-#, python-format
-msgid "Retrieving target for service: %s."
-msgstr "Získávání cílů pro službu: %s."
-
 #, python-format
 msgid "Return FC info is: %s."
 msgstr "Informace o FC vrátily: %s."
@@ -1877,33 +1707,13 @@ msgstr "Žádost o přetypování svazku úspěšně vytvořena."
 msgid "Retype was to same Storage Profile."
 msgstr "Přetypování bylo na stejný profil úložiště."

-#, python-format
-msgid "Review shares: %s"
-msgstr "Kontrola sdílení: %s"
-
 msgid "Roll detaching of volume completed successfully."
 msgstr "Provedení odpojení svazku úspěšně dokončeno."

-#, python-format
-msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s"
-msgstr ""
-"Spouštění úkolu shlukování nejnovějšího ssc pro %(server)s a virtuálního "
-"serveru %(vs)s"
-
-#, python-format
-msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s"
-msgstr ""
-"Spouštění úkolu obnovy zastaralého ssc pro %(server)s a virtuálního serveru "
-"%(vs)s"
-
 #, python-format
 msgid "Running with vmemclient version: %s"
 msgstr "Spuštěno s vmemclient verze %s"

-#, python-format
-msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s"
-msgstr "Uložit informace o službě pro %(svc)s -> %(hdp)s, %(path)s"
-
 #, python-format
 msgid ""
 "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s "
@@ -1973,10 +1783,6 @@ msgstr "Sezení mohlo vypršet. Bude proveden pokus o další přihlášení"
 msgid "Set newly managed Cinder volume name to %(name)s."
 msgstr "Nastavit název nově spravovaného svazku Cinder na %(name)s."

-#, python-format
-msgid "Set tgt CHAP secret for service: %s."
-msgstr "Nastavování tajného klíče cíle CHAP pro službu: %s."
-
 #, python-format
 msgid "Setting host %(host)s to %(state)s."
 msgstr "Nastavování hostitele %(host)s na %(state)s."
@@ -1993,11 +1799,6 @@ msgstr "Nastavování svazku %(vol)s příznakem online %(flag)s"
 msgid "Skipping deletion of volume %s as it does not exist."
 msgstr "Přeskakování mazání svazku %s protože neexistuje."

-#, python-format
-msgid "Skipping ensure_export. No iscsi_target provision for volume: %s"
-msgstr ""
-"Zajištění exportu přeskočeno. Svazku %s není poskytnut žádný cíl iscsi."
-
 #, python-format
 msgid ""
 "Skipping image volume %(id)s because it is not accessible by current Tenant."
@@ -2047,10 +1848,6 @@ msgstr "Snímek %s neexistuje v podpůrné vrstvě,"
 msgid "Snapshot %s not found"
 msgstr "Snímek %s nenalezen"

-#, python-format
-msgid "Snapshot %s was deleted successfully."
-msgstr "Snímek %s byl úspěšně smazán."
-
 #, python-format
 msgid "Snapshot '%(ref)s' renamed to '%(new)s'."
 msgstr "Snímek '%(ref)s' přejmenován na '%(new)s'. "
@@ -2124,10 +1921,6 @@ msgstr "Spouštění ovladače svazku %(driver_name)s (%(version)s)"
 msgid "Storage Group %(storageGroupName)s successfully deleted."
 msgstr "Skupina úložiště %(storageGroupName)s úspěšně smazána."

-#, python-format
-msgid "Storage Group %s was empty."
-msgstr "Skupina úložiště %s byla prázdná."
-
 #, python-format
 msgid "Storage group not associated with the policy. Exception is %s."
 msgstr "Skupina úložiště není přidružena k zásadě. Výjimka je %s."
@@ -2148,17 +1941,6 @@ msgstr "Uživatel %s se úspěšně přihlásil"
 msgid "Successfully added %(volumeName)s to %(sgGroupName)s."
 msgstr "%(volumeName)s úspěšně přidáno do %(sgGroupName)s."

-#, python-format
-msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s"
-msgstr "Úspěšně proveden úkol ssc pro %(server)s a virtuální server %(vs)s"
-
-#, python-format
-msgid ""
-"Successfully completed stale refresh job for %(server)s and vserver %(vs)s"
-msgstr ""
-"Úspěšně proveden úkol obnovy zastaralého ssc pro %(server)s a virtuální "
-"server %(vs)s"
-
 #, python-format
 msgid "Successfully copied disk at: %(src)s to: %(dest)s."
 msgstr "Úspěšně zkopírován disk %(src)s do %(dest)s."
@@ -2308,10 +2090,6 @@ msgid ""
 msgstr ""
 "Systém s adresami kontroléru [%s] není zaregistrován u internetové služby."

-#, python-format
-msgid "Target is %(map)s! Targetlist = %(tgtl)s."
-msgstr "Cíl je %(map)s! Seznam cílů = %(tgtl)s."
-
 #, python-format
 msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s."
 msgstr "Cílové wwns v zamaskování %(maskingView)s: %(targetWwns)s."
@@ -2586,18 +2364,6 @@ msgstr "Použita potlačena verze hostitele vmware z nastavení: %s"
 msgid "Using pool %(pool)s instead of %(cpg)s"
 msgstr "Používání zásoby %(pool)s místo %(cpg)s"

-#, python-format
-msgid "Using security file in %s for authentication"
-msgstr "Pro ověření je použit bezpečnostní soubor v %s"
-
-#, python-format
-msgid "Using service label: %s"
-msgstr "Použita jmenovka služby: %s"
-
-#, python-format
-msgid "Using target label: %s."
-msgstr "Použita jmenovka cíle: %s."
-
 #, python-format
 msgid "Value with type=%s is not serializable"
 msgstr "Hodnota typ=%s není serializovatelná"
@@ -2764,10 +2530,6 @@ msgid "Volume with given ref %s need not be renamed during manage operation."
 msgstr ""
 "Svazek zadaným odkazem %s není třeba během operace správy přejmenovávat."

-#, python-format
-msgid "Volume with the name %s wasn't found, can't unmanage"
-msgstr "Svazek s názvem %s nebyl nalezen, nelze zrušit správu"
-
 #, python-format
 msgid ""
 "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, "
@@ -2834,10 +2596,6 @@ msgstr ""
 "Operace kontroly kopie svazku: Svazek %(vol)s nemá zadanou operaci "
 "kopírování virtuálního disku: původní=%(orig)s, nové=%(new)s."

-#, python-format
-msgid "_get_service_target hdp: %s."
-msgstr "Získávání cíle služby hdp: %s"
-
 #, python-format
 msgid "_get_tgt_ip_from_portgroup: Get ip: %s."
 msgstr "Získání cílové iup adrey ze skupiny portů: Získaná IP adresa: %s."
@@ -2875,10 +2633,6 @@ msgstr "Snímek skupiny jednotnosti %s: úspěšně smazán"
 msgid "cgsnapshot %s: deleting"
 msgstr "Snímek skupiny jednotnosti %s: mazání"

-#, python-format
-msgid "config[services]: %s."
-msgstr "nastavení[služby]: %s."
-
 #, python-format
 msgid ""
 "create_hostgroup_with_check. Create hostgroup success. hostgroup name: "
@@ -2894,10 +2648,6 @@ msgstr ""
 "Vytvořit skupinu hostitele s kontrolou, název skupiny hostitele: %(name)s, "
 "id skupiny hostitele: %(id)s"

-#, python-format
-msgid "create_volume: create_lu returns %s"
-msgstr "Vytvoření svazku: Vytvoření LU vrátilo %s"
-
 #, python-format
 msgid ""
 "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: "
@@ -2906,14 +2656,6 @@ msgstr ""
 "Vytváření svazku ze snímku: ID zdrojového lun: %(src_lun_id)s,ID cílového "
 "lun: %(tgt_lun_id)s, název kopie: %(copy_name)s."

-#, python-format
-msgid "del_iscsi_conn: hlun not found %s."
-msgstr "Smazání připojení iSCSI: hlun nenalezen %s."
-
-#, python-format
-msgid "delete lun loc %s"
-msgstr "Smazání umístění lun %s"
-
 #, python-format
 msgid ""
 "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: "
@@ -2922,35 +2664,15 @@ msgstr ""
 "Provést mapování, skupina lun: %(lun_group)s, id zobrazení: %(view_id)s, id "
 "lun: %(lun_id)s."

-#, python-format
-msgid "do_setup: %s"
-msgstr "Zavedení: %s"
-
 #, python-format
 msgid "free capacity of pool %(pool)s is: %(free)s, total capacity: %(total)s."
 msgstr ""
 "Volná kapacita zásoby %(pool)s je: %(free)s, celková kapacita: %(total)s."

-#, python-format
-msgid "iSCSI Initiators %(in)s of %(ins)s need registration."
-msgstr "Zavaděče iSCSI %(in)s v %(ins)s je třeba registrovat."
-
-#, python-format
-msgid "iSCSI portal found for service: %s"
-msgstr "Portál iSCSI nalezen pro službu: %s"
-
 #, python-format
 msgid "igroup %(grp)s found for initiator %(iname)s"
 msgstr "Pro zavaděč %(iname)s nalezena skupina zavaděče %(grp)s"

-#, python-format
-msgid "initialize volume %(vol)s connector %(conn)s"
-msgstr "Zavedení konektoru %(conn)s pro svazek %(vol)s"
-
-#, python-format
-msgid "initialize_ connection: %(vol)s:%(initiator)s"
-msgstr "Zavedení spojení: %(vol)s:%(initiator)s"
-
 #, python-format
 msgid "initialize_connection success. Return data: %s."
 msgstr "Spojení úspěšně zavedeno. Vrácená data: %s"
@@ -2980,32 +2702,9 @@ msgid ""
 "initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s."
 msgstr "Zavedení spojení s FC: Zavaděč %(wwpns)s, název svazku: %(volume)s."

-#, python-format
-msgid "initiate: connection %s"
-msgstr "Zavedení: Spojení %s"
-
 msgid "initiator has no password while using chap,adding it"
 msgstr "Zavaděč nemá žádné heslo při používání chap, heslo je přidáno"

-msgid ""
-"initiator_auto_registration: False. Initiator auto registration is not "
-"enabled. Please register initiator manually."
-msgstr ""
-"initiator_auto_registration: False. Automatická registrace zavaděče není "
-"povolena. Prosím registrujte ho ručně."
-
-#, python-format
-msgid "iops limit is: %s."
-msgstr "Limit iops je: %s."
-
-#, python-format
-msgid "iscsi_initiators: %s"
-msgstr "Zavaděče iscsi: %s"
-
-#, python-format
-msgid "location is: %(location)s"
-msgstr "Umístění je: %(location)s"
-
 #, python-format
 msgid ""
 "migrate_volume_completion is cleaning up an error for volume %(vol1)s "
@@ -3018,10 +2717,6 @@ msgstr ""
 msgid "new cloned volume: %s"
 msgstr "Nový klonovaný svazek: %s"

-#, python-format
-msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s"
-msgstr "Informace NFS: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s"
-
 #, python-format
 msgid "open_connection to %(ssn)s at %(ip)s"
 msgstr "Připojení otevřeno do %(ssn)s na adrese %(ip)s"
@@ -3030,14 +2725,6 @@ msgstr "Připojení otevřeno do %(ssn)s na adrese %(ip)s"
 msgid "setting volume %s to error_restoring (was restoring-backup)."
 msgstr "Nastavování svazku %s na error_restoring (stav byl restoring-backup)."

-#, python-format
-msgid "share: %(share)s -> %(info)s"
-msgstr "Sdílení: %(share)s -> %(info)s"
-
-#, python-format
-msgid "share: %s incorrect entry"
-msgstr "Sdílení: Nesprávná položka %s"
-
 #, python-format
 msgid "smis_do_iscsi_discovery is: %(out)s."
 msgstr "Provedení zjištění iSCSI pomocí SMI-S předalo: %(out)s."
@@ -3050,23 +2737,11 @@ msgstr "snímek %s neexistuje"
 msgid "source volume for cloning: %s"
 msgstr "Zdrojový svazek pro klonování: %s"

-#, python-format
-msgid "stats: stats: %s."
-msgstr "Statistiky: %s."
-
 #, python-format
 msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s."
 msgstr ""
 "Zastavení snímku: Název snímku: %(snapshot)s, název svazku: %(volume)s."

-#, python-format
-msgid "targetlist: %s"
-msgstr "seznam cílů: %s"
-
-#, python-format
-msgid "terminate: connection %s"
-msgstr "Ukončení: Spojení %s"
-
 #, python-format
 msgid "terminate_connection volume: %(volume)s, connector: %(con)s"
 msgstr "Ukončení připojení se svazkem: %(volume)s, konektor: %(con)s"
diff --git a/cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po b/cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po
index 855c6211a..33d6865bc 100644
--- a/cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po
+++ b/cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po
@@ -8,9 +8,9 @@
 # Andreas Jaeger , 2016. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: cinder 9.0.0.0b2.dev1\n"
+"Project-Id-Version: cinder 9.0.0.0b3.dev487\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-06-01 22:30+0000\n"
+"POT-Creation-Date: 2016-08-30 03:17+0000\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -51,10 +51,6 @@ msgstr ""
 msgid "3PAR vlun for volume %(name)s not found on host %(host)s"
 msgstr "3PAR vlun pro svazek %(name)s nenalezen v hostiteli %(host)s"

-#, python-format
-msgid "AttachSnapTask.revert: detach mount point %s"
-msgstr "Vrácení úkolu připojení snímku: odpojení bodu připojení %s"
-
 msgid "Attempted to delete a space that's not there."
 msgstr "Pokus smazat prostor který neexistuje."
@@ -88,23 +84,6 @@ msgstr ""
 "Zálohovací služba %(service)s nepodporuje ověřování. Záloha s id %(id)s není "
 "ověřena. Ověřování přeskočeno."

-msgid ""
-"Both 'storagetype:prvosioning' and 'provisioning:type' are set in the extra "
-"specs, the value of 'provisioning:type' will be used. The key 'storagetype:"
-"provisioning' may be deprecated in the next release."
-msgstr ""
-"V dodatečných specifikacích jsou zadány 'storagetype:prvosioning' a "
-"'provisioning:type', bude použita hodnota z 'provisioning:type'. Klíč "
Klíč " -"'storagetype:provisioning' může být v příští verzi zastaralý." - -#, python-format -msgid "CG %(cg_name)s does not exist. Message: %(msg)s" -msgstr "Skupina jednotnosti %(cg_name)s neexistuje. Zpráva: %(msg)s" - -#, python-format -msgid "CG %(cg_name)s is deleting. Message: %(msg)s" -msgstr "Probíhá mazání skupiny jednotnosti %(cg_name)s. Zpráva: %(msg)s" - #, python-format msgid "CHAP is enabled, but server secret not configured on server %s" msgstr "CHAP je povolen, ale soukromý klíč serveru není nastaven na serveru %s" @@ -140,10 +119,6 @@ msgstr "" "Nelze vrátit přejmenování svazku zpět; starý název byl %(old_name)s a nový " "název je %(new_name)s." -#, python-format -msgid "Cgsnapshot name %(name)s already exists. Message: %(msg)s" -msgstr "Snímek skupiny jednotnosti %(name)s již existuje. Zpráva: %(msg)s" - #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "Změna využití se sníží na méně než 0 pro následující zdroje: %s" @@ -175,18 +150,6 @@ msgstr "" "zastaralé. Byly nahrazeny volbami use_chap_auth, chap_username a " "chap_password respectively se stejnou funkcí." -#, python-format -msgid "Consistency group %(name)s already exists. Message: %(msg)s" -msgstr "Skupina jednotnosti %(name)s již existuje. Zpráva: %(msg)s" - -#, python-format -msgid "" -"CopySnapshotTask.revert: delete the copied snapshot %(new_name)s of " -"%(source_name)s." -msgstr "" -"Vrácení úkolu kopírování snímku: Mazání kopírovaného snímku %(new_name)s z " -"%(source_name)s." - #, python-format msgid "Could not create target because it already exists for volume: %s" msgstr "Nelze vytvořit cíl protože již ve svazku existuje: %s" @@ -203,24 +166,6 @@ msgstr "Nelze získat informace o zásobě (%s)!" msgid "Could not get status for %(name)s (%(status)d)." msgstr "Nelze získat stav %(name)s (%(status)d)." -#, python-format -msgid "CreateDestLunTask.revert: delete temp lun %s" -msgstr "Vrácení úkolu vytvoření cílového LUN: Mazání dočasného lun %s" - -#, python-format -msgid "CreateSMPTask.revert: delete mount point %s" -msgstr "Vrácení vytvoření úkolu SMPT: Smazání bodu připojení %s" - -#, python-format -msgid "CreateSnapshotTask.revert: delete temp cgsnapshot %s" -msgstr "" -"Vrácení úkolu vytvoření snímku: Mazání dočasného snímku skupiny jednotnosti " -"%s" - -#, python-format -msgid "CreateSnapshotTask.revert: delete temp snapshot %s" -msgstr "Vrácení úkolu vytvoření snímku: Mazání dočasného snímku %s" - #, python-format msgid "" "CreateStorageHardwareID failed. initiator: %(initiator)s, rc=%(rc)d, ret=" @@ -244,14 +189,6 @@ msgid "Delete Snapshot id not found. Removing from cinder: %(id)s Ex: %(msg)s" msgstr "" "ID mazaného snímku nenalezeno. Odstraňování z cinder: %(id)s Výjimka: %(msg)s" -#, python-format -msgid "" -"Delete the temporary cgsnapshot %(name)s failed. This temporary cgsnapshot " -"can be deleted manually. Message: %(msg)s" -msgstr "" -"Smazání dočasného snímku skupiny jednotnosti %(name)s selhalo. Tento snímek " -"lze smazat ručně. Zpráva: %(msg)s." - #, python-format msgid "Delete volume id not found. Removing from cinder: %(id)s Ex: %(msg)s" msgstr "" @@ -280,14 +217,6 @@ msgid "Detected volume stuck in %(curr_status)s status, setting to ERROR." msgstr "" "Objeven svazek zaseknutý ve stavu %(curr_status)s, nastavování na ERROR." -#, python-format -msgid "" -"Didn't get the pool information of the host %s. Storage assisted Migration " -"is not supported. The host may be using a legacy driver." 
-msgstr "" -"Z hostitele %s nebyly získány informace o zásobě. Přesun za pomocí úložiště " -"není podporován. Hostitel možná používá zastaralý ovladač." - msgid "Discover file retries exhausted." msgstr "Vyčerpány pokusy o zjištění souboru." @@ -342,9 +271,6 @@ msgstr "Při mazání popisovače nastala chyba: %s." msgid "Error occurred while deleting temporary disk: %s." msgstr "Při mazání dočasného disku nastala chyba: %s." -msgid "Error on parsing target_pool_name/target_array_serial." -msgstr "Chyba při zpracování názvu cílové zásoby a pole." - #, python-format msgid "Error refreshing volume info. Message: %s" msgstr "Chyba při obnovování informaci o svazku. Zpráva: %s" @@ -422,21 +348,10 @@ msgstr "" "Klíč dodatečné specifikace 'storagetype:pool' je zastaralý od verze ovladače " "5.1.0. Tento klíč bude ignorován." -msgid "" -"Extra spec key 'storagetype:provisioning' may be deprecated in the next " -"release. It is recommended to use extra spec key 'provisioning:type' instead." -msgstr "" -"Klíč dodatečné specifikace 'storagetype:provisioning' může být v příští " -"verzi zastaralý. Doporučuje se místo toho použít klíč 'provisioning:type'." - #, python-format msgid "FAST is enabled. Policy: %(fastPolicyName)s." msgstr "FAST je povoleno. Zásada: %(fastPolicyName)s." -#, python-format -msgid "Fail to connect host %(host)s back to storage group %(sg)s." -msgstr "Nelze připojit hostitele %(host)s zpět do skupiny úložiště %(sg)s." - #, python-format msgid "" "Failed target removal because target or ACL's couldn't be found for iqn: %s." @@ -480,10 +395,6 @@ msgstr "" "Nelze vytvořit svazek z mezipaměti obrazu-svazku, bude použito výchozí " "chování. Chyba: %(exception)s" -#, python-format -msgid "Failed to deregister %(itor)s because: %(msg)s." -msgstr "Nelze zrušit registraci %(itor)s protože: %(msg)s." - #, python-format msgid "Failed to destroy Storage Group %s." msgstr "Nelze zničit skupinu úložiště %s." @@ -500,24 +411,12 @@ msgstr "Odloučení specifikace qos %s selhalo." msgid "Failed to discard zero page: %s" msgstr "Nelze zahodit nulovou stránku: %s" -#, python-format -msgid "Failed to extract initiators of %s, so ignore deregistration operation." -msgstr "" -"Nelze extrahovat zavaděče %s, operace pro zrušení registrace je ignorována." - msgid "Failed to get Raid Snapshot ID and did not store in snapshot." msgstr "Nelze získat ID Raid snímku a ve snímku nebylo uloženo." msgid "Failed to get target pool id." msgstr "Nelze získat id cílové zásoby." -msgid "" -"Failed to get target_pool_name and target_array_serial. 'location_info' is " -"not in host['capabilities']." -msgstr "" -"Nelze získat název cílové zásoby a pole. 'location_info' není v " -"hostiteli['capabilities']." - #, python-format msgid "Failed to invoke ems. Message : %s" msgstr "Nelze zavolat ems. Zpráva: %s" @@ -563,12 +462,6 @@ msgstr "Dotaz na stav %(ret)d zásoby %(id)s selhal." msgid "Failed to refresh mounts, reason=%s" msgstr "Nelze obnovit připojení, důvod=%s" -#, python-format -msgid "" -"Failed to register %(itor)s to SP%(sp)s port %(portid)s because: %(msg)s." -msgstr "" -"Nelze registrovat %(itor)s do SP%(sp)s port %(portid)s protože: %(msg)s." - #, python-format msgid "Failed to restart horcm: %s" msgstr "Nelze restartovat horcm: %s" @@ -655,10 +548,6 @@ msgstr "" "Název synchronizace skupiny nenalezen v cílové skupině %(target)s na " "%(storageSystem)s." -#, python-format -msgid "HLU %(hlu)s has already been removed from %(sgname)s. Message: %(msg)s" -msgstr "HLU %(hlu)s již bylo odstraněno z %(sgname)s. 
Zpráva: %(msg)s" - #, python-format msgid "" "HPELeftHand API is version %(current)s. A minimum version of %(min)s is " @@ -684,14 +573,6 @@ msgstr "" "třeba změnit zásadu Nova, nebo je třeba v nastavení zadat výsadní účet pro " "Nova se všemi potřebnými oprávněními." -#, python-format -msgid "" -"Host %(host)s has already disconnected from storage group %(sgname)s. " -"Message: %(msg)s" -msgstr "" -"Hostitel %(host)s již byl odpojen od skupiny úložiště %(sgname)s. Zpráva: " -"%(msg)s" - msgid "" "Host exists without CHAP credentials set and has iSCSI attachments but CHAP " "is enabled. Updating host with new CHAP credentials." @@ -776,14 +657,6 @@ msgstr "" "Neplatný výsledek funkce goodness. Výsledek musí být mezi 0 až 100. " "Vypočtený výsledek '%s' :: Je použita její minimální hodnota 0" -#, python-format -msgid "" -"Invalid iSCSI port %(sp)s-%(port)s-%(vlan)s found in io_port_list, will be " -"ignored." -msgstr "" -"V seznamu portů vstupu/výstupu nalezen neplatný port iSCSI %(sp)s-%(port)s-" -"%(vlan)s, bude ignorován." - #, python-format msgid "Invalid trace flag: %s" msgstr "Neplatný příznak sledování: %s" @@ -795,34 +668,6 @@ msgstr "" "Toto není doporučený způsob používání ovladačů od NetApp. Pro dosažení této " "funkce prosím použijte NetAppDriver." -#, python-format -msgid "LUN %(name)s is already expanded. Message: %(msg)s" -msgstr "LUN %(name)s již je rozšířen. Zpráva: %(msg)s" - -#, python-format -msgid "LUN %(name)s is not ready for extension: %(out)s" -msgstr "LUN %(name)s není připraven na rozšíření: %(out)s" - -#, python-format -msgid "LUN %(name)s is not ready for snapshot: %(out)s" -msgstr "LUN %(name)s není připraven k pořízeni snímku: %(out)s" - -#, python-format -msgid "LUN already exists, LUN name %(name)s. Message: %(msg)s" -msgstr "LUN již existuje, název LUN %(name)s. Zpráva: %(msg)s" - -#, python-format -msgid "" -"LUN corresponding to %s is still in some Storage Groups.Try to bring the LUN " -"out of Storage Groups and retry the deletion." -msgstr "" -"LUN odpovídající %s je stále v některých skupinách úložiště. Zkuste ho " -"vyjmout ze skupin a znovu proveďte smazání." - -#, python-format -msgid "LUN is already deleted, LUN name %(name)s. Message: %(msg)s" -msgstr "LUN již je smazán, název LUN %(name)s. Zpráva: %(msg)s" - #, python-format msgid "" "LUN misalignment may occur for current initiator group %(ig_nm)s) with host " @@ -833,12 +678,6 @@ msgstr "" "se může objevit nevyrovnanost LUN. Prosím nastavte skupinu zavaděče ručně " "podle typu OS na hostiteli." -#, python-format -msgid "LUN with id %(remove_id)s is not present in cg %(cg_name)s, skip it." -msgstr "" -"LUN s id %(remove_id)s není přítomna ve skupině jednotnosti %(cg_name)s, je " -"přeskočena." - msgid "Least busy iSCSI port not found, using first iSCSI port in list." msgstr "" "Nejméně zaneprázdněný port iSCSI nenalezen, použit první port v seznamu." @@ -849,13 +688,6 @@ msgid "" msgstr "" "Lun není ve skupine lun. ID LUN %(lun_id)s, ID skupiny LUN: %(lungroup_id)s." -#, python-format -msgid "" -"Maximum number of Pool LUNs, %s, have been created. No more LUN creation can " -"be done." -msgstr "" -"Byl vytvořen maximální počet LUN zásoby %s. Nelze vytvořit žádné další LUN." - #, python-format msgid "Message - %s." msgstr "Zpráva - %s." @@ -872,9 +704,6 @@ msgid "No VLUN contained CHAP credentials. Generating new CHAP key." msgstr "" "Žádný VLUN neobsahoval přihlašovací údaje CHAP. Vytváření nového klíče CHAP." -msgid "No array serial number returned, set as unknown." 
-msgstr "Nebylo předáno sériové číslo pole, nastaveno na neznámé." - #, python-format msgid "No backing file found for %s, allowing snapshot to be deleted." msgstr "Pro %s nenalezen žádný zálohovací soubor, smazání snímku povoleno." @@ -896,9 +725,6 @@ msgstr "V maskování %(mv)s nenalezena žádná skupiny portů." msgid "No protection domain name or id was specified in configuration." msgstr "V nastavení nebylo zadán žádný název nebo id ochranné domény." -msgid "No shares found hence skipping ssc refresh." -msgstr "Sdílení nenalezena a proto bude obnovení ssc přeskočeno." - #, python-format msgid "No status payload for volume %s." msgstr "Žádný obsah stavu svazku %s." @@ -966,74 +792,6 @@ msgstr "" msgid "Property %s already exists." msgstr "Vlastnost %s již existuje." -#, python-format -msgid "Purity host deletion failed: %(msg)s." -msgstr "Smazání hostitele Purity selhalo: %(msg)s." - -#, python-format -msgid "" -"Quota %(s_name)s exceeded for %(s_pid)s, tried to create volume " -"(%(d_consumed)d volume(s) already consumed)." -msgstr "" -"Kvóta %(s_name)s překročena u %(s_pid)s, pokus o vytvoření svazku (již " -"využíváno (%(d_consumed)d svazků)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG backup " -"(%(d_consumed)dG of %(d_quota)dG already consumed)" -msgstr "" -"U %(s_pid)s překročena kvóta, pokus o vytvoření zálohy o velikosti " -"%(s_size)sG - (již využíváno (%(d_consumed)dG z %(d_quota)dG)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " -"(%(d_consumed)dG of %(d_quota)dG already consumed)." -msgstr "" -"U %(s_pid)s překročena kvóta, pokus o vytvoření snímku o velikosti " -"%(s_size)sG (již využíváno (%(d_consumed)dG z %(d_quota)dG)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " -"(%(d_consumed)dG of %(d_quota)dG already consumed)" -msgstr "" -"U %(s_pid)s překročena kvóta, pokus o vytvoření svazku o velikosti " -"%(s_size)sG (již využíváno (%(d_consumed)dG z %(d_quota)dG)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " -"(%(d_consumed)dG of %(d_quota)dG already consumed)" -msgstr "" -"U %(s_pid)s překročena kvóta, pokus o vytvoření svazku o velikosti " -"%(s_size)sG - (již využíváno (%(d_consumed)dG z %(d_quota)dG)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create backups (%(d_consumed)d " -"backups already consumed)" -msgstr "" -"U %(s_pid)s překročena kvóta, pokus o vytvoření záloh (již využíváno " -"(%(d_consumed)d záloh)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " -"snapshots already consumed)." -msgstr "" -"U %(s_pid)s překročena kvóta, pokus o vytvoření snímku (již využíváno " -"(%(d_consumed)d snímků)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d volumes " -"already consumed)" -msgstr "" -"U %(s_pid)s překročena kvóta, pokus o vytvoření svazku (již využíváno " -"(%(d_consumed)d svazků)." - #, python-format msgid "" "RBD image for backup %(backup)s of volume %(volume)s not found. Deleting " @@ -1090,10 +848,6 @@ msgstr "" "ScaleIO podporuje pouze svazky s rozdělením po 8GB. Nová velikost svazku je: " "%d." -#, python-format -msgid "See unavailable iSCSI target: %s" -msgstr "Prohlédněte si nedostupný cíl iSCSI: %s" - #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export." 
@@ -1112,27 +866,6 @@ msgstr "Snímek \"%s\" nenalezen."
 msgid "Snapshot %(name)s already exists. Message: %(msg)s"
 msgstr "Snímek %(name)s již existuje. Zpráva: %(msg)s"

-#, python-format
-msgid ""
-"Snapshot %(name)s for consistency group does not exist. Message: %(msg)s"
-msgstr "Snímek %(name)s pro skupinu jednotnosti neexistuje. Zpráva: %(msg)s"
-
-#, python-format
-msgid "Snapshot %(name)s is in use, retry. Message: %(msg)s"
-msgstr "Snímek %(name)s je používán, bude proveden nový pokus. Zpráva: %(msg)s"
-
-#, python-format
-msgid "Snapshot %(name)s may deleted already. Message: %(msg)s"
-msgstr "Snímek %(name)s již mohl být smazán. Zpráva: %(msg)s"
-
-#, python-format
-msgid ""
-"Snapshot %(snapname)s is attached to snapshot mount point %(mpname)s "
-"already. Message: %(msg)s"
-msgstr ""
-"Snímek %(snapname)s již je připojen k bodu připojení %(mpname)s. Zpráva: "
-"%(msg)s"
-
 #, python-format
 msgid "Snapshot %s already deleted."
 msgstr "Snímek %s již je smazán."
@@ -1141,37 +874,10 @@ msgstr "Snímek %s již je smazán."
 msgid "Snapshot still %(status)s Cannot delete snapshot."
 msgstr "Snímek je stále ve stavu %(status)s. Nelze ho smazat."

-#, python-format
-msgid "Start migration failed. Message: %s"
-msgstr "Zahájení přesunu selhalo. Zpráva: %s"
-
-#, python-format
-msgid "Storage Group %s is not found."
-msgstr "Skupina úložiště %s nebyla nalezena."
-
-#, python-format
-msgid "Storage Group %s is not found. Create it."
-msgstr "Skupina úložiště %s nenalezena. Bude vytvořena."
-
-#, python-format
-msgid "Storage Group %s is not found. terminate_connection() is unnecessary."
-msgstr "Skupina úložiště %s nenalezena. Není třeba ukončovat připojení."
-
-#, python-format
-msgid "Storage Pool '%(pool)s' is '%(state)s'."
-msgstr "Zásoba úložiště '%(pool)s' je '%(state)s'."
-
 #, python-format
 msgid "Storage group %(name)s already exists. Message: %(msg)s"
 msgstr "Skupina úložiště %(name)s již existuje. Zpráva: %(msg)s"

-#, python-format
-msgid ""
-"Storage group %(name)s doesn't exist, may have already been deleted. "
-"Message: %(msg)s"
-msgstr ""
-"Skupina úložiště %(name)s neexistuje, možná byla již smazána. Zpráva: %(msg)s"
-
 #, python-format
 msgid "Storage sync name not found for target %(target)s on %(storageSystem)s."
 msgstr ""
@@ -1252,14 +958,6 @@ msgstr ""
 msgid "The device %s won't be cleared."
 msgstr "Zařízení %s nebude vyčištěno."

-#, python-format
-msgid ""
-"The following specified storage pools do not exist: %(unexist)s. This host "
-"will only manage the storage pools: %(exist)s"
-msgstr ""
-"Následující zadané zásoby úložiště neexistují: %(unexist)s. Tento hostitel "
-"bude spravovat pouze tyto zásoby: %(exist)s"
-
 msgid ""
 "The option 'netapp_storage_pools' is deprecated and will be removed in the "
 "future releases. Please use the option 'netapp_pool_name_search_pattern' "
@@ -1288,18 +986,6 @@ msgstr ""
 msgid "The provisioning: %(provisioning)s is not valid."
 msgstr "Poskytování %(provisioning)s není platné."

-#, python-format
-msgid ""
-"The source volume is a legacy volume. Create volume in the pool where the "
-"source volume %s is created."
-msgstr ""
-"Zdrojový svazek je zastaralý. Vytvořte svazek v zásobě, kde byl vytvořen "
-"zdrojový svazek %s."
-
-#, python-format
-msgid "The specified Snapshot mount point %s is not currently attached."
-msgstr "Zadaný bod připojení snímku %s není v současnosti připojen."
- #, python-format msgid "" "The volume: %(volumename)s was not first part of the default storage group " @@ -1458,10 +1144,6 @@ msgstr "Svazek \"%s\" nenalezen." msgid "Volume %(name)s already presented (%(status)d)!" msgstr "Svazek %(name)s již je přítomen na (%(status)d)!" -#, python-format -msgid "Volume %(vol)s was not in Storage Group %(sg)s." -msgstr "Svazek %(vol)s není ve skupině úložiště %(sg)s." - #, python-format msgid "Volume %(volume)s is not in any masking view." msgstr "Svazek %(volume)s není v žádném maskování." @@ -1603,41 +1285,9 @@ msgstr "" "Odmapování virtuálního disku od hostitele: Nenalezeno žádné mapování svazku " "%(vol_name)s k žádnému hostiteli." -msgid "" -"config option keymgr.fixed_key has not been defined: some operations may " -"fail unexpectedly" -msgstr "" -"Volba nastavení keymgr.fixed_key není zadána: některé operace mohou nečekaně " -"selhat" - -msgid "" -"destroy_empty_storage_group: True. Empty storage group will be deleted after " -"volume is detached." -msgstr "" -"destroy_empty_storage_group: True. Prázdná skupina úložiště bude smazána po " -"odpojení svazku." - msgid "flush() not supported in this version of librbd" msgstr "flush() není podporován touto verzí librbd" -msgid "force_delete_lun_in_storagegroup=True" -msgstr "force_delete_lun_in_storagegroup=True" - -#, python-format -msgid "get_evs: %(out)s -- No find for %(fsid)s" -msgstr "Získání evs: %(out)s -- Nenalezeno pro %(fsid)s" - -#, python-format -msgid "get_fsid: %(out)s -- No info for %(fslabel)s" -msgstr "Získání fsid: %(out)s -- Žádné informace pro %(fslabel)s" - -msgid "" -"glance_num_retries shouldn't be a negative value. The number of retries will " -"be set to 0 until this iscorrected in the cinder.conf." -msgstr "" -"Počet pokusů pro glance by neměl mít zápornou hodnotu. Počet pokusů bude " -"nastaven na 0, dokud nebude opraven v cinder.conf." - #, python-format msgid "" "iSCSI IP: '%s' was not found in hpe3par_iscsi_ips list defined in cinder." @@ -1646,13 +1296,6 @@ msgstr "" "iSCSI IP: '%s' nebylo nalezeno v seznamu hpe3par_iscsi_ips zadaném v cinder." "conf." -msgid "" -"ignore_pool_full_threshold: True. LUN creation will still be forced even if " -"the pool full threshold is exceeded." -msgstr "" -"ignore_pool_full_threshold: True. Vytvoření LUN bude vynuceno i když bude " -"překročen práh naplnění zásoby." - #, python-format msgid "initialize_connection: Did not find a preferred node for volume %s." msgstr "Zavedení spojení: Nenalezen upřednostňovaný uzel pro svazek %s." @@ -1668,15 +1311,6 @@ msgstr "Objekt %(key)s typu %(typ)s nenalezen, %(err_msg)s" msgid "qemu-img is not installed." msgstr "qemu-img není nainstalováno." -msgid "refresh stale ssc job in progress. Returning... " -msgstr "Úkol obnovy starého ssc probíhá. Vrácení..." - -msgid "san_secondary_ip is configured as the same value as san_ip." -msgstr "san_secondary_ip je nastaven na stejnou hodnotu jako san_ip." - -msgid "snapcopy metadata is ignored when creating volume." -msgstr "popisná data kopie snímku jsou při vytváření svazku ignorována." - #, python-format msgid "snapshot: %s not found, skipping delete operation" msgstr "snímek: %s nenalezeno, operace mazání je přeskočena" @@ -1693,12 +1327,6 @@ msgstr "" "srstatld vyžaduje verzi WSAPI '%(srstatld_version)s'. Nainstalovaná verze je " "'%(version)s'." -msgid "ssc job in progress. Returning... " -msgstr "Úkol ssc probíhá. Vrácení..." - -msgid "terminate_conn: provider location empty." -msgstr "Ukončení připojení: Umístění poskytovatele je prázdné."
- msgid "terminate_connection: lun map not found" msgstr "Ukončení připojení: Mapa lun nenalezena" diff --git a/cinder/locale/cs/LC_MESSAGES/cinder.po b/cinder/locale/cs/LC_MESSAGES/cinder.po index 702bd5a7b..82cfd546f 100644 --- a/cinder/locale/cs/LC_MESSAGES/cinder.po +++ b/cinder/locale/cs/LC_MESSAGES/cinder.po @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev522\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-31 10:23+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -244,9 +244,6 @@ msgstr "'status' musí být zadáno." msgid "'volume_id' must be specified" msgstr "'volume_id' svazku musí být zadáno." -msgid "'{}' object has no attribute '{}'" -msgstr "Objekt '{}' nemá vlastnost '{}'" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -394,17 +391,9 @@ msgstr "" "Žádné ze zadaných zásob úložiště, které mají být spravovány, neexistují. " "Prosím zkontrolujte své nastavení. Neexistující zásoby: %s" -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "V ovladači SheepDog nastala chyba. (Důvod: %(reason)s)" - msgid "An error has occurred during backup operation" msgstr "Během operace zálohování nastala chyba" -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "Při hledání svazku \"%s\" nastal problém." - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -492,12 +481,6 @@ msgstr "Údaje ověřovací skupiny [%s] nebyly nalezeny v úložišti CloudByte msgid "Auth user details not found in CloudByte storage." msgstr "Přihlašovací údaje uživatele nebyly nalezeny v úložišti CloudByte." -msgid "Authentication error" -msgstr "Chyba ověření" - -msgid "Authorization error" -msgstr "Chyba oprávnění" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "Zóna dostupnosti '%(s_az)s' je neplatná." @@ -526,9 +509,6 @@ msgstr "Podpůrná vrstva hlásí: Položka již existuje" msgid "Backend reports: item not found" msgstr "Podpůrná vrstva hlásí: Položka nenalezena" -msgid "Backend server not NaServer." -msgstr "Server podpůrné vrstvy není NaServer." 
- #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" @@ -625,12 +605,6 @@ msgstr "" msgid "Bad project format: project is not in proper format (%s)" msgstr "Špatný formát projektu: projekt není ve správném formátu (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Clusteru Datera zaslán špatný požadavek:neplatné argumenty: %(args)s | " -"%(message)s" - msgid "Bad response from Datera API" msgstr "Špatná odpověď od API Datera" @@ -647,18 +621,6 @@ msgstr "Binární soubor" msgid "Blank components" msgstr "Prázdné součásti" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Způsob ověření Blockbridge API (příznak nebo heslo)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Heslo API Blockbridge (pro způsob ověření 'password')" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Příznak API Blockbridge (pro způsob ověření 'token')" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Uživatel API Blockbridge (pro způsob ověření 'password')" - msgid "Blockbridge api host not configured" msgstr "Hostitel API Blockbridge není nastaven" @@ -785,10 +747,6 @@ msgstr "V polích nelze najít stejné id hostitele ." msgid "Can't get volume id. Volume name: %s." msgstr "Nelze získat id svazku. Název svazku: %s." -#, python-format -msgid "Can't open config file: %s" -msgstr "Nelze otevřít soubor s nastavením: %s" - msgid "Can't parse backup record." msgstr "Nelze zpracovat záznam zálohy." @@ -851,13 +809,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "Nelze se připojit k serveru ECOM." -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"Nelze vytvořit klon s velikostí %(vol_size)s ze svazku s velikostí " -"%(src_vol_size)s" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -903,13 +854,6 @@ msgstr "" msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "Nelze vytvořit nebo najít skupinu úložiště s názvem %(sgGroupName)s." -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"Nelze vytvořit svazek o velikosti %(vol_size)s ze snímku s velikostí " -"%(snap_size)s" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "Nelze vytvořit svazek o velikosti %s: není násobkem 8GB." @@ -1220,10 +1164,6 @@ msgstr "RPC port Coho není nastaven" msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Příkaz %(cmd)s byl v klientském řádku zablokován a zrušen" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper. čekání na podmínku: vypršel časový limit %s" - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "" @@ -1363,18 +1303,10 @@ msgstr "Nelze najít id clusteru GPFS: %s." msgid "Could not find GPFS file system device: %s." msgstr "Nelze najít zařízení systému souborů GPFS: %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "Nelze najít hostitele pro svazek %(volume_id)s s typem %(type_id)s." - #, python-format msgid "Could not find config at %(path)s" msgstr "Nelze najít nastavení v %(path)s" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." 
-msgstr "Nelze najít export iSCSI pro svazek %(volumeName)s." - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Nelze najít export iSCSI pro svazek %s" @@ -1429,17 +1361,6 @@ msgstr "" "Vytvoření zálohy zrušeno, očekávaný stav svazku je %(expected_status)s ale " "zjištěný stav je %(actual_status)s." -msgid "Create consistency group failed." -msgstr "Vytvoření skupiny jednotnosti selhalo." - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"Vytváření šifrovaných svazků typu %(type)s z obrazu %(image)s není " -"podporováno." - msgid "Create export for volume failed." msgstr "Vytvoření exportu pro svazek selhalo." @@ -1515,9 +1436,6 @@ msgstr "" "Současný mapovaný hostitel svazku %(vol)s je v nepodporované skupině " "hostitele s %(group)s." -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "ZASTARALÉ: Nasazení Cinder API v1." - msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." @@ -1549,14 +1467,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "Deduplikované lun nemohou být rozšířeny" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"Povolovač komprimace není nainstalován. Nelze vytvořit deduplikovaný svazek." - -msgid "Default pool name if unspecified." -msgstr "Název výchozí zásoby není zadán." - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1570,12 +1480,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "Výchozí typ svazku nenalezen." -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"Určuje sadu vystavených zásob a jejich přidružené dotazovací řetězce z " -"podpůrné vrstvy." - msgid "Delete LUNcopy error." msgstr "Chyba při mazání kopírování LUN." @@ -1699,13 +1603,6 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Zavedení připojení ovladačem selhalo (chyba: %(err)s)." -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"Ovladač není schopen provést přetypování protože svazek (LUN {}) má snímek, " -"který má přesunování zakázáno." - msgid "Driver must implement initialize_connection" msgstr "Ovladač musí zavést initialize_connection" @@ -2165,12 +2062,6 @@ msgstr "" "Chyba při přidružování skupiny úložiště: %(storageGroupName)s k zásadě FAST: " "%(fastPolicyName)s. Popis chyby: %(errordesc)s." -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "" -"Chyba při připojování svazku %s. Možná bylo dosaženo limitu v cílovém " -"zařízení!" - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2324,10 +2215,6 @@ msgstr "Chyba při vytváření prostoru pro %(space)s s velikostí %(size)d GB" msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "Chyba při rozšiřování prostoru svazku %(space)s o dalších %(size)d GB" -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "Chyba při mapování svazku %(vol)s. %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2370,17 +2257,9 @@ msgstr "Při mazání snímku skupiny jednotnosti %s nastala chyba." msgid "Error occurred when updating consistency group %s." 
msgstr "Při aktualizaci skupiny jednotnosti %s nastala chyba." -#, python-format -msgid "Error parsing config file: %s" -msgstr "Nelze zpracovat soubor s nastavením: %s" - msgid "Error promoting secondary volume to primary" msgstr "Chyba při propagování druhotného svazku na hlavní" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "Chyba při mazání svazku: %(vol)s. %(error)s." - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Chyba při přejmenování svazku %(vol)s: %(err)s." @@ -2540,12 +2419,6 @@ msgstr "" msgid "Extend volume not implemented" msgstr "Rozšíření svazku není zavedeno" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"Povolovač rychlého VP není nainstalován. Nelze nastavit zásadu vrstvení pro " -"svazek" - msgid "FAST is not supported on this array." msgstr "FAST není podporován v tomto poli." @@ -2602,10 +2475,6 @@ msgstr "" "Nelze získat zámek zdroje. (Sériová konzole: %(serial)s, inst: %(inst)s, " "ret: %(ret)s, chybový výstup: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "Nelze přidat %(vol)s do %(sg)s po %(retries)s pokusech." - msgid "Failed to add the logical device." msgstr "Nelze přidat logické zařízení." @@ -2684,9 +2553,6 @@ msgstr "" msgid "Failed to create IG, %s" msgstr "Nelze vytvořit IG, %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "Nelze vytvořit obraz-svazek SolidFire" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Nelze vytvořit skupinu svazku: %(vg_name)s" @@ -2797,9 +2663,6 @@ msgstr "Nelze vytvořit postup správce plánovače svazku" msgid "Failed to create snapshot %s" msgstr "Nelze vytvořit snímek %s" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "Nelze vytvořit snímek protože nebylo zadáno LUN ID" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "Nelze vytvořit snímek pro skupinu jednotnosti: %(cgName)s." @@ -2933,9 +2796,6 @@ msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "Nelze zajistit oblast zdrojů snímku, nelze nalézt svazek s id %s" -msgid "Failed to establish SSC connection." -msgstr "Nelze vytvořit připojení SSC." - msgid "Failed to establish connection with Coho cluster" msgstr "Zavedení připojení s clusterem Coho selhalo" @@ -2972,10 +2832,6 @@ msgstr "" msgid "Failed to find host %s." msgstr "Nelze najít hostitele %s." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "Nelze najít zásobu úložiště pro zdrojový svazek %s." - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Nelze získat údaje účtu CloudByte [%s]." @@ -3167,27 +3023,6 @@ msgstr "" "Nelze spravovat existující svazek %(name)s, protože nahlášená velikost " "%(size)s není číslo s plovoucí desetinnou čárkou." -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"Nelze spravovat existující svazky protože zásoba zvoleného typu svazku " -"neodpovídá sdílení NFS předanému v odkazu svazku." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"Nelze spravovat existující svazky protože zásoba zvoleného typu svazku " -"neodpovídá souborovému systému předanému v odkazu svazku." 
- -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"Nelze spravovat existující svazky protože zásoba zvoleného typu svazku " -"neodpovídá zásobě hostitele." - #, python-format msgid "Failed to manage volume %s." msgstr "Nelze spravovat svazek %s." @@ -3461,9 +3296,6 @@ msgstr "Chyba při hledání id hostitele lun." msgid "Find lun group from mapping view error." msgstr "Chyba při hledání skupiny lun v zobrazení mapování." -msgid "Find lun number error." -msgstr "Chyba při hledání čísla lun." - msgid "Find mapping view error." msgstr "Chyba při hledání zobrazení mapování." @@ -3793,14 +3625,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "Hostitel %s nemá žádné zavaděče FC" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "Hostitel %s nemá žádný zavaděč iSCSI" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "Hostitel '%s' nemohl být nalezen." - #, python-format msgid "Host group with name %s not found" msgstr "Skupina hostitele s názvem %s nebyla nalezena" @@ -3809,9 +3633,6 @@ msgstr "Skupina hostitele s názvem %s nebyla nalezena" msgid "Host group with ref %s not found" msgstr "Skupina hostitele mající odkaz %s nebyla nalezena" -msgid "Host not found" -msgstr "Hostitel nenalezen" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Hostitel nenalezen. Nelze odstranit %(service)s na %(host)s." @@ -3832,9 +3653,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "IP adresa/název hostitele API Blockbridge." - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" @@ -3928,12 +3746,6 @@ msgstr "" "Výjimka CLI Infortrend: %(err)s, parametr: %(param)s (Návratový kód: %(rc)s) " "(Výstup: %(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "Původní vrstva: {}, zásada: {} není platné." - -msgid "Input type {} is not supported." -msgstr "Typ vstupu {} není podporován." - msgid "Input volumes or snapshots are invalid." msgstr "Vstupní svazky nebo snímky jsou neplatné." @@ -3950,17 +3762,6 @@ msgstr "Pro rozšíření svazku není dostatek volného místa." msgid "Insufficient privileges" msgstr "Nedostatečná oprávnění" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "" -"Hodnota intervalu ( ve vteřinách) mezi jednotlivými pokusy o připojení ke " -"clusteru ceph." - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "" -"V seznamu vstupních/výstupních portů zadány neplatné porty %(port)s pro " -"protokol %(protocol)s." - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Neplatná doména 3PAR: %(err)s" @@ -4002,10 +3803,6 @@ msgid "" msgstr "" "Při získávání zásady QoS pro svazek %s byla zjištěna neplatná specifikace QoS" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "Neplatný typ ověření VNX: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -4044,14 +3841,6 @@ msgstr "Neplatný ověřovací klíč: %(reason)s" msgid "Invalid backup: %(reason)s" msgstr "Neplatná záloha: %(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"Neplatná url barbican api: je požadována verze, např. 
'http[s]://|" -"[:port]/', zadaná url je: %s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "" "V úložišti CloudByte nalezeny neplatné přihlašovací údaje CHAP pro uživatele." @@ -4215,14 +4004,6 @@ msgstr "Neplatný typ přenosu." msgid "Invalid update setting: '%s'" msgstr "Neplatné nastavení aktualizace: '%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"Neplatná URL: Musí být ve formátu 'http[s]://|[:port]/" -"', zadaná url je: %s" - #, python-format msgid "Invalid value '%s' for force." msgstr "Neplatná hodnota '%s' pro vynucení." @@ -4363,9 +4144,6 @@ msgstr "" "Vyvolání zavedení záložního systému, protože replikace není správně " "nastavena." -msgid "Item not found" -msgstr "Položka nenalezena" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "Úkol nebyl nalezen v CloudByte odpovědi pro vytvoření svazku [%s]." @@ -4388,9 +4166,6 @@ msgstr "Chyba klíče: %s" msgid "LUN export failed!" msgstr "Export LUN selhal!" -msgid "LUN id({}) is not valid." -msgstr "LUN id({}) není platné." - msgid "LUN map overflow on every channel." msgstr "Přetečení LUN mapy ve všech kanálech." @@ -4398,9 +4173,6 @@ msgstr "Přetečení LUN mapy ve všech kanálech." msgid "LUN not found with given ref %s." msgstr "LUN nenalezena pomocí zadaného odkazu %s." -msgid "LUN number ({}) is not an integer." -msgstr "LUN číslo ({}) není celé číslo." - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "Číslo LUN je mimo rozsah v kanálu s id: %(ch_id)s." @@ -4567,50 +4339,15 @@ msgstr "Záloha popisných dat je pro tento svazek již vytvořena" msgid "Metadata backup object '%s' already exists" msgstr "Object zálohy popisných dat '%s' již existuje" -msgid "Metadata item was not found" -msgstr "Položka popisných dat nenalezena" - -msgid "Metadata item was not found." -msgstr "Položka popisných dat nebyla nalezena." - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "Klíč vlastnosti popisných dat %s je větší než 255 znaků" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "Hodnota vlastnosti klíče popisných dat %s je vetší než 255 znaků" - -msgid "Metadata property key blank" -msgstr "Klíč vlastnosti popisných dat je prázdný" - msgid "Metadata property key blank." msgstr "Klíč vlastnosti popisných dat je prázdný." -msgid "Metadata property key greater than 255 characters." -msgstr "Klíč vlastnosti popisných dat je větší než 255 znaků." - -msgid "Metadata property value greater than 255 characters." -msgstr "Hodnota vlastnosti popisných dat je vetší než 255 znaků." - msgid "Metadata restore failed due to incompatible version" msgstr "Obnovení popisných dat selhalo kvůli nekompatibilní verzi" msgid "Metadata restore failed due to incompatible version." msgstr "Obnovení popisných dat selhalo kvůli nekompatibilní verzi." -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "Přesunutí svazku %(src)s selhalo." - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "Přesun svazku selhal mezi zdrojovým %(src)s a cílovým %(dst)s svazkem." - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "Přesun LUN %s byl zastaven nebo je chybný." - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." 
@@ -4638,9 +4375,6 @@ msgstr "V těle žádosti chybí povinný prvek '%s'." msgid "Missing required element 'consistencygroup' in request body." msgstr "V těle žádosti chybí povinný prvek 'consistencygroup'." -msgid "Missing required element 'host' in request body." -msgstr "V těle žádosti chybí povinný prvek 'host'." - msgid "Missing required element quota_class_set in request body." msgstr "V těle žádosti chybí povinný prvek quota_class_set." @@ -4747,9 +4481,6 @@ msgstr "Název nebo id zásoby úložiště musí být zadáno." msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "Musíte zadat zásoby úložiště. Volba: sio_storage_pools." -msgid "Must supply a positive value for age" -msgstr "Věk musí být zadán pomocí kladné hodnoty" - msgid "Must supply a positive, non-zero value for age" msgstr "Musíte zadat kladnou, nenulovou hodnotu pro věk" @@ -5103,9 +4834,6 @@ msgstr "" "Při dotazování úkolu [%(job)s] využívající [%(operation)s] v úložišti " "CloudByte byla obdržena prázdná odpověď." -msgid "Number of retries if connection to ceph cluster failed." -msgstr "Počet pokusů pokud připojení ke clusteru ceph selže." - msgid "Object Count" msgstr "Počet objektů" @@ -5162,16 +4890,10 @@ msgstr "Volba gpfs_images_share_mode není správně nastavena." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "Volba gpfs_mount_point_base není správně nastavena." -msgid "Option map (cls._map) is not defined." -msgstr "Mapa voleb (cls._map) není zadána." - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "Původní %(res)s %(prop)s musí mít jednu z hodnot '%(vals)s'" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "Potlačit port HTTPS pro připojení k API serveru Blockbridge." - #, python-format msgid "ParseException: %s" msgstr "Chyba zpracování: %s" @@ -5388,14 +5110,6 @@ msgstr "Odpověď serveru RPC je nedokončená" msgid "Raid did not have MCS Channel." msgstr "RAID neměl kanál MCS." -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"Dosaženo omezení nastavené konfigurační volbou max_luns_per_storage_group. " -"Operace přidávající %(vol)s do skupiny úložiště %(sg)s byla zamítnuta." - #, python-format msgid "Received error string: %s" msgstr "Obdržen chybový řetězec: %s" @@ -5541,9 +5255,6 @@ msgstr "Požadované nastavení nenalezeno." msgid "Required flag %s is not set" msgstr "Požadovaný příznak %s není nastaven" -msgid "Requires an NaServer instance." -msgstr "Vyžaduje instanci NaServeru." - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -5698,10 +5409,6 @@ msgstr "Služba %(service)s odstraněna na hostiteli %(host)s." msgid "Service %(service_id)s could not be found." msgstr "Služba %(service_id)s nemohla být nalezena." -#, python-format -msgid "Service %s not found." -msgstr "Služba %s nenalezena." - msgid "Service is unavailable at this time." msgstr "Služba je v tuto chvíli nedostupná." @@ -5926,9 +5633,6 @@ msgstr "ID úložného systému nebylo nastaveno." msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Systém úložiště nebyl nalezen pro zásobu %(poolNameInStr)s." -msgid "Storage-assisted migration failed during manage volume." -msgstr "Přesun za pomocí úložiště selhal během správy svazku." - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "Systém úložiště %(array)s nebyl nalezen."
@@ -5959,10 +5663,6 @@ msgstr "" msgid "Target volume type is still in use." msgstr "Cílový typ svazku se stále používá." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "ID nájemníka: %s neexistuje." - msgid "Terminate connection failed" msgstr "Ukončení připojení selhalo" @@ -6051,10 +5751,6 @@ msgstr "Zařízení na cestě %(path)s není dostupné: %(reason)s" msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "Doba konce (%(end)s) musí být po době začátku (%(start)s)." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "Dodatečná specifikace %s je neplatná." - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "Dodatečná specifikace: %(extraspec)s není platná." @@ -6085,14 +5781,6 @@ msgstr "Skupina hostitele nebo cíl iSCSI nebyly nalezeny." msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "CHAP uživatel iSCSI %(user)s neexistuje." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"Importovaný lun %(lun_id)s je v zásobě %(lun_pool)s, kterou nespravuje " -"hostitel %(host)s." - msgid "The key cannot be None." msgstr "Klíč nemůže být None." @@ -6158,11 +5846,6 @@ msgstr "Výsledky jsou neplatné." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "Snímek nelze vytvořit, zatímco svazek je v režimu údržby." -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "Zdrojový svazek %s není v zásobě spravované současným hostitelem." - msgid "The source volume for this WebDAV operation not found." msgstr "Zdrojový svazek nebyl nalezen pro tuto operaci WebDAV." @@ -6320,10 +6003,6 @@ msgstr "Žádné dostupné zdroje k použití. (Zdroj: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "Žádní platní hostitelé ESX." -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "K %s nejsou připojena žádná platná datová úložiště." - msgid "There are no valid datastores." msgstr "Žádná platná datová úložiště." @@ -6410,10 +6089,6 @@ msgstr "" msgid "Thin provisioning not supported on this version of LVM." msgstr "Mělké poskytování není podporováno v této verzi LVM." -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "" -"Povolovač mělkého poskytování není nainstalován. Nelze vytvořit mělký svazek" - msgid "This driver does not support deleting in-use snapshots." msgstr "Tento ovladač nepodporuje mazání právě používaných snímků." @@ -6445,14 +6120,6 @@ msgid "" msgstr "" "Při čekání na aktualizaci od Nova při mazání snímku %(id)s vypršel čas." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"Hodnota časového limitu (ve vteřinách) použitá při připojování do clusteru " -"ceph. Pokud je hodnota < 0, není nastaven žádný limit a bude použita výchozí " -"hodnota librados." - #, python-format msgid "Timeout while requesting %(service)s API." msgstr "Při žádání o API %(service)s vypršel časový limit." @@ -6521,9 +6188,6 @@ msgstr "" msgid "Unable to connect or find connection to host" msgstr "Nelze se připojit nebo nalézt připojení k hostiteli" -msgid "Unable to create Barbican Client without project_id." -msgstr "Nelze vytvořit klienta Barbican bez id projektu." 
- #, python-format msgid "Unable to create consistency group %s" msgstr "Nelze vytvořit skupinu jednotnosti %s" @@ -6848,9 +6512,6 @@ msgstr "Neznámý protokol: %(protocol)s." msgid "Unknown quota resources %(unknown)s." msgstr "Neznámý zdroj kvóty %(unknown)s." -msgid "Unknown service" -msgstr "Neznámá služba" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Neznámý směr řazení, musí být 'desc' nebo 'asc'" @@ -6945,9 +6606,6 @@ msgstr "ID uživatele" msgid "User does not have admin privileges" msgstr "Uživatel nemá správcovská oprávnění" -msgid "User is not authorized to use key manager." -msgstr "Uživatel nemá oprávnění používat správce klíčů." - msgid "User not authorized to perform WebDAV operations." msgstr "Uživatel nemá oprávnění provádět operace WebDAV." @@ -7155,11 +6813,6 @@ msgstr "Skupina svazku %s neexistuje" msgid "Volume Type %(id)s already exists." msgstr "Typ svazku %(id)s již existuje." -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"Typ svazku %(type_id)s nemá žádné dodatečné specifikace s klíčem %(id)s." - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -7414,10 +7067,6 @@ msgstr "Název typu svazku nemůže být prázdný." msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "Typ svazku s názvem %(volume_type_name)s nemohl být nalezen." -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "Svazek s id %s neexistuje." - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -7426,14 +7075,6 @@ msgstr "" "Svazek: %(volumeName)s není zřetězený svazek. Rozšíření lze provádět pouze " "na zřetězeném svazku. Ukončování..." -#, python-format -msgid "Volume: %s could not be found." -msgstr "Svazek: %s nemohl být nalezen." - -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "" -"Svazky mohou být rozkouskovány do objektů této velikosti (v megabajtech)." - #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." @@ -7737,13 +7378,6 @@ msgstr "ovládací umístění musí být zadáno" msgid "create_cloned_volume: Source and destination size differ." msgstr "Vytvoření klonovaného svazku: Velikost zdroje a cíle se liší." -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"vytváření skupiny jednotnosti ze zdroje podporuje zdroj snímku skupiny " -"jednotnosti, nebo zdroj skupiny jednotnosti. Nelze použít více zdrojů." - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr ""
- #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -7837,9 +7465,6 @@ msgstr "odpojit snímek ze vzdáleného uzle" msgid "do_setup: No configured nodes." msgstr "Zavedení: Nenastaveny žádné uzly." -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries musí být větší nebo rovno 0" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -8003,9 +7628,6 @@ msgstr "Spuštění iscsiadm selhalo." msgid "key manager error: %(reason)s" msgstr "chyba správce klíčů: %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key není zadáno" - msgid "limit param must be an integer" msgstr "parametr limit musí být celé číslo" @@ -8049,10 +7671,6 @@ msgstr "nalezeno mnoho zdrojů mající ID snímku %s" msgid "name cannot be None" msgstr "název nemůže být Žádný" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path: Nelze najít nástroj NAVISECCLI %(path)s." - #, python-format msgid "no REPLY but %r" msgstr "Žádná odpověď ale %r" @@ -8165,9 +7783,6 @@ msgstr "san_ip není nastaveno." msgid "san_ip must be set" msgstr "san_ip musí být nastaveno" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip: Povinné pole nastavení san_ip není nastaveno." - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." @@ -8178,16 +7793,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve() může být voláno pouze jednou" -msgid "service not found" -msgstr "služba nenalezena" - -msgid "snapshot does not exist" -msgstr "snímek neexistuje" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "id snímku:%s nenalezeno" - #, python-format msgid "snapshot-%s" msgstr "snímek-%s" @@ -8198,10 +7803,6 @@ msgstr "snímky přiděleny" msgid "snapshots changed" msgstr "snímek změněn" -#, python-format -msgid "source vol id:%s not found" -msgstr "id zdrojového svazku:%s nenalezeno" - #, python-format msgid "source volume id:%s is not replicated" msgstr "id zdrojového svazku:%s není replikováno" @@ -8280,9 +7881,6 @@ msgstr "svazek přidělen" msgid "volume changed" msgstr "svazek změněn" -msgid "volume does not exist" -msgstr "svazek neexistuje" - msgid "volume is already attached" msgstr "svazek již je připojen" @@ -8300,9 +7898,6 @@ msgstr "" msgid "volume size %d is invalid." msgstr "velikost svazku %d je neplatná." -msgid "volume_type cannot be None" -msgstr "Typ svazku nemůže být None" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "při vytváření svazku ve skupině jednotnosti musí být zadán jeho typ." @@ -8328,6 +7923,3 @@ msgstr "svazky změněny" #, python-format msgid "wait_for_condition: %s timed out." msgstr "Čekání na podmínku: %s vypršel časový limit." - -msgid "{} is not a valid option." -msgstr "{} není platná volba." diff --git a/cinder/locale/de/LC_MESSAGES/cinder.po b/cinder/locale/de/LC_MESSAGES/cinder.po index 43be865c9..89f1d59e3 100644 --- a/cinder/locale/de/LC_MESSAGES/cinder.po +++ b/cinder/locale/de/LC_MESSAGES/cinder.po @@ -10,9 +10,9 @@ # Monika Wolf , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -271,9 +271,6 @@ msgstr "'status' muss angegeben werden." msgid "'volume_id' must be specified" msgstr "'volume_id' muss angegeben werden." -msgid "'{}' object has no attribute '{}'" -msgstr "'{}'-Objekt hat kein Attribut '{}'." - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -438,23 +435,9 @@ msgstr "" "Eine API-Versionsanforderung muss mit einem VersionedMethod-Objekt " "verglichen werden." -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "In SheepdogDriver ist ein Fehler aufgetreten. (Ursache: %(reason)s)" - msgid "An error has occurred during backup operation" msgstr "Während des Datensicherungsvorgangs ist ein Fehler aufgetreten." -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "" -"Bei dem Versuch, die Schattenkopie '%s' zu ändern, ist ein Fehler " -"aufgetreten." - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "Beim Suchen nach Datenträger \"%s\" ist ein Fehler aufgetreten." - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -572,18 +555,12 @@ msgid "Auth user details not found in CloudByte storage." msgstr "" "Details des Authentifizierungsbenutzers nicht im CloudByte-Speicher gefunden." -msgid "Authentication error" -msgstr "Authentifizierungsfehler" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "Authentifizierung fehlgeschlagen. Überprüfen Sie die Berechtigungsnachweise " "für den Switch. Fehlercode %s." -msgid "Authorization error" -msgstr "Autorisierungsfehler" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "Verfügbarkeitszone '%(s_az)s' ist ungültig." @@ -602,11 +579,6 @@ msgstr "" msgid "Backend doesn't exist (%(backend)s)" msgstr "Backend ist nicht vorhanden (%(backend)s)." -msgid "Backend has already been failed over. Unable to fail back." -msgstr "" -"Es wurde bereits ein Failover für das Backend durchgeführt. Zurücksetzen " -"nicht möglich." - #, python-format msgid "Backend reports: %(message)s" msgstr "Backendberichte: %(message)s" @@ -617,9 +589,6 @@ msgstr "Backendberichte: Element ist bereits vorhanden." msgid "Backend reports: item not found" msgstr "Backendberichte: Element nicht gefunden." -msgid "Backend server not NaServer." -msgstr "Backend-Server ist kein NaServer." - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "Zeitlimittreffer für Wiederholungen bei Backenddienst: %(timeout)s s" @@ -726,12 +695,6 @@ msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Fehlerhaftes Projektformat: Projekt weist nicht das richtige Format (%s) auf." 
-#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Fehlerhafte Anforderung an Datera-Cluster gesendet: Ungültige Argumente: " -"%(args)s | %(message)s" - msgid "Bad response from Datera API" msgstr "Ungültige Antwort von Datera-API" @@ -748,18 +711,6 @@ msgstr "Binärdatei" msgid "Blank components" msgstr "Leere Komponenten" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Authentifizierungsschema für Blockbridge-API (token oder password)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Kennwort für Blockbridge-API (für Authentifizierungsschema 'password')" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Token für Blockbridge-API (für Authentifizierungsschema 'token')" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Benutzer für Blockbridge-API (für Authentifizierungsschema 'password')" - msgid "Blockbridge api host not configured" msgstr "Blockbridge-API-Host nicht konfiguriert" @@ -886,9 +837,6 @@ msgstr "%s kann nicht in eine Ganzzahl umgesetzt werden." msgid "Can't access 'scality_sofs_config': %s" msgstr "Zugriff auf 'scality_sofs_config' nicht möglich: %s" -msgid "Can't attach snapshot." -msgstr "Schattenkopie kann nicht angehängt werden." - msgid "Can't decode backup record." msgstr "Sicherungsdatensatz kann nicht decodiert werden." @@ -1015,10 +963,6 @@ msgstr "" "Schattenkopie %s kann nicht in Cinder importiert werden. Status der " "Schattenkopie ist nicht normal oder der Aktivitätsstatus ist nicht 'online'. " -#, python-format -msgid "Can't open config file: %s" -msgstr "Konfigurationsdatei kann nicht geöffnet werden: %s" - msgid "Can't parse backup record." msgstr "Sicherungsdatensatz kann nicht analysiert werden." @@ -1096,13 +1040,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "Verbindung zu ECOM-Server kann nicht hergestellt werden." -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"Klon mit der Größe %(vol_size)s kann nicht aus einem Datenträger der Größe " -"%(src_vol_size)s erstellt werden." - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1153,13 +1090,6 @@ msgstr "" "Eine Speichergruppe mit dem Namen %(sgGroupName)s kann nicht erstellt oder " "gefunden werden." -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"Datenträger mit der Größe %(vol_size)s kann nicht aus einer Schattenkopie " -"der Größe %(snap_size)s erstellt werden." - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "" @@ -1500,10 +1430,6 @@ msgstr "Der Coho-RPC-Port ist nicht konfiguriert." msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Befehl %(cmd)s blockierte in der CLI und wurde abgebrochen." -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition: %s Zeitlimit" - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s Zeitlimit." @@ -1666,20 +1592,10 @@ msgstr "GPFS-Cluster-ID wurde nicht gefunden: %s." msgid "Could not find GPFS file system device: %s." msgstr "GPFS-Dateisystemgerät wurde nicht gefunden: %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." 
-msgstr "" -"Es wurde kein Host für den Datenträger %(volume_id)s mit dem Typ %(type_id)s " -"gefunden." - #, python-format msgid "Could not find config at %(path)s" msgstr "Konfiguration wurde nicht im Pfad %(path)s gefunden." -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "iSCSI-Export für Datenträger %(volumeName)s wurde nicht gefunden." - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "iSCSI-Export nicht gefunden für Datenträger %s" @@ -1770,17 +1686,6 @@ msgstr "" "Erstellung von Sicherung abgebrochen. Datenträgerstatus %(expected_status)s " "erwartet, tatsächlicher Status ist %(actual_status)s." -msgid "Create consistency group failed." -msgstr "Erstellen der Konsistenzgruppe fehlgeschlagen." - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"Das Erstellen von verschlüsselten Datenträgern mit Typ %(type)s aus Image " -"%(image)s wird nicht unterstützt." - msgid "Create export for volume failed." msgstr "Erstellen des Exports für Datenträger fehlgeschlagen." @@ -1874,12 +1779,6 @@ msgstr "" "Der zurzeit zugeordnete Host für den Datenträger %(vol)s befindet sich in " "einer nicht unterstützten Hostgruppe mit %(group)s." -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "Veraltet: Version 1 der Cinder-API implementieren." - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "Veraltet: Version 2 der Cinder-API implementieren." - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1964,15 +1863,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "Dedup-LUNs können nicht erweitert werden." -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"Deduplication Enabler ist nicht installiert. Es kann kein deduplizierter " -"Datenträger erstellt werden. " - -msgid "Default pool name if unspecified." -msgstr "Standardpoolname, wenn nicht angegeben." - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1986,12 +1876,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "Standarddatenträgertyp wurde nicht gefunden." -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"Definiert die Gruppe der verfügbaren Pools und deren zugehörige Backend-" -"Abfragezeichenketten." - msgid "Delete LUNcopy error." msgstr "Fehler beim Löschen der LUN-Kopie." @@ -2081,9 +1965,6 @@ msgstr "" "Dell Cinder-Treiberkonfigurationsfehler: 'replication_device' %s wurde nicht " "gefunden." -msgid "Deploy v3 of the Cinder API." -msgstr "Implementieren Sie Version 3 der Cinder-API." - msgid "Describe-resource is admin only functionality" msgstr "'Describe-resource' ist eine reine Administratorfunktion." @@ -2139,13 +2020,6 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Treiberinitialisierungsverbindung fehlgeschlagen (Fehler: %(err)s)." -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"Treiber kann keine Typänderung vornehmen, da der Datenträger (LUN {}) eine " -"Schattenkopie hat, die nicht migriert werden darf." - msgid "Driver must implement initialize_connection" msgstr "Treiber muss initialize_connection implementieren." 
@@ -2661,12 +2535,6 @@ msgstr "" "Fehler beim Zuordnen der Speichergruppe %(storageGroupName)s zur FAST-" "Richtlinie %(fastPolicyName)s. Fehlerbeschreibung: %(errordesc)s." -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "" -"Fehler beim Anhängen des Datenträgers %s. Zielgrenzwert möglicherweise " -"erreicht!" - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2840,10 +2708,6 @@ msgstr "" msgid "Error managing volume: %s." msgstr "Fehler beim Verwalten des Datenträgers: %s" -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "Fehler beim Zuordnen von Datenträger %(vol)s. %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2887,18 +2751,10 @@ msgstr "Beim Löschen von Cgsnapshot %s ist ein Fehler aufgetreten." msgid "Error occurred when updating consistency group %s." msgstr "Beim Aktualisieren von Konsistenzgruppe %s ist ein Fehler aufgetreten." -#, python-format -msgid "Error parsing config file: %s" -msgstr "Fehler bei der Auswertung der Konfigurationsdatei: %s" - msgid "Error promoting secondary volume to primary" msgstr "" "Fehler beim Hochstufen des sekundären Datenträgers zum primären Datenträger." -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "Fehler beim Entfernen von Datenträger %(vol)s. %(error)s." - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Fehler beim Umbenennen von Datenträger %(vol)s: %(err)s." @@ -3188,12 +3044,6 @@ msgstr "" msgid "Extend volume not implemented" msgstr "Erweitern von Datenträgern nicht implementiert." -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"FAST VP Enabler ist nicht installiert. Die Tiering-Richtlinie für den " -"Datenträger kann nicht festgelegt werden." - msgid "FAST is not supported on this array." msgstr "FAST wird in diesem Array nicht unterstützt." @@ -3265,11 +3115,6 @@ msgstr "" "Es konnte keine Ressourcensperre bezogen werden. (Seriennummer: %(serial)s, " "Instanz: %(inst)s, Rückgabe: %(ret)s, Standardfehler: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "" -"Fehler beim Hinzufügen von %(vol)s zu %(sg)s nach %(retries)s Versuchen. " - msgid "Failed to add the logical device." msgstr "Die logische Einheit wurde nicht hinzugefügt." @@ -3357,9 +3202,6 @@ msgstr "" msgid "Failed to create IG, %s" msgstr "IG konnte nicht erstellt werden. %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "SolidFire-Abbilddatenträger konnte nicht erstellt werden." - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Erstellen der Datenträgergruppe fehlgeschlagen: %(vg_name)s" @@ -3482,10 +3324,6 @@ msgstr "" msgid "Failed to create snapshot %s" msgstr "Erstellen von Schattenkopie %s fehlgeschlagen" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "" -"Erstellen von Schattenkopie fehlgeschlagen, da keine LUN-ID angegeben ist." - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "Fehler beim Erstellen von Schattenkopie für cg: %(cgName)s." @@ -3640,9 +3478,6 @@ msgstr "" "Fehler beim Sicherstellen des Schattenkopieressourcenbereichs. Datenträger " "für ID %s wurde nicht gefunden." -msgid "Failed to establish SSC connection." -msgstr "Fehler beim Herstellen der SSC-Verbindung." 
- msgid "Failed to establish connection with Coho cluster" msgstr "Fehler beim Herstellen einer Verbindung mit Coho-Cluster." @@ -3697,10 +3532,6 @@ msgstr "Host %s nicht gefunden." msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "Die iSCSI-Initiatorgruppe mit %(initiator)s wurde nicht gefunden." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "Speicherpool für Quellendatenträger %s konnte nicht gefunden werden." - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Abrufen der CloudByte-Kontodetails zu Konto [%s] fehlgeschlagen." @@ -3931,29 +3762,6 @@ msgstr "" "Fehler beim Verwalten des bereits vorhandenen Datenträgers %(name)s, da die " "gemeldete Größe %(size)s keine Gleitkommazahl war." -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"Fehler beim Verwalten des vorhandenen Datenträgers, da der Pool des " -"gewählten Datenträgertyps nicht mit der NFS-Freigabe, die in der " -"Datenträgerreferenz übergeben wurde, übereinstimmt." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"Fehler beim Verwalten des vorhandenen Datenträgers, da der Pool des " -"gewählten Datenträgertyps nicht mit dem Dateisystem, das in der " -"Datenträgerreferenz übergeben wurde, übereinstimmt." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"Fehler beim Verwalten des vorhandenen Datenträgers, da der Pool des " -"gewählten Datenträgertyps nicht mit dem Pool des Hosts übereinstimmt." - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -4319,9 +4127,6 @@ msgstr "Fehler beim Suchen der Host-LUN-ID." msgid "Find lun group from mapping view error." msgstr "Fehler beim Suchen der LUN-Gruppe in der Zuordnungsansicht." -msgid "Find lun number error." -msgstr "Fehler beim Suchen der LUN-Nummer." - msgid "Find mapping view error." msgstr "Fehler beim Suchen der Zuordnungsansicht." @@ -4709,9 +4514,6 @@ msgstr "Fehlerhafte Pfadangaben von DRBDmanage erhalten! (%s)" msgid "HBSD error occurs." msgstr "HBSD-Fehler tritt auf." -msgid "HNAS has disconnected SSC" -msgstr "HNAS hat die Verbindung zu SSC getrennt." - msgid "HPELeftHand url not found" msgstr "HPELeftHand-URL nicht gefunden" @@ -4751,14 +4553,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "Host %s hat keine iSCSI-Initiatoren." -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "Host %s hat keinen iSCSI-Initiator." - -#, python-format -msgid "Host '%s' could not be found." -msgstr "Der Host '%s' wurde nicht gefunden." - #, python-format msgid "Host group with name %s not found" msgstr "Hostgruppe mit dem Namen %s nicht gefunden." @@ -4773,9 +4567,6 @@ msgstr "Der Host ist nicht gesperrt." msgid "Host is already Frozen." msgstr "Der Host ist bereits gesperrt." -msgid "Host not found" -msgstr "Host nicht gefunden" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" @@ -4806,9 +4597,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "IP-Adresse/Hostname der Blockbridge-API." - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." 
msgstr "" @@ -4909,12 +4697,6 @@ msgstr "" "Infortrend-CLI-Ausnahmebedingung: %(err)s Parameter: %(param)s " "(Rückgabecode: %(rc)s) (Ausgabe: %(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "Ursprüngliche Schicht: {}, Richtlinie: {} ist nicht gültig." - -msgid "Input type {} is not supported." -msgstr "Eingabetyp {} wird nicht unterstützt." - msgid "Input volumes or snapshots are invalid." msgstr "Eingabedatenträger oder Schattenkopien sind ungültig." @@ -4933,14 +4715,6 @@ msgstr "" msgid "Insufficient privileges" msgstr "Unzureichende Berechtigungen" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "" -"Intervall (in Sekunden) zwischen Verbindungsversuchen zum ceph-Cluster." - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "Ungültige %(protocol)s-Ports %(port)s für io_port_list angegeben." - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Ungültige 3PAR-Domäne: %(err)s" @@ -4987,10 +4761,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Ungültiges Replikationsziel: %(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "Ungültiger VNX-Authentifizierungstyp: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -5036,14 +4806,6 @@ msgstr "Ungültiger Autorisierungsschlüssel: %(reason)s" msgid "Invalid backup: %(reason)s" msgstr "Ungültige Sicherung: %(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"Ungültige Barbican-API-URL: Die Version ist erforderlich. Beispiel: " -"'http[s]://|[:Port]/'. Angegebene URL: %s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "Ungültige Details des CHAP-Benutzers im CloudByte-Speicher gefunden." @@ -5196,10 +4958,6 @@ msgstr "Ungültiger Antwortheader vom RPC-Server" msgid "Invalid secondary id %s." msgstr "Ungültige sekundäre ID %s." -#, python-format -msgid "Invalid secondary_id specified. Valid backend id is %s." -msgstr "Die Angabe für 'secondary_id' ist ungültig. Gültige Backend-ID: %s." - msgid "Invalid service catalog json." msgstr "Ungültige Servicekatalog-JSON." @@ -5225,10 +4983,6 @@ msgstr "Ungültiger Speicherpool %s angegeben." msgid "Invalid storage pool is configured." msgstr "Es wurde ein ungültiger Speicherpool konfiguriert." -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "Ungültiger Synchronisationsmodus angegeben. Zulässiger Modus: %s." - msgid "Invalid transport type." msgstr "Ungültiger Transporttyp." @@ -5236,14 +4990,6 @@ msgstr "Ungültiger Transporttyp." msgid "Invalid update setting: '%s'" msgstr "Ungültige Aktualisierungseinstellung: '%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"Ungültige URL: Muss das folgende Format haben: 'http[s]://|" -"[:Port]/'- Angegebene URL: %s" - #, python-format msgid "Invalid value '%s' for force." msgstr "Wert '%s' für Zwangsausführung ungültig." @@ -5392,9 +5138,6 @@ msgstr "" "Ausgabe eines Failovers fehlgeschlagen, weil die Replikation nicht " "ordnungsgemäß konfiguriert wurde. " -msgid "Item not found" -msgstr "Element nicht gefunden" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." 
msgstr "" @@ -5428,9 +5171,6 @@ msgstr "LU für den Datenträger nicht vorhanden: %s" msgid "LUN export failed!" msgstr "Exportieren der LUN fehlgeschlagen!" -msgid "LUN id({}) is not valid." -msgstr "LUN-ID({}) ist nicht gültig." - msgid "LUN map overflow on every channel." msgstr "Überlauf der LUN-Zuordnung an jedem Kanal." @@ -5438,9 +5178,6 @@ msgstr "Überlauf der LUN-Zuordnung an jedem Kanal." msgid "LUN not found with given ref %s." msgstr "LUN mit angegebener Referenz %s nicht gefunden." -msgid "LUN number ({}) is not an integer." -msgstr "LUN-Zahl ({}) ist keine Ganzzahl." - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "" @@ -5631,32 +5368,9 @@ msgstr "Für diesen Datenträger ist bereits eine Metadatensicherung vorhanden." msgid "Metadata backup object '%s' already exists" msgstr "Metadatensicherungsobjekt '%s' ist bereits vorhanden." -msgid "Metadata item was not found" -msgstr "Metadatenelement wurde nicht gefunden" - -msgid "Metadata item was not found." -msgstr "Metadatenelement wurde nicht gefunden." - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "Metadateneigenschaftsschlüssel %s ist größer als 255 Zeichen." - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "Metadateneigenschaftenschlüsselwert %s ist größer als 255 Zeichen." - -msgid "Metadata property key blank" -msgstr "Metadateneigenschaftenschlüssel leer" - msgid "Metadata property key blank." msgstr "Metadateneigenschaftenschlüssel leer." -msgid "Metadata property key greater than 255 characters." -msgstr "Metadateneigenschaftenschlüssel größer als 255 Zeichen." - -msgid "Metadata property value greater than 255 characters." -msgstr "Metadateneigenschaftenwert größer als 255 Zeichen." - msgid "Metadata restore failed due to incompatible version" msgstr "" "Fehler bei der Wiederherstellung der Metadaten aufgrund einer inkompatiblen " @@ -5667,23 +5381,6 @@ msgstr "" "Metadatenwiederherstellung aufgrund einer inkompatiblen Version " "fehlgeschlagen." -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "Migrieren des Datenträgers %(src)s fehlgeschlagen." - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"Migrieren des Quellendatenträger %(src)s zu Zieldatenträger %(dst)s " -"fehlgeschlagen." - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "Migration von LUN %s wurde gestoppt oder ist fehlerhaft." - -msgid "MirrorView/S enabler is not installed." -msgstr "MirrorView/S-Enabler ist nicht installiert." - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5712,9 +5409,6 @@ msgid "Missing required element 'consistencygroup' in request body." msgstr "" "Fehlendes erforderliches Element 'consistencygroup' im Anforderungshauptteil." -msgid "Missing required element 'host' in request body." -msgstr "Fehlendes erforderliches Element 'host' im Anforderungshauptteil." - msgid "Missing required element quota_class_set in request body." msgstr "" "Fehlendes erforderliches Element 'quota_class_set' im Anforderungshauptteil." @@ -5840,9 +5534,6 @@ msgstr "Speicherpoolname oder Speicherpool-ID muss angegeben werden." msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "Speicherpools müssen angegeben werden. Option: sio_storage_pools." -msgid "Must supply a positive value for age" -msgstr "Für age muss ein positiver Wert angegeben werden." 
- msgid "Must supply a positive, non-zero value for age" msgstr "Für age muss ein positiver Wert ungleich null angegeben werden." @@ -6256,11 +5947,6 @@ msgstr "" "Antwort 'Null' beim Abfragen nach auf [%(operation)s] basierendem Job " "[%(job)s] im CloudByte-Speicher erhalten." -msgid "Number of retries if connection to ceph cluster failed." -msgstr "" -"Anzahl der Wiederholungen, wenn Verbindung zum ceph-Cluster fehlgeschlagen " -"ist." - msgid "Object Count" msgstr "Objektanzahl" @@ -6326,19 +6012,12 @@ msgstr "Option gpfs_images_share_mode wurde nicht richtig festgelegt." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "Option gpfs_mount_point_base wurde nicht richtig festgelegt." -msgid "Option map (cls._map) is not defined." -msgstr "Optionszuordnung (cls._map) ist nicht definiert. " - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "" "Die Ursprungs-%(res)s %(prop)s muss einen der folgenden Werte haben: " "'%(vals)s'." -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "" -"HTTPS-Port zum Verbinden mit Blockbridge-API-Server außer Kraft setzen." - #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" @@ -6610,15 +6289,6 @@ msgstr "RPC-Serverantwort ist unvollständig." msgid "Raid did not have MCS Channel." msgstr "RAID hatte keinen MCS-Channel." -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"Der mit der Konfigurationsoption max_luns_per_storage_group festgelegte " -"Grenzwert wurde erreicht. Operation zum Hinzufügen von %(vol)s zu " -"Speichergruppe %(sg)s wird zurückgewiesen. " - #, python-format msgid "Received error string: %s" msgstr "Empfangene Fehlerzeichenkette: %s" @@ -6812,9 +6482,6 @@ msgstr "Erforderliche Konfiguration nicht gefunden." msgid "Required flag %s is not set" msgstr "Erforderliche Markierung %s ist nicht gesetzt." -msgid "Requires an NaServer instance." -msgstr "Erfordert eine NaServer-Instanz." - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -7013,10 +6680,6 @@ msgstr "Der Dienst %(service_id)s wurde auf dem Host %(host)s nicht gefunden." msgid "Service %(service_id)s could not be found." msgstr "Der Dienst %(service_id)s wurde nicht gefunden." -#, python-format -msgid "Service %s not found." -msgstr "Der Dienst %s wurde nicht gefunden." - msgid "Service is too old to fulfil this request." msgstr "Der Dienst ist zu alt, um diese Anforderung zu erfüllen." @@ -7123,10 +6786,6 @@ msgstr "" "Schattenkopie %(snapshot_id)s enthält keine Metadaten mit dem Schlüssel " "%(metadata_key)s." -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "Schattenkopie %s darf nicht Teil einer Konsistenzgruppe sein." - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "Schattenkopie '%s' ist im Array nicht vorhanden." @@ -7155,9 +6814,6 @@ msgstr "Schattenkopie von Datenträger im Status %s nicht unterstützt." msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "Schattenkopieressource \"%s\", die an keiner Stelle implementiert ist?" -msgid "Snapshot size must be multiple of 1 GB." -msgstr "Größe der Schattenkopie muss Vielfaches von 1 GB sein." - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" @@ -7317,11 +6973,6 @@ msgstr "Speichersystem-ID nicht festgelegt." 
msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Speichersystem für Pool %(poolNameInStr)s nicht gefunden." -msgid "Storage-assisted migration failed during manage volume." -msgstr "" -"Speicherunterstützte Migration ist beim Verwalten des Datenträgers " -"fehlgeschlagen." - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "StorageSystem %(array)s wurde nicht gefunden. " @@ -7373,10 +7024,6 @@ msgstr "" msgid "Target volume type is still in use." msgstr "Zieldatenträgertyp ist noch im Gebrauch." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "Mandanten-ID %s ist nicht vorhanden." - msgid "Terminate connection failed" msgstr "Beenden der Verbindung fehlgeschlagen" @@ -7478,10 +7125,6 @@ msgstr "Das Gerät im Pfad %(path)s ist nicht erreichbar: %(reason)s" msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "Die Endzeit (%(end)s) muss nach der Startzeit (%(start)s) liegen." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "extra_spec %s ist ungültig. " - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "Die zusätzliche Spezifikation %(extraspec)s ist nicht gültig." @@ -7536,14 +7179,6 @@ msgstr "" msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "Der iSCSI-CHAP-Benutzer %(user)s ist nicht vorhanden." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"Die importierte LUN %(lun_id)s befindet sich im Pool %(lun_pool)s, der nicht " -"vom Host %(host)s verwaltet wird." - #, python-format msgid "The job has not completed and is in a %(state)s state." msgstr "Die Aufgabe wurde nicht ausgeführt und hat den Status %(state)s." @@ -7638,13 +7273,6 @@ msgstr "" "Die Schattenkopie kann nicht erstellt werden, wenn der Datenträger im " "Wartungsmodus ist." -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "" -"Der Quellendatenträger %s befindet sich nicht in dem vom aktuellen Host " -"verwalteten Pool." - msgid "The source volume for this WebDAV operation not found." msgstr "" "Der Quellendatenträger für diese WebDAV-Operation wurde nicht gefunden." @@ -7823,10 +7451,6 @@ msgstr "" msgid "There are no valid ESX hosts." msgstr "Es sind keine gültigen ESX-Hosts vorhanden." -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "Es sind keine gültigen Datenspeicher an %s angehängt." - msgid "There are no valid datastores." msgstr "Es gibt keine gültigen Datenspeicher." @@ -7928,11 +7552,6 @@ msgstr "" msgid "Thin provisioning not supported on this version of LVM." msgstr "Thin Provisioning wird mit dieser Version von LVM nicht unterstützt." -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "" -"ThinProvisioning Enabler ist nicht installiert. Es kann kein Thin-" -"Datenträger erstellt werden. " - msgid "This driver does not support deleting in-use snapshots." msgstr "" "Dieser Treiber unterstützt nicht das Löschen von Schattenkopien mit dem " @@ -7971,14 +7590,6 @@ msgstr "" "Zeitlimit beim Warten auf Nova-Aktualisierung zum Löschen von Schattenkopie " "%(id)s überschritten." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"Zeitlimitwert (in Sekunden), der beim Herstellen einer Verbindung zum ceph-" -"Cluster verwendet wird. 
Wenn der Wert < 0 ist, ist kein Zeitlimit festgelegt " -"und der Standardwert für librados wird verwendet. " - #, python-format msgid "Timeout while calling %s " msgstr "Zeitlimitüberschreitung beim Aufruf von %s " @@ -8070,9 +7681,6 @@ msgstr "" "Verbindung zum Host kann nicht hergestellt werden oder es wurde keine " "Verbindung zum Host gefunden." -msgid "Unable to create Barbican Client without project_id." -msgstr "Barbican Client kann nicht ohne Projekt-ID erstellt werden." - #, python-format msgid "Unable to create consistency group %s" msgstr "Konsistenzgruppe %s konnte nicht erstellt werden." @@ -8178,10 +7786,6 @@ msgstr "" "Replikation mit Purity-REST-API-Version %(api_version)s nicht möglich. " "Erfordert eine der folgenden Versionen: %(required_versions)s." -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "" -"Replikation und Snapcopy können nicht zur selben Zeit aktiviert werden. " - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Es kann keine Beziehung zum Storwizse-Cluster %s hergestellt werden." @@ -8592,9 +8196,6 @@ msgstr "Unbekanntes Protokoll: %(protocol)s." msgid "Unknown quota resources %(unknown)s." msgstr "Unbekannte Kontingentressourcen %(unknown)s." -msgid "Unknown service" -msgstr "Unbekannter Dienst" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" "Unbekannte Sortierrichtung: Muss 'desc' (absteigend) oder " @@ -8718,9 +8319,6 @@ msgstr "Benutzer-ID" msgid "User does not have admin privileges" msgstr "Benutzer hat keine Admin-Berechtigungen." -msgid "User is not authorized to use key manager." -msgstr "Benutzer ist nicht zum Verwenden des Schlüsselmanagers berechtigt. " - msgid "User not authorized to perform WebDAV operations." msgstr "" "Der Benutzer ist zum Ausführen von WebDAV-Operationen nicht berechtigt." @@ -8961,15 +8559,6 @@ msgstr "" "Der Datenträger %s ist 'online'. Setzen Sie den Datenträger für die " "Verwaltung mit OpenStack auf 'offline'." -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "" -"Der Datenträger %s darf sich nicht in einer Migration befinden, nicht " -"zugeordnet sein, darf zu keiner Konsistenzgruppe gehören und keine " -"Schattenkopien haben." - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Datenträger %s darf nicht Teil einer Konsistenzgruppe sein." @@ -8998,12 +8587,6 @@ msgstr "Datenträgergruppe %s ist nicht vorhanden." msgid "Volume Type %(id)s already exists." msgstr "Datenträgertyp %(id)s ist bereits vorhanden." -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"Der Datenträgertyp %(type_id)s hat keine zusätzliche Spezifikation mit " -"Schlüssel %(id)s." - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -9214,9 +8797,6 @@ msgstr "" msgid "Volume size must be a multiple of 1 GB." msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." -msgid "Volume size must be multiple of 1 GB." -msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." - msgid "Volume size must multiple of 1 GB." msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." @@ -9298,10 +8878,6 @@ msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" "Datenträgertyp mit dem Namen %(volume_type_name)s wurde nicht gefunden." -#, python-format -msgid "Volume with volume id %s does not exist." 
-msgstr "Datenträger mit Datenträger-ID %s ist nicht vorhanden." - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -9317,17 +8893,10 @@ msgstr "" "Der Datenträger %(volumeName)s wurde der Speichergruppe %(sgGroupName)s " "nicht hinzugefügt." -#, python-format -msgid "Volume: %s could not be found." -msgstr "Datenträger: %s wurde nicht gefunden." - #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Der Datenträger %s wird bereits von Cinder verwaltet." -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "Datenträger werden in Objekte dieser Größe (in Megabyte) aufgeteilt. " - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" @@ -9984,14 +9553,6 @@ msgstr "" "oder eine Konsistenzgruppenquelle. Die Verwendung mehrerer Quellen ist nicht " "zulässig." -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"'create_consistencygroup_from_src' unterstützt eine cgsnapshot-Quelle oder " -"eine Konsistenzgruppenquelle. Die Verwendung mehrerer Quellen ist nicht " -"zulässig." - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "" @@ -10104,10 +9665,6 @@ msgstr "" "create_volume_from_snapshot: Der Status der Schattenkopie muss \"available\" " "zum Erstellen eines Datenträgers sein. Der ungültige Status ist %s." -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "" -"create_volume_from_snapshot: Größe von Quelle und Ziel sind unterschiedlich." - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." @@ -10115,10 +9672,6 @@ msgstr "" "create_volume_from_snapshot: Datenträgergröße unterscheidet sich von der " "Größe des auf der Schattenkopie basierenden Datenträgers." -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "" -"'deduplicated' und 'auto tiering' können nicht gleichzeitig aktiviert sein." - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -10162,9 +9715,6 @@ msgstr "Zuordnung der Schattenkopie zu fernem Knoten aufheben" msgid "do_setup: No configured nodes." msgstr "do_setup: keine konfigurierten Knoten." -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries muss größer-gleich 0 sein." - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -10372,23 +9922,12 @@ msgstr "Ausführen von 'iscsiadm' fehlgeschlagen." msgid "key manager error: %(reason)s" msgstr "Schlüsselmanagerfehler: %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key nicht definiert." - msgid "limit param must be an integer" msgstr "'limit'-Parameter muss eine Ganzzahl sein." msgid "limit param must be positive" msgstr "'limit'-Parameter muss positiv sein." -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -"'manage_existing' kann einen Datenträger nicht verwalten, der mit Hosts " -"verbunden ist. Trennen Sie vor dem Import die Verbindung dieses Datenträgers " -"zu vorhandenen Hosts. " - msgid "manage_existing requires a 'name' key to identify an existing volume." 
msgstr "" "manage_existing erfordert den Schlüssel 'name' zum Identifizieren eines " @@ -10435,10 +9974,6 @@ msgstr "mehrere Ressourcen mit Schattenkopie-ID %s gefunden" msgid "name cannot be None" msgstr "Name darf nicht 'None' sein." -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path: NAVISECCLI-Tool %(path)s wurde nicht gefunden." - #, python-format msgid "no REPLY but %r" msgstr "Keine Antwort, aber %r" @@ -10502,14 +10037,6 @@ msgstr "rados- und rbd-python-Bibliotheken nicht gefunden." msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "'read_deleted' kann nur 'no', 'yes' oder 'only' sein, nicht '%r'" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "'replication_device' muss im Backend konfiguriert sein: %s." - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "'replication_device' mit 'backend_id' [%s] fehlt." - #, python-format msgid "replication_failover failed. %s not found." msgstr "'replication_failover' fehlgeschlagen. %s wurde nicht gefunden." @@ -10570,9 +10097,6 @@ msgstr "san_ip wurde nicht festgelegt." msgid "san_ip must be set" msgstr "'san_ip' muss festgelegt sein." -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip: Konfiguration für Pflichtfeld. san_ip ist nicht festgelegt. " - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." @@ -10584,16 +10108,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve() kann nur einmal aufgerufen werden." -msgid "service not found" -msgstr "Dienst nicht gefunden" - -msgid "snapshot does not exist" -msgstr "Schattenkopie ist nicht vorhanden" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "Schattenkopie-ID %s nicht gefunden" - #, python-format msgid "snapshot-%s" msgstr "snapshot-%s" @@ -10604,10 +10118,6 @@ msgstr "Schattenkopien zugeordnet" msgid "snapshots changed" msgstr "Schattenkopien geändert" -#, python-format -msgid "source vol id:%s not found" -msgstr "Quellendatenträger-ID %s nicht gefunden" - #, python-format msgid "source volume id:%s is not replicated" msgstr "Quellendatenträger-ID: %s wird nicht repliziert." @@ -10707,9 +10217,6 @@ msgstr "Datenträger zugeordnet" msgid "volume changed" msgstr "Datenträger geändert" -msgid "volume does not exist" -msgstr "Datenträger ist nicht vorhanden." - msgid "volume is already attached" msgstr "Datenträger ist bereits angehängt." @@ -10727,9 +10234,6 @@ msgstr "" msgid "volume size %d is invalid." msgstr "Die Datenträgergröße %d ist ungültig." -msgid "volume_type cannot be None" -msgstr "'volume_type' darf nicht 'None' sein." - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" @@ -10767,6 +10271,3 @@ msgid "" msgstr "" "Die Eigenschaft 'zfssa_manage_policy' muss auf 'strict' oder 'loose' gesetzt " "sein. Aktueller Wert: %s." - -msgid "{} is not a valid option." -msgstr "{} ist keine gültige Option." diff --git a/cinder/locale/es/LC_MESSAGES/cinder.po b/cinder/locale/es/LC_MESSAGES/cinder.po index 708035c2f..ac62ee68a 100644 --- a/cinder/locale/es/LC_MESSAGES/cinder.po +++ b/cinder/locale/es/LC_MESSAGES/cinder.po @@ -7,16 +7,17 @@ # FIRST AUTHOR , 2011 # Jose Enrique Ruiz Navarro , 2014 # Andreas Jaeger , 2016. #zanata +# Jose Porrua , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-27 07:57+0000\n" -"Last-Translator: Eugènia Torrella \n" +"PO-Revision-Date: 2016-07-28 05:39+0000\n" +"Last-Translator: Jose Porrua \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" @@ -129,6 +130,10 @@ msgstr "" "%(msg_type)s: se está creando NetworkPortal: asegúrese de que el puerto " "%(port)d en la IP %(ip)s no lo esté utilizando otro servicio." +#, python-format +msgid "%(name)s cannot be all spaces." +msgstr "%(name)s no puede estar vacío." + #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " @@ -242,6 +247,10 @@ msgstr "%s no está definido en cinder.conf" msgid "%s not set." msgstr "%s no establecido." +#, python-format +msgid "'%(key)s = %(value)s'" +msgstr "'%(key)s = %(value)s'" + #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " @@ -265,9 +274,6 @@ msgstr "se debe especificar 'status'." msgid "'volume_id' must be specified" msgstr "Se debe especificar 'volume_id'" -msgid "'{}' object has no attribute '{}'" -msgstr "El objeto '{}' no tiene ningún atributo '{}'" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -430,21 +436,9 @@ msgstr "" "Una solicitud de versión de la API se debe comparar con un objeto " "VersionedMethod." -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "Se ha producido un error en SheepdogDriver. (Razón: %(reason)s)" - msgid "An error has occurred during backup operation" msgstr "Un error ha ocurrido durante la operación de copia de seguridad" -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "Se ha producido un error al intentar modificar la instantánea '%s'." - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "Se ha producido un error al buscar el volumen \"%s\"." - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -490,6 +484,9 @@ msgstr "" "No se ha podido suprimir un destino de iSCSI. (puerto: %(port)s, tno: " "%(tno)s, alias: %(alias)s)" +msgid "An unknown error occurred." +msgstr "Se ha producido un error desconocido." + msgid "An unknown exception occurred." msgstr "Una excepción desconocida ha ocurrido" @@ -556,18 +553,12 @@ msgstr "" "No se han encontrado detalles del usuario de autenticación en el " "almacenamiento CloudByte." -msgid "Authentication error" -msgstr "Error de autenticación" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "Ha fallado la autenticación, compruebe las credenciales del conmutador, " "código de error %s." -msgid "Authorization error" -msgstr "Error de autorización" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "La zona de disponibilidad '%(s_az)s' no es válida." @@ -586,11 +577,6 @@ msgstr "" msgid "Backend doesn't exist (%(backend)s)" msgstr "El programa de fondo no existe %(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." 
-msgstr "" -"El programa de fondo ya ha hecho una migración tras error. No se puede " -"restaurar." - #, python-format msgid "Backend reports: %(message)s" msgstr "Informes de fondo: %(message)s" @@ -601,9 +587,6 @@ msgstr "Informes de fondo: el elemento ya existe" msgid "Backend reports: item not found" msgstr "Informes de fondo: elemento no encontrado" -msgid "Backend server not NaServer." -msgstr "El servidor Backend no es NaServer." - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" @@ -715,12 +698,6 @@ msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Formato de proyecto erróneo: el proyecto no tiene un formato correcto (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Solicitud incorrecta enviada al clúster Datera: Argumentos no válidos: " -"%(args)s | %(message)s" - msgid "Bad response from Datera API" msgstr "Respuesta errónea de la API Datera" @@ -737,20 +714,6 @@ msgstr "Binario" msgid "Blank components" msgstr "Componentes en blanco" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Esquema de autenticación de API Blockbridge (señal o contraseña)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "" -"Contraseña de API Blockbridge (para esquema de autenticación 'password')" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Señal de API Blockbridge (para el esquema de autenticación 'token')" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "" -"Usuario de API Blockbridge (para el esquema de autenticación 'password')" - msgid "Blockbridge api host not configured" msgstr "No se ha configurado el host de API Blockbridge" @@ -878,9 +841,6 @@ msgstr "No se puede traducir %s a un entero." msgid "Can't access 'scality_sofs_config': %s" msgstr "No se puede acceder a 'scality_sofs_config': %s" -msgid "Can't attach snapshot." -msgstr "No se puede adjuntar la instantánea." - msgid "Can't decode backup record." msgstr "No se puede decodificar el registro de copia de seguridad." @@ -1002,10 +962,6 @@ msgstr "" "No se puede importar la instantánea %s en Cinder. El estado de la " "instantánea no es normal o el estado de ejecución no es en línea." -#, python-format -msgid "Can't open config file: %s" -msgstr "No se puede abrir el archivo de configuración: %s" - msgid "Can't parse backup record." msgstr "No se puede analizar el registro de copia de seguridad." @@ -1080,13 +1036,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "No se puede conectar al servidor de ECOM.." -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"No se puede crear un clon con un tamaño de %(vol_size)s a partir de un " -"volumen con un tamaño de %(src_vol_size)s" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1138,13 +1087,6 @@ msgstr "" "No se puede crear o encontrar un grupo de almacenamiento con el nombre " "%(sgGroupName)s." -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"No se puede crear un volumen con un tamaño de %(vol_size)s a partir de una " -"instantánea con un tamaño de %(snap_size)s" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "No se puede crear el volumen de tamaño %s: no es múltiplo de 8GB." 
@@ -1486,10 +1428,6 @@ msgstr "No se ha configurado el puerto RPC de Coho" msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Mandato %(cmd)s bloqueado en la CLI y cancelado" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition: %s tiempo de espera" - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "" @@ -1655,22 +1593,10 @@ msgid "Could not find GPFS file system device: %s." msgstr "" "No se ha podido encontrar el dispositivo de sistema de archivos GPFS: %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "" -"No se ha podido encontrar un host para el volumen %(volume_id)s con el tipo " -"%(type_id)s." - #, python-format msgid "Could not find config at %(path)s" msgstr "No se ha podido encontrar configuración en %(path)s" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "" -"No se ha podido encontrar la exportación iSCSI para el volumen " -"%(volumeName)s." - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "No se ha podido encontrar la exportación iSCSI para el volumen %s" @@ -1760,17 +1686,6 @@ msgstr "" "La creación de copia de seguridad ha terminado anormalmente, se esperaba el " "estado de volumen %(expected_status)s pero se ha obtenido %(actual_status)s." -msgid "Create consistency group failed." -msgstr "Ha fallado la operación de crear un grupo de consistencia." - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"No se admite la creación de volúmenes cifrados con el tipo %(type)s de la " -"imagen %(image)s ." - msgid "Create export for volume failed." msgstr "Error al crear la exportación de volumen." @@ -1860,12 +1775,6 @@ msgstr "" "El host correlacionado actualmente para el volumen %(vol)s está en un grupo " "de hosts no admitido con %(group)s." -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "OBSOLETO: Despliegue v1 de la API de Cinder." - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "EN DESUSO: Despliegue la v2 de la API de Cinder." - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1948,15 +1857,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "No se pueden ampliar los LUN dedup" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"El habilitador de deduplicación no está instalado. No se puede crear un " -"volumen deduplicado." - -msgid "Default pool name if unspecified." -msgstr "Nombre de agrupación predeterminado si no se especifica." - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1970,12 +1870,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "No se ha podido encontrar el tipo de volumen predeterminado." -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"Define el conjunto de agrupaciones expuestas y sus series de consulta de " -"programa de fondo asociadas" - msgid "Delete LUNcopy error." msgstr "Error al suprimir LUNcopy." @@ -2065,9 +1959,6 @@ msgstr "" "Error de configuración del controlador Cinder de Dell, no se ha encontrado " "el dispositivo de replicación (replication_device) %s" -msgid "Deploy v3 of the Cinder API." 
-msgstr "Despliegue la v3 de la API de Cinder." - msgid "Describe-resource is admin only functionality" msgstr "El recurso de descripción es funcionalidad sólo de administrador" @@ -2120,13 +2011,6 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "El controlador no ha podido inicializar la conexión (error: %(err)s)." -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"El controlador no puede realizar la reescritura porque el volumen (LUN {}) " -"tiene una instantánea que está prohibido migrar." - msgid "Driver must implement initialize_connection" msgstr "El controlador debe implementar initialize_connection" @@ -2637,11 +2521,6 @@ msgstr "" "Error al asociar el grupo de almacenamiento: %(storageGroupName)s con la " "política fast: %(fastPolicyName)s con la descripción de error: %(errordesc)s." -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "" -"Error al conectar el volumen %s. Podría alcanzarse el límite de destino." - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2818,10 +2697,6 @@ msgstr "" msgid "Error managing volume: %s." msgstr "Error al gestionar el volumen: %s." -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "Error al correlacionar el volumen %(vol)s. %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2865,17 +2740,9 @@ msgstr "Se producido un error al suprimir el cgsnapshot %s." msgid "Error occurred when updating consistency group %s." msgstr "Se ha producido un error al actualizar el grupo de consistencia %s." -#, python-format -msgid "Error parsing config file: %s" -msgstr "Error al analizar el archivo de configuración: %s" - msgid "Error promoting secondary volume to primary" msgstr "Error al promocionar el volumen secundario al primario" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "Error al eliminar el volumen %(vol)s. %(error)s." - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Error al cambiar el nombre del volumen %(vol)s: %(err)s." @@ -3157,12 +3024,6 @@ msgstr "" msgid "Extend volume not implemented" msgstr "Ampliar el volumen no se ha implementado" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"El habilitador de FAST VP no está instalado. No se puede establecer la " -"política de capas para el volumen" - msgid "FAST is not supported on this array." msgstr "FAST no se admite en esta matriz." @@ -3232,11 +3093,6 @@ msgstr "" "No se ha podido obtener un bloqueo de recurso. (serie: %(serial)s, inst: " "%(inst)s, ret: %(ret)s, stderr: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "" -"No se ha podido añadir %(vol)s a %(sg)s después de %(retries)s intentos." - msgid "Failed to add the logical device." msgstr "No se ha podido añadir el dispositivo lógico." @@ -3327,9 +3183,6 @@ msgstr "" msgid "Failed to create IG, %s" msgstr "No se ha podido crear IG, %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "No se ha podido crear SolidFire Image-Volume" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "No se ha podido crear el grupo de volumen: %(vg_name)s" @@ -3404,6 +3257,9 @@ msgstr "" msgid "Failed to create iqn." msgstr "No se ha podido crear el iqn." 
+msgid "Failed to create iscsi client" +msgstr "No se ha podido crear el cliente iSCSI" + #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" @@ -3445,11 +3301,6 @@ msgstr "No se ha podido crear flujo de volumen de gestor de planificador" msgid "Failed to create snapshot %s" msgstr "No se ha podido crear la instantánea %s" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "" -"No se ha podido crear la instantánea porque no se ha especificado ningún ID " -"de LUN" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "No se ha podido crear una instantánea para cg: %(cgName)s." @@ -3602,8 +3453,8 @@ msgstr "" "No se ha podido garantizar el área de recursos de la instantánea, no se ha " "encontrado el volumen para el ID %s" -msgid "Failed to establish SSC connection." -msgstr "No se ha podido establecer la conexión SSC." +msgid "Failed to establish a stable connection" +msgstr "No se ha podido establecer una conexión estable." msgid "Failed to establish connection with Coho cluster" msgstr "No se ha podido establecer conexión con el clúster Coho." @@ -3662,12 +3513,6 @@ msgstr "" "No se ha podido encontrar el grupo de iniciadores iSCSI que contiene " "%(initiator)s." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "" -"No se ha podido encontrar la agrupación de almacenamiento para el volumen de " -"origen %s." - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "" @@ -3910,29 +3755,6 @@ msgstr "" "No se ha podido gestionar el volumen existente %(name)s, porque el archivo " "indicado %(size)s no era un número de coma flotante." -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"No se ha podido gestionar el volumen existente porque la agrupación del tipo " -"de volumen seleccionado no coincide con el uso compartido NFS pasado en la " -"referencia de volumen." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"No se ha podido gestionar el volumen existente porque la agrupación del tipo " -"de volumen seleccionado no coincide con el sistema de archivos pasado en la " -"referencia de volumen." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"No se ha podido gestionar el volumen existente porque la agrupación del tipo " -"de volumen seleccionado no coincide con la agrupación del host." - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -4293,9 +4115,6 @@ msgstr "Error al buscar el ID de LUN de host." msgid "Find lun group from mapping view error." msgstr "Error al buscar el grupo de LUN en la vista de correlaciones." -msgid "Find lun number error." -msgstr "Error al buscar el número de lun." - msgid "Find mapping view error." msgstr "Error al buscar la vista de correlaciones." @@ -4677,9 +4496,6 @@ msgstr "" msgid "HBSD error occurs." msgstr "Se ha producido un error HBSD." 
-msgid "HNAS has disconnected SSC" -msgstr "HNAS ha desconectado SSC" - msgid "HPELeftHand url not found" msgstr "URL de HPELeftHand no encontrado" @@ -4719,14 +4535,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "El host %s no tiene ningún iniciador FC" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "El host %s no tiene ningún iniciador iSCSI" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "El host '%s' no se ha encontrado." - #, python-format msgid "Host group with name %s not found" msgstr "No se ha encontrado el grupo de host con el nombre %s." @@ -4741,9 +4549,6 @@ msgstr "El host NO está inmovilizado." msgid "Host is already Frozen." msgstr "El host ya está inmovilizado." -msgid "Host not found" -msgstr "Host no encontrado" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" @@ -4764,6 +4569,9 @@ msgstr "El tipo de host %s no se soporta." msgid "Host with ports %(ports)s not found." msgstr "No se ha encontrado el host con los puertos %(ports)s." +msgid "Hosts" +msgstr "Hosts" + msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "No se puede utilizar Hipermetro y Replicación en el mismo volume_type." @@ -4776,9 +4584,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "Dirección IP/nombre de host de la API of Blockbridge." - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" @@ -4831,6 +4636,9 @@ msgstr "La imagen %(image_id)s es inaceptable: %(reason)s" msgid "Image location not present." msgstr "Ubicación de imagen no presente." +msgid "Image quota exceeded" +msgstr "Se ha excedido la cuota de la imágen" + #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " @@ -4876,12 +4684,6 @@ msgstr "" "Excepción de CLI Infortrend: %(err)s Parám: %(param)s (Código de retorno: " "%(rc)s) (Salida: %(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "Nivel inicial: {}, la política: {} no es válida." - -msgid "Input type {} is not supported." -msgstr "El tipo de entrada {} no está soportado." - msgid "Input volumes or snapshots are invalid." msgstr "Los volúmenes de entrada o instantáneas no son válidos." @@ -4898,17 +4700,6 @@ msgstr "No hay suficiente espacio libre disponible para extender el volumen." msgid "Insufficient privileges" msgstr "Privilegios insuficientes" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "" -"Valor de intervalo (en segundos) entre los reintentos de conexión al clúster " -"ceph." - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "" -"Se han especificado puertos %(protocol)s %(port)s no válidos para " -"io_port_list." - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Dominio 3PAR no válido: %(err)s" @@ -4957,10 +4748,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Destino de replicación no válido: %(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "Tipo de autenticación VNX no válido: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -5007,14 +4794,6 @@ msgstr "Claves de autenticación inválidas: %(reason)s" msgid "Invalid backup: %(reason)s" msgstr "Copia de seguridad no válida: %(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 
'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"URL de API de Barbican no válido: debe tener el formato siguiente: " -"'http[s]://|[:puerto]/', el URL especificado es: %s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "" "Se han encontrado detalles de usuario chap no válidos en el almacenamiento " @@ -5193,12 +4972,6 @@ msgstr "Se ha especificado una agrupación de almacenamiento no válida %s." msgid "Invalid storage pool is configured." msgstr "Se ha configurado una agrupación de almacenamiento no válida." -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "" -"Se ha especificado un modo de sincronización no válido, el modo permitido es " -"%s." - msgid "Invalid transport type." msgstr "Tipo de transporte no válido." @@ -5206,14 +4979,6 @@ msgstr "Tipo de transporte no válido." msgid "Invalid update setting: '%s'" msgstr "Valor de actualización no válido: '%s' " -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"URL no válido: debe tener el formato siguiente: 'http[s]://|[:" -"puerto]/', el URL especificado es: %s" - #, python-format msgid "Invalid value '%s' for force." msgstr "Valor no válido %s' para forzar." @@ -5362,9 +5127,6 @@ msgstr "" "Se está emitiendo un mensaje de migración tras error fallida porque la " "replicación no está configurada correctamente." -msgid "Item not found" -msgstr "Elemento no encontrado" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" @@ -5397,12 +5159,12 @@ msgstr "" msgid "LU does not exist for volume: %s" msgstr "No existe ningún LU para el volumen: %s" +msgid "LUN doesn't exist." +msgstr "El LUN no existe." + msgid "LUN export failed!" msgstr "Error al exportar LUN." -msgid "LUN id({}) is not valid." -msgstr "El ID de LUN ({}) no es válido." - msgid "LUN map overflow on every channel." msgstr "Desbordamiento de correlación de LUN en todos los canales." @@ -5410,9 +5172,6 @@ msgstr "Desbordamiento de correlación de LUN en todos los canales." msgid "LUN not found with given ref %s." msgstr "No se ha encontrado un LUN con la referencia dada %s." -msgid "LUN number ({}) is not an integer." -msgstr "El número de LUN ({}) no es un entero." - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "El número de LUN está fuera de limites en el ID de canal: %(ch_id)s." @@ -5575,6 +5334,9 @@ msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "" "La vista de máscara %(maskingViewName)s no se ha suprimido correctamente" +msgid "Maximum age is count of days since epoch." +msgstr "La edad máxima es el recuento de días desde epoch." + #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "" @@ -5597,6 +5359,10 @@ msgstr "" msgid "May specify only one of %s" msgstr "Puede especificar solo uno de %s" +#, python-format +msgid "Message %(message_id)s could not be found." +msgstr "No se ha podido encontrar el mensaje %(message_id)s." 
+ msgid "Metadata backup already exists for this volume" msgstr "La copia de seguridad de metadatos ya existe para este volumen" @@ -5604,32 +5370,17 @@ msgstr "La copia de seguridad de metadatos ya existe para este volumen" msgid "Metadata backup object '%s' already exists" msgstr "El objeto de copia de seguridad de metadatos '%s' ya existe" -msgid "Metadata item was not found" -msgstr "No se ha encontrado el elemento metadatos" - -msgid "Metadata item was not found." -msgstr "No se ha encontrado el elemento de metadatos." +#, python-format +msgid "Metadata property key %s greater than 255 characters." +msgstr "Clave de propiedad de metadatos %s mayor que 255 caracteres." #, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "Clave de propiedad de metadatos %s mayor que 255 caracteres" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "Valor de clave de propiedad de metadatos %s mayor que 255 caracteres" - -msgid "Metadata property key blank" -msgstr "Clave de propiedad de metadatos en blanco" +msgid "Metadata property key %s value greater than 255 characters." +msgstr "Valor de clave de propiedad de metadatos %s mayor que 255 caracteres." msgid "Metadata property key blank." msgstr "Clave de propiedad de metadatos en blanco" -msgid "Metadata property key greater than 255 characters." -msgstr "La clave de propiedad de metadatos tiene más de 255 caracteres" - -msgid "Metadata property value greater than 255 characters." -msgstr "El valor de propiedad de metadatos tiene más de 255 caracteres" - msgid "Metadata restore failed due to incompatible version" msgstr "" "La restauración de metadatos ha fallado debido a la versión incompatible" @@ -5638,23 +5389,6 @@ msgid "Metadata restore failed due to incompatible version." msgstr "" "La restauración de metadatos ha fallado debido a una versión incompatible." -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "No se ha podido migrar el volumen %(src)s." - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"No se ha podido migrar el volumen entre el volumen de origen %(src)s y el " -"volumen de destino %(dst)s." - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "La migración de LUN %s se ha detenido o tiene un error." - -msgid "MirrorView/S enabler is not installed." -msgstr "El habilitador de MirrorView/S no está instalado." - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5672,6 +5406,11 @@ msgstr "Falta el cuerpo de la solicitud" msgid "Missing request body." msgstr "No se ha hallado el cuerpo de la solicitud." +#, python-format +msgid "Missing required element '%(element)s' in request body." +msgstr "" +"Falta el elemento obligatorio '%(element)s' en el cuerpo de la solicitud." + #, python-format msgid "Missing required element '%s' in request body" msgstr "Falta el elemento requerido '%s' en el cuerpo de la solicitud" @@ -5685,9 +5424,6 @@ msgstr "" "Falta el elemento obligatorio 'consistencygroup' en el cuerpo de la " "solicitud." -msgid "Missing required element 'host' in request body." -msgstr "Falta el elemento obligatorio 'host' en el cuerpo de la solicitud." - msgid "Missing required element quota_class_set in request body." msgstr "Falta el elemento necesario quota_class_set en cuerpo de solicitud." @@ -5818,9 +5554,6 @@ msgstr "" "Se deben especificar las agrupaciones de almacenamiento. 
Opción: " "sio_storage_pools." -msgid "Must supply a positive value for age" -msgstr "Debe proporcionar un valor positivo para la edad" - msgid "Must supply a positive, non-zero value for age" msgstr "Debe proporcionar un valor positivo distinto de cero para la edad" @@ -6138,6 +5871,9 @@ msgstr "No se ha encontrado ningún host válido. %(reason)s" msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "No hay hosts válidos para el volumen %(id)s con el tipo %(type)s" +msgid "No valid ports." +msgstr "No hay puertos válidos." + #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "No hay ningún vdisk con el UID especificado en ref %s." @@ -6248,9 +5984,6 @@ msgstr "" "Se ha recibido una respuesta nula al realizar la consulta para el trabajo " "basado en [%(operation)s] [%(job)s] en el almacenamiento CloudByte." -msgid "Number of retries if connection to ceph cluster failed." -msgstr "Número de reintentos si la conexión al clúster ceph ha fallado." - msgid "Object Count" msgstr "Recuento de objetos" @@ -6314,16 +6047,15 @@ msgstr "La opción gpfs_images_share_mode no se ha establecido correctamente." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "La opción gpfs_mount_point_base no se ha establecido correctamente." -msgid "Option map (cls._map) is not defined." -msgstr "La correlación de opciones (cls._map) no está definida." - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "%(res)s %(prop)s de origen debe ser uno de los valores '%(vals)s'" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "" -"Sustituya el puerto HTTPS para conectarse al servidor de API Blockbridge." +msgid "Param [identifier] is invalid." +msgstr "El parámetro [identifier] no es válido." + +msgid "Param [lun_name] is invalid." +msgstr "El parámetro [lun_name] no es válido." #, python-format msgid "ParseException: %s" @@ -6420,6 +6152,10 @@ msgstr "No se ha establecido el nombre de agrupaciones." msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "Estado de copia primaria: %(status)s y sincronizado: %(sync)s." +#, python-format +msgid "Programming error in Cinder: %(reason)s" +msgstr "Error de programación en Cinder: %(reason)s" + msgid "Project ID" msgstr "ID del proyecto" @@ -6588,15 +6324,6 @@ msgstr "La respuesta del servidor RPC es incompleta" msgid "Raid did not have MCS Channel." msgstr "Raid no tiene el canal MCS." -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"Se ha alcanzado el límite establecido por la opción de configuración " -"max_luns_per_storage_group. La operación de añadir %(vol)s al grupo de " -"almacenamiento %(sg)s se ha rechazado." - #, python-format msgid "Received error string: %s" msgstr "Serie de error recibida: %s" @@ -6787,9 +6514,6 @@ msgstr "Configuración necesaria no encontrada" msgid "Required flag %s is not set" msgstr "El distintivo necesario %s no se ha establecido" -msgid "Requires an NaServer instance." -msgstr "Requiere una instancia de NaServer." 
- #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -6877,6 +6601,12 @@ msgstr "" msgid "Retry count exceeded for command: %s" msgstr "Se ha superado el recuento de reintentos para el mandato: %s" +msgid "Retryable Dell Exception encountered" +msgstr "Se ha detectado una excepción reintentable de Dell" + +msgid "Retryable Pure Storage Exception encountered" +msgstr "Se ha detectado una excepción reintentable de Pure Storage" + msgid "Retryable SolidFire Exception encountered" msgstr "Se ha detectado una excepción reintentable de SolidFire" @@ -6995,16 +6725,15 @@ msgstr "" msgid "Service %(service_id)s could not be found." msgstr "No se ha podido encontrar el servicio %(service_id)s." -#, python-format -msgid "Service %s not found." -msgstr "El servicio %s no se ha encontrado." - msgid "Service is too old to fulfil this request." msgstr "El servicio es demasiado antiguo para cumplir esta solicitud." msgid "Service is unavailable at this time." msgstr "El servicio no está disponible en este momento." +msgid "Session might have expired." +msgstr "La sesión pudo haber expirado." + msgid "Set pair secondary access error." msgstr "Error al definir el acceso secundario del par." @@ -7094,10 +6823,6 @@ msgstr "" "La instantánea %(snapshot_id)s no tiene metadatos con la clave " "%(metadata_key)s." -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "La instantánea %s no puede formar parte de un grupo de consistencia." - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "La instantánea '%s' no existe en la matriz." @@ -7124,9 +6849,6 @@ msgstr "La instantánea del volumen no se soporta en estado: %s." msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "¿Recurso de instantánea \"%s\" no desplegado en ningún sitio?" -msgid "Snapshot size must be multiple of 1 GB." -msgstr "El tamaño de la instantánea debe ser múltiplo de 1 GB." - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "Estado de instantánea %(cur)s no permitido para update_snapshot_status" @@ -7291,10 +7013,6 @@ msgstr "" "No se ha encontrado el sistema de almacenamiento para la agrupación " "%(poolNameInStr)s." -msgid "Storage-assisted migration failed during manage volume." -msgstr "" -"Ha fallado la migración asistida con almacenamiento al gestionar el volumen." - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "El sistema de almacenamiento %(array)s no se ha encontrado." @@ -7344,10 +7062,6 @@ msgstr "" msgid "Target volume type is still in use." msgstr "El tipo de volumen de destino aún se está utilizando." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "ID de arrendatario: %s no existe." - msgid "Terminate connection failed" msgstr "No se ha podido terminar la conexión" @@ -7449,10 +7163,6 @@ msgstr "" "La hora de finalización (%(end)s) debe ser posterior a la hora de inicio " "(%(start)s)." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "extra_spec: %s no es válido." - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "La especificación extraspec: %(extraspec)s no es válida." @@ -7505,14 +7215,6 @@ msgstr "" msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "El usuario CHAP de iSCSI %(user)s no existe." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." 
-msgstr "" -"El LUN importado %(lun_id)s está en la agrupación %(lun_pool)s que no está " -"gestionada por el host %(host)s." - msgid "The key cannot be None." msgstr "La clave no puede ser Ninguno." @@ -7603,11 +7305,8 @@ msgstr "" "mantenimiento." #, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "" -"El volumen de origen %s no está en la agrupación gestionada por el host " -"actual." +msgid "The snapshot is unavailable: %(data)s" +msgstr "La instantánea no está disponible: %(data)s" msgid "The source volume for this WebDAV operation not found." msgstr "No se ha encontrado el volumen de origen para esta operación WebDAV." @@ -7785,10 +7484,6 @@ msgstr "No hay recursos disponibles para utilizar. (recurso: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "No hay hosts ESX válidos." -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "No hay almacenes de datos válidos conectados a %s." - msgid "There are no valid datastores." msgstr "No hay almacenes de datos válidos." @@ -7884,11 +7579,6 @@ msgstr "" msgid "Thin provisioning not supported on this version of LVM." msgstr "No se admite el aprovisionamiento ligero en esta versión de LVM." -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "" -"El habilitador de ThinProvisioning no está instalado. No se puede crear un " -"volumen ligero" - msgid "This driver does not support deleting in-use snapshots." msgstr "Este controlador no admite suprimir instantáneas en uso." @@ -7925,14 +7615,6 @@ msgstr "" "Se ha desactivado mientras esperaba la actualización de Nova para suprimir " "la instantánea %(id)s." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"Valor de tiempo de espera (en segundos) que se utiliza al conectarse al " -"clúster ceph. Si el valor. < 0, no se establece ningún tiempo de espera y se " -"utiliza el valor librados predeterminado." - #, python-format msgid "Timeout while calling %s " msgstr "Tiempo de espera excedido al llamar a %s " @@ -8025,9 +7707,6 @@ msgstr "No se ha podido completar la migración tras error de %s." msgid "Unable to connect or find connection to host" msgstr "No se ha podido conectar o encontrar una conexión con el host" -msgid "Unable to create Barbican Client without project_id." -msgstr "No se puede crear el cliente Barbican sin un project_id." - #, python-format msgid "Unable to create consistency group %s" msgstr "No se ha podido crear el grupo de consistencia %s" @@ -8130,10 +7809,6 @@ msgstr "" "%(api_version)s, se necesita una de las versiones siguientes: " "%(required_versions)s." -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "" -"No se ha podido habilitar la replicación y la copia instantánea a la vez." - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "" @@ -8380,6 +8055,9 @@ msgstr "No se puede renombrar el volumen %(existing)s a %(newname)s" msgid "Unable to retrieve snapshot group with id of %s." msgstr "No se ha podido recuperar el grupo de instantáneas con el id %s." +msgid "Unable to retrieve volume stats." +msgstr "No e ha podido recuperar las estadísticas del volumen. 
" + #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " @@ -8405,6 +8083,10 @@ msgstr "" "(volume-copy), que no se permite cuando el nuevo tipo es replicación. " "Volumen = %s" +#, python-format +msgid "Unable to send requests: %s" +msgstr "No se han podido enviar peticiones: %s" + #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." @@ -8545,9 +8227,6 @@ msgstr "Protocolo desconocido: %(protocol)s." msgid "Unknown quota resources %(unknown)s." msgstr "Recursos de cuota desconocidos %(unknown)s." -msgid "Unknown service" -msgstr "Servicio desconocido" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" @@ -8613,6 +8292,10 @@ msgstr "" msgid "Unsupported backup verify driver" msgstr "Controlador de verificación de copia de seguridad no admitido" +#, python-format +msgid "Unsupported fields %s." +msgstr "Campos no soportados: %s." + #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " @@ -8668,9 +8351,6 @@ msgstr "ID de usuario" msgid "User does not have admin privileges" msgstr "El usuario no tiene privilegios de administrador" -msgid "User is not authorized to use key manager." -msgstr "El usuario no está autorizado a usar el gestor de clave." - msgid "User not authorized to perform WebDAV operations." msgstr "El usuario no tiene autorización para realizar operaciones WebDAV." @@ -8902,18 +8582,14 @@ msgstr "" "El volumen %s está en línea. Defina el volumen como fuera de línea para " "gestionarlo con OpenStack." -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "" -"El volumen %s no se puede migrar ni adjuntar, no puede pertenecer a un " -"grupo de consistencia ni tener instantáneas." - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "El volumen %s no debe formar parte de un grupo de consistencia." +#, python-format +msgid "Volume %s not found" +msgstr "No se ha encontrado el volumen %s" + #, python-format msgid "Volume %s not found." msgstr "No se ha encontrado el volumen %s." @@ -8938,12 +8614,6 @@ msgstr "El grupo de volúmenes %s no existe" msgid "Volume Type %(id)s already exists." msgstr "El tipo de volumen %(id)s ya existe. " -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"El tipo de volumen %(type_id)s no tiene una especificación adicional con la " -"clave %(id)s." - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -9003,6 +8673,10 @@ msgstr "La vía de acceso de archivo de dispositivo de volumen %s no existe." msgid "Volume device not found at %(device)s." msgstr "Dispositivo de volumen no encontrado en: %(device)s" +#, python-format +msgid "Volume does not exists %s." +msgstr "El volumen %s no existe." + #, python-format msgid "Volume driver %s not initialized." msgstr "Controlador de volumen %s no inicializado." @@ -9150,9 +8824,6 @@ msgstr "" msgid "Volume size must be a multiple of 1 GB." msgstr "El tamaño del volumen debe ser un múltiplo de 1 GB." -msgid "Volume size must be multiple of 1 GB." -msgstr "El tamaño del volumen debe ser múltiplo de 1 GB." - msgid "Volume size must multiple of 1 GB." msgstr "El tamaño de volumen debe ser múltiplo de 1 GB." @@ -9233,8 +8904,8 @@ msgstr "" "%(volume_type_name)s." 
 #, python-format
-msgid "Volume with volume id %s does not exist."
-msgstr "El volumen con el ID de volumen %s no existe."
+msgid "Volume%s: not found"
+msgstr "Volumen %s: no encontrado"

 #, python-format
 msgid ""
@@ -9250,18 +8921,10 @@ msgstr ""
 "El volumen %(volumeName)s no se ha añadido al grupo de almacenamiento "
 "%(sgGroupName)s."

-#, python-format
-msgid "Volume: %s could not be found."
-msgstr "Volumen: %s no se ha encontrado."
-
 #, python-format
 msgid "Volume: %s is already being managed by Cinder."
 msgstr "El volumen %s ya se gestiona en Cinder."

-msgid "Volumes will be chunked into objects of this size (in megabytes)."
-msgstr ""
-"Los volúmenes se fragmentarán en objetos de este tamaño (en megabytes)."
-
 msgid ""
 "Volumes/account exceeded on both primary and secondary SolidFire accounts."
 msgstr ""
@@ -9859,6 +9522,9 @@ msgstr ""
 msgid "check_hypermetro_exist error."
 msgstr "Error de check_hypermetro_exist."

+msgid "cinder-all is deprecated in Newton and will be removed in Ocata."
+msgstr "cinder-all está en desuso en Newton y se eliminará en Ocata."
+
 #, python-format
 msgid "clone depth exceeds limit of %s"
 msgstr "la profundidad de clon excede el límite de %s"

@@ -9909,13 +9575,6 @@ msgstr ""
 "create_consistencygroup_from_src solo admite un origen de cgsnapshot o bien "
 "un origen de grupo de consistencia. No se pueden utilizar diversos orígenes."

-msgid ""
-"create_consistencygroup_from_src supports a cgsnapshot source or a "
-"consistency group source. Multiple sources cannot be used."
-msgstr ""
-"create_consistencygroup_from_src admite un origen de cgsnapshot o bien un "
-"origen de grupo de consistencia. No se pueden utilizar diversos orígenes."
-
 #, python-format
 msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist."
 msgstr ""
@@ -10026,10 +9685,6 @@ msgstr ""
 "create_volume_from_snapshot: el estado de la instantánea debe ser \"available"
 "\" (disponible) para crear el volumen. El estado no válido es: %s."

-msgid "create_volume_from_snapshot: Source and destination size differ."
-msgstr ""
-"create_volume_from_snapshot: el tamaño de origen y de destino son distintos."
-
 msgid ""
 "create_volume_from_snapshot: Volume size is different from snapshot based "
 "volume."
@@ -10037,9 +9692,8 @@ msgstr ""
 "create_volume_from_snapshot: el tamaño del volumen es distinto al volumen "
 "basado en la instantánea."

-msgid "deduplicated and auto tiering can't be both enabled."
-msgstr ""
-"los niveles deduplicados y automáticos no pueden estar ambos habilitados."
+msgid "data not found"
+msgstr "No se han encontrado datos"

 #, python-format
 msgid ""
@@ -10085,9 +9739,6 @@ msgstr "desconecte instantánea del nodo remoto"
 msgid "do_setup: No configured nodes."
 msgstr "do_setup: No hay nodos configurado."

-msgid "eqlx_cli_max_retries must be greater than or equal to 0"
-msgstr "eqlx_cli_max_retries debe ser mayor o igual que 0"
-
 #, python-format
 msgid ""
 "error writing object to swift, MD5 of object in swift %(etag)s is not the "
@@ -10295,23 +9946,12 @@ msgstr "Ha fallado la ejecución de iscsiadm."
 msgid "key manager error: %(reason)s"
 msgstr "error de gestor clave: %(reason)s"

-msgid "keymgr.fixed_key not defined"
-msgstr "keymgr:fixed_key no está definido"
-
 msgid "limit param must be an integer"
 msgstr "el parámetro de límite debe ser un entero"

 msgid "limit param must be positive"
 msgstr "el parámetro de límite debe ser positivo"

-msgid ""
-"manage_existing cannot manage a volume connected to hosts. 
Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -" manage_existing no puede gestionar un volumen conectado con hosts. " -"Desconecte este volumen de los hosts existentes antes de realizar la " -"importación." - msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing necesita una clave 'name' para identificar un volumen " @@ -10359,16 +9999,16 @@ msgstr "se han encontrado varios recursos con el ID de instantánea %s" msgid "name cannot be None" msgstr "el nombre no puede ser None" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "" -"naviseccli_path: No se ha podido encontrar la herramienta NAVISECCLI " -"%(path)s." - #, python-format msgid "no REPLY but %r" msgstr "ninguna RESPUESTA, sino %r" +msgid "no data found" +msgstr "No se han encontrado datos" + +msgid "no error code found" +msgstr "No se ha encontrado ningún código de error." + #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "no se ha encontrado ninguna instantánea con el id %s en drbdmanage" @@ -10427,18 +10067,6 @@ msgstr "no se han encontrado las bibliotecas rados y rbd python" msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted solo puede ser 'no', 'yes' o 'only', no %r" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "" -"Se debe configurar el dispositivo de replicación (replication_device) en el " -"programa de fondo: %s." - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "" -"Falta el dispositivo de replicación (replication_device) con el ID de " -"programa de fondo (backend_id) [%s]." - #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover ha fallado. No se ha encontrado %s." @@ -10499,10 +10127,6 @@ msgstr "san_ip no está establecido." msgid "san_ip must be set" msgstr "se debe establecer san_ip" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "" -"san_ip: Configuración de campo obligatorio. san_ip no se ha establecido" - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." @@ -10514,15 +10138,11 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve() sólo se puede llamar una vez " -msgid "service not found" -msgstr "no se ha encontrado el servicio" +msgid "size not found" +msgstr "No se ha encontrado el tamaño" -msgid "snapshot does not exist" -msgstr "la instantánea no existe" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "id:%s de instantánea no encontrado" +msgid "snapshot info not found" +msgstr "No se ha encontrado información de la instantánea" #, python-format msgid "snapshot-%s" @@ -10534,10 +10154,6 @@ msgstr "instantáneas asignadas" msgid "snapshots changed" msgstr "instantáneas modificadas" -#, python-format -msgid "source vol id:%s not found" -msgstr "id:%s de volumen de origen no encontrado" - #, python-format msgid "source volume id:%s is not replicated" msgstr "El ID de volumen de origen: %s no se replica" @@ -10555,6 +10171,9 @@ msgstr "el estado debe ser %s y" msgid "status must be available" msgstr "el estado debe ser available" +msgid "status not found" +msgstr "No se ha encontrado el estado" + msgid "stop_hypermetro error." msgstr "Error de stop_hypermetro." 
@@ -10635,9 +10254,6 @@ msgstr "volumen asignado" msgid "volume changed" msgstr "volumen modificado" -msgid "volume does not exist" -msgstr "el volumen no existe" - msgid "volume is already attached" msgstr "El volumen ya está conectado" @@ -10655,9 +10271,6 @@ msgstr "" msgid "volume size %d is invalid." msgstr "el tamaño de volumen %d no es válido." -msgid "volume_type cannot be None" -msgstr "volume_type no puede ser None (Ninguno)" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" @@ -10696,6 +10309,3 @@ msgid "" msgstr "" "La propiedad zfssa_manage_policy se debe establecer a 'strict' o 'loose'. El " "valor actual es: %s." - -msgid "{} is not a valid option." -msgstr "{} no es una opción válida." diff --git a/cinder/locale/fr/LC_MESSAGES/cinder.po b/cinder/locale/fr/LC_MESSAGES/cinder.po index af08c63b9..f9aa85768 100644 --- a/cinder/locale/fr/LC_MESSAGES/cinder.po +++ b/cinder/locale/fr/LC_MESSAGES/cinder.po @@ -10,9 +10,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -266,9 +266,6 @@ msgstr "'status' doit être spécifié." msgid "'volume_id' must be specified" msgstr "'volume_id' doit être spécifié." -msgid "'{}' object has no attribute '{}'" -msgstr "L'objet '{}' n'a pas d'attribut '{}'" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -434,23 +431,9 @@ msgid "An API version request must be compared to a VersionedMethod object." msgstr "" "Une demande de version d'API doit être comparée à un objet VersionedMethod." -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "Une erreur s'est produite dans SheepdogDriver. (Motif : %(reason)s)" - msgid "An error has occurred during backup operation" msgstr "Une erreur est survenue lors de la sauvegarde" -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "" -"Une erreur s'est produite lors de la tentative de modification de " -"l'instantané '%s'." - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "Une erreur s'est produite lors de la recherche du volume \"%s\"." - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -562,18 +545,12 @@ msgstr "" "Informations d'utilisateur de l'authentification introuvables dans le " "stockage CloudByte." -msgid "Authentication error" -msgstr "Erreur d'authentification" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "L'authentification a échoué, vérifiez les données d'identification du " "commutateur, code d'erreur : %s." -msgid "Authorization error" -msgstr "Erreur d'autorisation" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "La zone de disponibilité '%(s_az)s' n'est pas valide." @@ -592,9 +569,6 @@ msgstr "" msgid "Backend doesn't exist (%(backend)s)" msgstr "Le système dorsal n'existe pas (%(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." -msgstr "Le back-end a déjà été basculé. Impossible de le rebasculer." 
- #, python-format msgid "Backend reports: %(message)s" msgstr "Rapports de back-end : %(message)s" @@ -605,9 +579,6 @@ msgstr "Rapports de back-end : l'élément existe déjà" msgid "Backend reports: item not found" msgstr "Rapports de back-end : élément introuvable" -msgid "Backend server not NaServer." -msgstr "Le serveur de back-end n'est pas de type NaServer." - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" @@ -717,12 +688,6 @@ msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Format de projet incorrect : le projet n'est pas au format approprié (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Demande incorrecte envoyée au cluster Datera : arguments non valides : " -"%(args)s | %(message)s" - msgid "Bad response from Datera API" msgstr "Réponse erronée de l'API Datera" @@ -739,23 +704,6 @@ msgstr "binaire" msgid "Blank components" msgstr "Composants vides" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "" -"Méthode d'authentification de l'API Blockbridge (jeton ou mot de passe)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "" -"Mot de passe de l'API Blockbridge (pour méthode d'authentification 'mot de " -"passe')" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Jeton de l'API Blockbridge (pour méthode d'authentification 'jeton')" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "" -"Utilisateur de l'API Blockbridge (pour méthode d'authentification 'mot de " -"passe')" - msgid "Blockbridge api host not configured" msgstr "L'hôte de l'API Blockbridge n'a pas été configuré" @@ -877,9 +825,6 @@ msgstr "Impossible de transformer %s en entier." msgid "Can't access 'scality_sofs_config': %s" msgstr "Impossible d'accéder à 'scality_sofs_config' : %s" -msgid "Can't attach snapshot." -msgstr "Impossible de connecter l'instantané." - msgid "Can't decode backup record." msgstr "Impossible de décoder l'enregistrement de sauvegarde." @@ -1005,10 +950,6 @@ msgstr "" "Impossible d'importer l'instantané %s dans Cinder. Le statut de l'instantané " "n'est pas normal ou le statut d'exécution n'est pas connecté (online)." -#, python-format -msgid "Can't open config file: %s" -msgstr "Impossible d'ouvrir le fichier de configuration : %s" - msgid "Can't parse backup record." msgstr "Impossible d'analyser l'enregistrement de sauvegarde." @@ -1084,13 +1025,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "Connexion au serveur ECOM impossible." -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"Impossible de créer un clone d'une taille de %(vol_size)s depuis un volume " -"d'une taille de %(src_vol_size)s" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1141,13 +1075,6 @@ msgstr "" "Impossible de créer ou de trouver un groupe de stockage dénommé " "%(sgGroupName)s." -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"Impossible de créer un volume d'une taille de %(vol_size)s depuis un " -"instantané d'une taille de %(snap_size)s" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." 
msgstr "" @@ -1496,10 +1423,6 @@ msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "" "La commande %(cmd)s a été bloquée dans l'interface CLI et a été annulée" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition : dépassement du délai de %s " - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "" @@ -1666,20 +1589,10 @@ msgstr "ID de cluster GPFS introuvable : %s." msgid "Could not find GPFS file system device: %s." msgstr "Périphérique du système de fichiers GPFS introuvable : %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "" -"Impossible de trouver un hôte pour le volume %(volume_id)s de type " -"%(type_id)s." - #, python-format msgid "Could not find config at %(path)s" msgstr "Configuration introuvable dans %(path)s" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "Exportation iSCSI introuvable pour le volume %(volumeName)s." - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Exportation iSCSI trouvée pour le volume %s" @@ -1766,17 +1679,6 @@ msgstr "" "Création de la sauvegarde interrompue, état du volume attendu " "%(expected_status)s, mais état %(actual_status)s obtenu." -msgid "Create consistency group failed." -msgstr "La création du groupe de cohérence a échoué." - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"La création de volumes chiffrés du type %(type)s depuis l'image %(image)s " -"n'est pas prise en charge." - msgid "Create export for volume failed." msgstr "La création d'une exportation pour le volume a échoué." @@ -1870,12 +1772,6 @@ msgstr "" "L'hôte actuellement mappé pour le volume %(vol)s est dans un groupe d'hôtes " "non pris en charge avec %(group)s." -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "OBSOLETE : Déploiement v1 de l'API Cinder." - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "OBSOLETE : Déploiement v2 de l'API Cinder." - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1958,15 +1854,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "Les numéros d'unité logique dédoublonnés ne peuvent pas être étendus" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"L'optimiseur de dédoublonnage n'est pas installé. Impossible de créer un " -"volume dédupliqué" - -msgid "Default pool name if unspecified." -msgstr "Nom de pool par défaut s'il n'a pas été spécifié." - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1980,12 +1867,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "Le type de volume par défaut est introuvable." -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"Définit l'ensemble de pools exposés et leurs chaînes de requête dorsale " -"associées" - msgid "Delete LUNcopy error." msgstr "Erreur lors de la suppression de LUNcopy." @@ -2077,9 +1958,6 @@ msgstr "" "Erreur de configuration du pilote Cinder de Dell, replication_device %s " "introuvable" -msgid "Deploy v3 of the Cinder API." -msgstr "Déploiement v3 de l'API Cinder." 
- msgid "Describe-resource is admin only functionality" msgstr "Describe-resource est une fonctionnalité admin uniquement" @@ -2135,13 +2013,6 @@ msgid "Driver initialize connection failed (error: %(err)s)." msgstr "" "L'initialisation de la connexion par le pilote a échoué (erreur : %(err)s)." -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"Le pilote n'est pas en mesure d'effectuer une nouvelle saisie car le volume " -"(LUN {}) contient un instantané qu'il est interdit de migrer." - msgid "Driver must implement initialize_connection" msgstr "Le pilote doit implémenter initialize_connection" @@ -2649,12 +2520,6 @@ msgstr "" "Erreur d'association du groupe de stockage : %(storageGroupName)s. A la " "règle FAST : %(fastPolicyName)s avec la description d'erreur : %(errordesc)s." -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "" -"Erreur lors du rattachement du volume %s. Il se peut que la limite des " -"cibles ait été atteinte. " - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2833,10 +2698,6 @@ msgstr "" msgid "Error managing volume: %s." msgstr "Erreur lors de la gestion du volume : %s." -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "Erreur de mappage du volume %(vol)s. %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2885,17 +2746,9 @@ msgid "Error occurred when updating consistency group %s." msgstr "" "Une erreur est survenue lors de la mise à jour du groupe de cohérence %s." -#, python-format -msgid "Error parsing config file: %s" -msgstr "Erreur lors de l'analyse syntaxique du fichier de configuration : %s" - msgid "Error promoting secondary volume to primary" msgstr "Erreur de promotion du volume secondaire en volume primaire" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "Erreur de suppression du volume %(vol)s. %(error)s." - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "" @@ -3190,12 +3043,6 @@ msgstr "" msgid "Extend volume not implemented" msgstr "Extension du volume non implémentée" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"L'optimiseur FAST VP n'est pas installé. Impossible de définir la règle de " -"hiérarchisation pour le volume" - msgid "FAST is not supported on this array." msgstr "FAST n'est pas pris en charge sur cette matrice." @@ -3267,10 +3114,6 @@ msgstr "" "Echec de l'acquisition d'un verrouillage des ressources. (série : " "%(serial)s, inst : %(inst)s, ret : %(ret)s, stderr : %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "Echec de l'ajout de %(vol)s dans %(sg)s après %(retries)s tentatives." - msgid "Failed to add the logical device." msgstr "Echec de l'ajout de l'unité logique." 
@@ -3357,9 +3200,6 @@ msgstr "" msgid "Failed to create IG, %s" msgstr "Echec de la création du groupe demandeur, %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "Echec de la création de l'entité (image-volume) SolidFire" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Impossible de créer le groupe de volumes : %(vg_name)s" @@ -3478,11 +3318,6 @@ msgstr "" msgid "Failed to create snapshot %s" msgstr "Echec de la création de l'instantané %s" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "" -"Echec de la création d'instantané car aucun identificateur de numéro d'unité " -"logique (LUN) n'est indiqué" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "" @@ -3641,9 +3476,6 @@ msgstr "" "Impossible de réserver la zone de ressource d'instantané, impossible de " "localiser le volume pour l'ID %s" -msgid "Failed to establish SSC connection." -msgstr "Echec d'établissement de la connexion SSC." - msgid "Failed to establish connection with Coho cluster" msgstr "Connexion impossible à établir avec le cluster Coho " @@ -3697,10 +3529,6 @@ msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" "Impossible de trouver le groupe d'initiateurs iSCSI contenant %(initiator)s." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "Pool de stockage introuvable pour le volume source %s." - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Echec de l'obtention des détails CloudByte du compte [%s]." @@ -3938,28 +3766,6 @@ msgstr "" "Echec de gestion du volume %(name)s existant, en raison de la taille " "rapportée %(size)s qui n'était pas un nombre à virgule flottante." -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"Echec de la gestion du volume existant vu que le pool du type de volume " -"choisi ne correspond pas au partage NFS transmis dans le volume de référence." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"Echec de la gestion du volume existant vu que le pool du type de volume " -"choisi ne correspond pas au système de fichiers transmis dans le volume de " -"référence." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"Echec de la gestion du volume existant vu que le pool du type de volume " -"choisi ne correspond pas au pool de l'hôte." - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -4323,9 +4129,6 @@ msgstr "" "Erreur lors de la recherche de groupe de numéros d'unité logique dans la vue " "de mappage." -msgid "Find lun number error." -msgstr "Erreur lors de la recherche du nombre de numéros d'unité logique." - msgid "Find mapping view error." msgstr "Erreur lors de la recherche de la vue de mappage." @@ -4730,9 +4533,6 @@ msgstr "Informations de chemin incorrectes reçues de DRBDmanage : (%s)" msgid "HBSD error occurs." msgstr "Une erreur HBSD s'est produite." 
-msgid "HNAS has disconnected SSC" -msgstr "HNAS a déconnecté SSC" - msgid "HPELeftHand url not found" msgstr "URL HPELeftHand introuvable" @@ -4772,14 +4572,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "L'hôte %s n'a aucun demandeur FC" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "L'hôte %s n'a aucun initiateur iSCSI" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "L'hôte '%s' est introuvable." - #, python-format msgid "Host group with name %s not found" msgstr "Le groupe d'hôtes nommé %s est introuvable" @@ -4794,9 +4586,6 @@ msgstr "L'hôte N'EST PAS figé." msgid "Host is already Frozen." msgstr "L'hôte est déjà figé." -msgid "Host not found" -msgstr "Hôte introuvable" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Hôte introuvable. La suppression de %(service)s sur %(host)s a échoué." @@ -4829,9 +4618,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "Adresse IP/nom d'hôte de l'API Blockbridge." - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" @@ -4930,12 +4716,6 @@ msgstr "" "Exception CLI Infortrend : %(err)s Paramètre : %(param)s (Code retour : " "%(rc)s) (Sortie : %(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "Niveau initial : {}, stratégie : {} est non valide." - -msgid "Input type {} is not supported." -msgstr "Le type d'entrée {} n'est pas pris en charge." - msgid "Input volumes or snapshots are invalid." msgstr "Les volumes ou les instantanés d'entrée ne sont pas valides." @@ -4952,15 +4732,6 @@ msgstr "L'espace libre disponible est insuffisant pour l'extension du volume." msgid "Insufficient privileges" msgstr "Privilèges insuffisants" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "" -"Valeur de l'intervalle (en secondes) entre les nouvelles tentatives de " -"connexion au cluster ceph cluster." - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "Ports %(protocol)s %(port)s non valides spécifiés pour io_port_list." - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Domaine 3PAR non valide : %(err)s" @@ -5007,10 +4778,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Cible de réplication non valide : %(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "Type d'authentification VNX non valide : %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -5055,14 +4822,6 @@ msgstr "Clé d'auth non valide : %(reason)s" msgid "Invalid backup: %(reason)s" msgstr "Sauvegarde non valide : %(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"URL d'API barbican non valide : la version est obligatoire, par exemple " -"'http[s]://|[:port]/', l'URL indiquée est : %s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "" "Informations d'utilisateur chap non valides détectées dans le stockage " @@ -5238,10 +4997,6 @@ msgstr "Pool de stockage %s non valide spécifié." msgid "Invalid storage pool is configured." msgstr "Un pool de stockage non valide est configuré." -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "Mode de synchronisation non valide indiqué, le mode autorisé est %s." - msgid "Invalid transport type." 
msgstr "Type de transport non valide." @@ -5249,14 +5004,6 @@ msgstr "Type de transport non valide." msgid "Invalid update setting: '%s'" msgstr "Paramètre de mise à jour non valide : '%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"URL non valide : doit être au format 'http[s]://|[:port]/" -"', l'URL indiquée est : %s" - #, python-format msgid "Invalid value '%s' for force." msgstr "Valeur invalide '%s' pour le 'forçage'." @@ -5407,9 +5154,6 @@ msgstr "" "Le lancement d'un basculement a échoué car la réplication n'a pas été " "configurée correctement." -msgid "Item not found" -msgstr "Élément introuvable" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" @@ -5445,9 +5189,6 @@ msgstr "L'unité logique n'existe pas pour le volume : %s" msgid "LUN export failed!" msgstr "L'exportation de numéro d'unité logique a échoué. " -msgid "LUN id({}) is not valid." -msgstr "ID LUN ({}) non valide." - msgid "LUN map overflow on every channel." msgstr "Dépassement de mappe de numéro d'unité logique sur chaque canal." @@ -5455,9 +5196,6 @@ msgstr "Dépassement de mappe de numéro d'unité logique sur chaque canal." msgid "LUN not found with given ref %s." msgstr "LUN introuvable avec la réf donnée %s." -msgid "LUN number ({}) is not an integer." -msgstr "Le numéro d'unité logique (LUN) ({}) n'est pas un entier." - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "" @@ -5653,36 +5391,9 @@ msgstr "Une sauvegarde de métadonnées existe déjà pour ce volume" msgid "Metadata backup object '%s' already exists" msgstr "L'objet de sauvegarde des métadonnées '%s' existe déjà" -msgid "Metadata item was not found" -msgstr "Elément de métadonnées introuvable" - -msgid "Metadata item was not found." -msgstr "L'élément Metadata est introuvable." - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "" -"Taille de la clé de propriété de métadonnées %s supérieure à 255 caractères" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "" -"Valeur de taille de la clé de propriété de métadonnées (%s) supérieure à 255 " -"caractères" - -msgid "Metadata property key blank" -msgstr "Propriété de métadonnées à blanc" - msgid "Metadata property key blank." msgstr "Clé de propriété de métadonnées à blanc." -msgid "Metadata property key greater than 255 characters." -msgstr "" -"Taille de la clé de propriété de métadonnées supérieure à 255 caractères." - -msgid "Metadata property value greater than 255 characters." -msgstr "Valeur de la propriété de métadonnées supérieure à 255 caractères." - msgid "Metadata restore failed due to incompatible version" msgstr "" "Echec de restauration des métadonnées en raison d'une version incompatible" @@ -5691,25 +5402,6 @@ msgid "Metadata restore failed due to incompatible version." msgstr "" "Echec de restauration des métadonnées en raison d'une version incompatible." -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "La migration du volume %(src)s a échoué." - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"La migration du volume entre le volume source %(src)s et le volume de " -"destination %(dst)s a échoué." - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." 
-msgstr "" -"La migration du numéro d'unité logique %s a été interrompue ou a rencontré " -"une erreur." - -msgid "MirrorView/S enabler is not installed." -msgstr "L'optimiseur MirrorView/S n'est pas installé." - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5738,9 +5430,6 @@ msgid "Missing required element 'consistencygroup' in request body." msgstr "" "L'élément requis 'consistencygroup' est manquant dans le corps de demande." -msgid "Missing required element 'host' in request body." -msgstr "L'élément requis 'host' est manquant dans le corps de la demande." - msgid "Missing required element quota_class_set in request body." msgstr "Elément quota_class_set requis manquant dans le corps de demande." @@ -5865,9 +5554,6 @@ msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "Vous devez spécifier des pools de stockage. Option : sio_storage_pools." -msgid "Must supply a positive value for age" -msgstr "Vous devez indiquer un entier positif pour 'age'" - msgid "Must supply a positive, non-zero value for age" msgstr "Une valeur positive différente de zéro doit être indiquée pour age" @@ -6280,10 +5966,6 @@ msgstr "" "Réponse Null reçue lors de l'interrogation du travail [%(operation)s] basé " "[%(job)s] dans le stockage CloudByte." -msgid "Number of retries if connection to ceph cluster failed." -msgstr "" -"Nombre de nouvelles tentatives si la connexion au cluster ceph a échoué." - msgid "Object Count" msgstr "Nombre d'objets" @@ -6345,18 +6027,11 @@ msgstr "L'option gpfs_images_share_mode n'est pas correctement définie." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "L'option gpfs_mount_point_base n'est pas correctement définie." -msgid "Option map (cls._map) is not defined." -msgstr "Mappe d'options (cls._map) non définie." - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "" "L'état d'origine de %(res)s %(prop)s doit être l'une des valeurs '%(vals)s'" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "" -"Remplacer le port HTTPS pour se connecter au serveur de l'API Blockbridge." - #, python-format msgid "ParseException: %s" msgstr "ParseException : %s" @@ -6616,15 +6291,6 @@ msgstr "Réponse incomplète du serveur RPC" msgid "Raid did not have MCS Channel." msgstr "Raid n'avait pas de canal MCS." -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"Atteignez la limitation définie par l'option de configuration " -"max_luns_per_storage_group. L'opération d'ajout de %(vol)s dans le groupe de " -"stockage %(sg)s est rejetée." - #, python-format msgid "Received error string: %s" msgstr "Chaîne d'erreur reçue : %s" @@ -6811,9 +6477,6 @@ msgstr "Configuration obligatoire non trouvée" msgid "Required flag %s is not set" msgstr "L'indicateur obligatoire %s n'est pas défini" -msgid "Requires an NaServer instance." -msgstr "Nécessite une instance NaServer." - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -7011,10 +6674,6 @@ msgstr "Service %(service_id)s introuvable sur l'hôte %(host)s." msgid "Service %(service_id)s could not be found." msgstr "Le service %(service_id)s est introuvable." -#, python-format -msgid "Service %s not found." -msgstr "Service %s non trouvé." - msgid "Service is too old to fulfil this request." 
msgstr "Service trop ancien pour satisfaire cette demande." @@ -7111,10 +6770,6 @@ msgstr "" "L'instantané %(snapshot_id)s n'a pas de métadonnées avec la clé " "%(metadata_key)s." -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "L'instantané %s ne doit pas faire partie d'un groupe de cohérence." - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "L'instantané '%s' n'existe pas sur la matrice." @@ -7141,9 +6796,6 @@ msgstr "Instantané du volume non pris en charge à l'état : %s." msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "Ressource d'instantané \"%s\" non déployée ailleurs ?" -msgid "Snapshot size must be multiple of 1 GB." -msgstr "La taille de l'instantané doit être un multiple de 1 Go." - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "Statut d'instantané %(cur)s interdit pour update_snapshot_status" @@ -7303,11 +6955,6 @@ msgstr "ID du système de stockage non défini." msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Système de stockage introuvable pour le pool %(poolNameInStr)s." -msgid "Storage-assisted migration failed during manage volume." -msgstr "" -"La migration à l'aide du back-end de stockage a échoué lors de l'opération " -"de gestion du volume." - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "StorageSystem %(array)s introuvable." @@ -7359,10 +7006,6 @@ msgstr "" msgid "Target volume type is still in use." msgstr "Le type de volume cible est toujours utilisé." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "L'ID titulaire %s n'existe pas." - msgid "Terminate connection failed" msgstr "Echec de fin de la connexion" @@ -7462,10 +7105,6 @@ msgstr "" "L'heure de fin (%(end)s) doit être postérieure à l'heure de début " "(%(start)s)." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "extra_spec %s n'est pas valide." - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "La spécification supplémentaire %(extraspec)s n'est pas valide." @@ -7518,14 +7157,6 @@ msgstr "" msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "L'utilisateur CHAP iSCSI %(user)s n'existe pas." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"Le numéro d'unité logique importé, %(lun_id)s, fait partie du pool " -"%(lun_pool)s, lequel n'est pas géré par l'hôte %(host)s." - msgid "The key cannot be None." msgstr "La clé ne peut pas être nulle." @@ -7615,11 +7246,6 @@ msgstr "" "L'instantané ne peut pas être créé alors que le volume est en mode " "maintenance." -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "Le volume source %s n'est pas dans le pool géré par l'hôte actuel." - msgid "The source volume for this WebDAV operation not found." msgstr "Volume source introuvable pour cette opération WebDAV." @@ -7796,10 +7422,6 @@ msgstr "" msgid "There are no valid ESX hosts." msgstr "Il n'existe aucun hôte ESX valide." -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "Aucun magasin de données valide connecté à %s." - msgid "There are no valid datastores." msgstr "Il n'y a aucun magasin de données valide." @@ -7903,11 +7525,6 @@ msgstr "" "L'allocation de ressources à la demande n'est pas prise en charge sur cette " "version du gestionnaire de volume logique (LVM)." 
-msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "" -"L'optimiseur d'allocation de ressources n'est pas installé. Impossible de " -"créer un volume fin" - msgid "This driver does not support deleting in-use snapshots." msgstr "" "Ce pilote ne prend pas en charge la suppression d'instantanés en cours " @@ -7947,14 +7564,6 @@ msgstr "" "Dépassement du délai d'attente de mise à jour Nova pour la suppression de " "l'instantané %(id)s." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"Valeur de délai d'attente (en secondes) utilisée lors de la connexion au " -"cluster ceph. Si la valeur < 0, aucun délai d'attente n'est défini et la " -"valeur librados par défaut est utilisée." - #, python-format msgid "Timeout while calling %s " msgstr "Délai d'attente dépassé lors de l'appel de %s " @@ -8046,9 +7655,6 @@ msgstr "Impossible d'effectuer le basculement de %s." msgid "Unable to connect or find connection to host" msgstr "Impossible d'établir ou de trouver une connexion à l'hôte" -msgid "Unable to create Barbican Client without project_id." -msgstr "Impossible de créer le client Barbican sans project_id." - #, python-format msgid "Unable to create consistency group %s" msgstr "Impossible de créer le groupe de cohérence %s" @@ -8149,10 +7755,6 @@ msgstr "" "Impossible d'effectuer de réplication avec l'API REST Purity version " "%(api_version)s, l'une des %(required_versions)s est nécessaire." -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "" -"Impossible d'activer la réplication et la fonction snapcopy en même temps." - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Impossible d'établir un partenariat avec le cluster Storwize %s." @@ -8560,9 +8162,6 @@ msgstr "Protocole inconnu : %(protocol)s." msgid "Unknown quota resources %(unknown)s." msgstr "Ressources de quota inconnues %(unknown)s." -msgid "Unknown service" -msgstr "Service inconnu" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Sens de tri inconnu, doit être 'desc' ou 'asc'" @@ -8685,9 +8284,6 @@ msgstr "ID Utilisateur" msgid "User does not have admin privileges" msgstr "L’utilisateur n'a pas les privilèges administrateur" -msgid "User is not authorized to use key manager." -msgstr "L'utilisateur n'est pas autorisé à utiliser le gestionnaire de clés." - msgid "User not authorized to perform WebDAV operations." msgstr "Utilisateur non autorisé à exécuter des opérations WebDAV." @@ -8917,14 +8513,6 @@ msgstr "" "Le volume %s est en ligne (online). Définissez le volume comme étant hors " "ligne (offline) pour le gérer avec OpenStack." -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "" -"Le volume %s ne doit pas être en cours de migration ou connecté, il ne doit " -"pas appartenir à un groupe de cohérence ou avoir des instantanés." - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Le volume %s ne doit pas faire partie d'un groupe de cohérence." @@ -8953,12 +8541,6 @@ msgstr "Le groupe de volumes %s n'existe pas" msgid "Volume Type %(id)s already exists." msgstr "Le type de volume %(id)s existe déjà." -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." 
-msgstr "" -"Le type de volume %(type_id)s n'a pas de spécifications supplémentaires avec " -"la clé %(id)s." - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -9164,9 +8746,6 @@ msgstr "" msgid "Volume size must be a multiple of 1 GB." msgstr "La taille du volume doit être un multiple de 1 Go." -msgid "Volume size must be multiple of 1 GB." -msgstr "La taille du volume doit être un multiple de 1 Go." - msgid "Volume size must multiple of 1 GB." msgstr "La taille du volume doit être un multiple de 1 Go." @@ -9246,10 +8825,6 @@ msgstr "Le nom de type de volume ne peut pas être vide." msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "Le type de volume portant le nom %(volume_type_name)s est introuvable." -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "Le volume avec le volume_id %s n'existe pas." - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -9264,18 +8839,10 @@ msgstr "" "Le volume : %(volumeName)s n'a pas été ajouté au groupe de stockage " "%(sgGroupName)s." -#, python-format -msgid "Volume: %s could not be found." -msgstr "Volume: %s introuvable" - #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Volume %s déjà géré par Cinder." -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "" -"Les volumes seront morcelées en objets de cette taille (en mégaoctets)." - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" @@ -9922,14 +9489,6 @@ msgstr "" "une source de groupe de cohérence uniquement. Vous ne pouvez pas utiliser " "plusieurs sources." -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src prend en charge une source cgsnapshot ou " -"une source de groupe de cohérence. Vous ne pouvez pas utiliser plusieurs " -"sources." - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "" @@ -10039,11 +9598,6 @@ msgstr "" "create_volume_from_snapshot : le statut de l'instantané doit être \"available" "\" pour créer le volume. Le statut non valide est : %s." -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "" -"create_volume_from_snapshot : La taille de la source et de la destination " -"diffère." - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." @@ -10051,11 +9605,6 @@ msgstr "" "create_volume_from_snapshot : la taille du volume est différente de celle du " "volume basé sur l'instantané." -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "" -"Les hiérarchisations dédupliquée et automatique ne peuvent pas être activées " -"toutes les deux." - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -10100,9 +9649,6 @@ msgstr "détachez l'instantané du noeud distant" msgid "do_setup: No configured nodes." msgstr "do_setup : Aucun noeud configuré." -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries doit être supérieur ou égal à 0" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -10303,22 +9849,12 @@ msgstr "L'exécution d'iscsiadm a échoué. 
" msgid "key manager error: %(reason)s" msgstr "Erreur du gestionnaire de clés : %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key n'est pas défini" - msgid "limit param must be an integer" msgstr "le paramètre limit doit être un entier" msgid "limit param must be positive" msgstr "le paramètre limit doit être positif" -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -"manage_existing ne peut pas gérer un volume connecté à des hôtes. Veuillez " -"déconnecter ce volume des hôtes existants avant l'importation" - msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing requiert une clé 'name' pour identifier un volume existant." @@ -10364,10 +9900,6 @@ msgstr "plusieurs ressources avec l'ID d'instantané %s ont été détectées" msgid "name cannot be None" msgstr "le nom ne peut pas être None" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path : outil NAVISECCLI introuvable %(path)s." - #, python-format msgid "no REPLY but %r" msgstr "aucune réponse (REPLY) mais %r" @@ -10431,18 +9963,6 @@ msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted peut uniquement correspondre à 'no', 'yes' ou 'only', et non %r" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "" -"L'unité de réplication (replication_device) doit être configurée sur le back-" -"end : %s." - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "" -"L'unité de réplication (replication_device ) avec l'ID de back-end [%s] est " -"manquante." - #, python-format msgid "replication_failover failed. %s not found." msgstr "Echec de replication_failover. %s introuvable." @@ -10503,9 +10023,6 @@ msgstr "san_ip n'a pas été défini." msgid "san_ip must be set" msgstr "san_ip doit être défini" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip : Configuration de zone obligatoire. san_ip n'est pas défini." - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." @@ -10517,16 +10034,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve() ne peut être appelé qu'une seule fois" -msgid "service not found" -msgstr "service introuvable" - -msgid "snapshot does not exist" -msgstr "L'instantané n'existe pas" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "ID d'instantané :%s introuvable" - #, python-format msgid "snapshot-%s" msgstr "instantané %s" @@ -10537,10 +10044,6 @@ msgstr "instantanés affectés" msgid "snapshots changed" msgstr "instantanés modifiés" -#, python-format -msgid "source vol id:%s not found" -msgstr "ID de volume source :%s introuvable" - #, python-format msgid "source volume id:%s is not replicated" msgstr "identificateur de volume source : %s non répliqué" @@ -10640,9 +10143,6 @@ msgstr "volume affecté" msgid "volume changed" msgstr "volume modifié" -msgid "volume does not exist" -msgstr "Le volume n'existe pas" - msgid "volume is already attached" msgstr "le volume est déjà connecté" @@ -10660,9 +10160,6 @@ msgstr "" msgid "volume size %d is invalid." msgstr "taille du volume %d non valide." 
-msgid "volume_type cannot be None" -msgstr "La valeur de volume_type ne peut pas être None" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" @@ -10698,6 +10195,3 @@ msgid "" msgstr "" "La propriété zfssa_manage_policy doit être définie avec la valeur 'strict' " "ou 'loose'. Valeur actuelle : %s." - -msgid "{} is not a valid option." -msgstr "{} est une option non valide." diff --git a/cinder/locale/it/LC_MESSAGES/cinder-log-error.po b/cinder/locale/it/LC_MESSAGES/cinder-log-error.po index e0af7e8a4..128932e2b 100644 --- a/cinder/locale/it/LC_MESSAGES/cinder-log-error.po +++ b/cinder/locale/it/LC_MESSAGES/cinder-log-error.po @@ -9,9 +9,9 @@ # Remo Mattei , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev487\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-30 03:17+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -107,10 +107,6 @@ msgstr "" msgid "Array Serial Number must be in the file %(fileName)s." msgstr "Il numero di serie dell'array deve essere nel file %(fileName)s." -#, python-format -msgid "Array mismatch %(myid)s vs %(arid)s" -msgstr "Mancata corrispondenza dell'array %(myid)s vs %(arid)s" - #, python-format msgid "Array query failed - No response (%d)!" msgstr "Query dell'array non riuscita - Nessuna risposta (%d)." @@ -183,11 +179,6 @@ msgstr "Chiamata a Nova per creare l'istantanea non riuscita" msgid "Call to json.loads() raised an exception: %s." msgstr "La chiamata a json.loads() ha generato un'eccezione: %s." -#, python-format -msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s." -msgstr "" -"Impossibile aggiungere la lun %(lun)s al gruppo di coerenza %(cg_name)s." - #, python-format msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." msgstr "" @@ -196,17 +187,6 @@ msgstr "" msgid "Can not open the recent url, login again." msgstr "Impossibile aprire l'url recente, rieseguire il login." -#, python-format -msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s." -msgstr "" -"Impossibile inserire le nuove LUN %(luns)s nel gruppo di coerenza " -"%(cg_name)s." - -#, python-format -msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." -msgstr "" -"Impossibile rimuovere le LUN %(luns)s nel gruppo di coerenza %(cg_name)s." - #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "Impossibile trovare il volume da associare %(key)s, %(msg)s" @@ -402,10 +382,6 @@ msgstr "" msgid "Connection to %s failed and no secondary!" msgstr "Connessione a %s non riuscita e nessun elemento secondario." -#, python-format -msgid "Consistency group %s: create failed" -msgstr "Creazione del gruppo di coerenza %s: non riuscita" - #, python-format msgid "Controller GET failed (%d)" msgstr "Comando GET del controller non riuscito (%d)" @@ -449,22 +425,6 @@ msgstr "Impossibile eliminare il volume dell'immagine non riuscita %(id)s." msgid "Could not delete the image volume %(id)s." msgstr "Impossibile eliminare il volume dell'immagine %(id)s." -#, python-format -msgid "" -"Could not do delete of snapshot %s on filer, falling back to exec of \"rm\" " -"command." -msgstr "" -"Non è stato possibile eliminare l'istantanea %s sul filer, fallback " -"nell'esecuzione del comando \"rm\"." 
- -#, python-format -msgid "" -"Could not do delete of volume %s on filer, falling back to exec of \"rm\" " -"command." -msgstr "" -"Non è stato possibile eliminare il volume %s sul filer, fallback " -"nell'esecuzione del comando \"rm\"." - #, python-format msgid "Could not find a host for consistency group %(group_id)s." msgstr "Impossibile trovare un host per il gruppo di coerenza %(group_id)s." @@ -518,10 +478,6 @@ msgstr "" msgid "Could not validate device %s" msgstr "Impossibile convalidare il dispositivo %s" -#, python-format -msgid "Create cg snapshot %s failed." -msgstr "Creazione dell'istantanea cg %s non riuscita." - #, python-format msgid "" "Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " @@ -530,10 +486,6 @@ msgstr "" "Creazione di clone_image_volume: %(volume_id)s per l'immagine %(image_id)s, " "non riuscita (Eccezione: %(except)s)" -#, python-format -msgid "Create consistency group %s failed." -msgstr "Creazione del gruppo di coerenza %s non riuscita." - #, python-format msgid "" "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." @@ -616,14 +568,6 @@ msgstr "" "Impossibile trovare il tipo di volume predefinito. Controllare la " "configurazione default_volume_type:" -#, python-format -msgid "Delete cgsnapshot %s failed." -msgstr "Eliminazione dell'istantanea %s non riuscita." - -#, python-format -msgid "Delete consistency group %s failed." -msgstr "Eliminazione del gruppo di coerenza %s non riuscita." - msgid "Delete consistency group failed to update usages." msgstr "" "L'eliminazione del gruppo di coerenza non è riuscita ad aggiornare gli " @@ -752,9 +696,6 @@ msgstr "Errore TypeError. %s" msgid "Error activating LV" msgstr "Errore durante l'attivazione di LV" -msgid "Error adding HBA to server" -msgstr "Errore durante l'aggiunta di HBA al server" - #, python-format msgid "Error changing Storage Profile for volume %(original)s to %(name)s" msgstr "" @@ -780,12 +721,6 @@ msgstr "" "Errore durante il tentativo di contattare il server glance '%(netloc)s' per " "'%(method)s', %(extra)s." -msgid "Error copying key." -msgstr "Errore durante la copia della chiave." - -msgid "Error creating Barbican client." -msgstr "Errore durante la creazione del client Barbican." - #, python-format msgid "Error creating QOS rule %s" msgstr "Errore durante la creazione della regola QOS %s" @@ -802,9 +737,6 @@ msgstr "Errore durante la creazione del record chap." msgid "Error creating cloned volume" msgstr "Errore durante la creazione del volume clonato" -msgid "Error creating key." -msgstr "Errore durante la creazione della chiave." - msgid "Error creating snapshot" msgstr "Errore durante la creazione dell'istantanea" @@ -818,9 +750,6 @@ msgstr "Errore durante la creazione del volume. Messaggio - %s." msgid "Error deactivating LV" msgstr "Errore durante la disattivazione di LV" -msgid "Error deleting key." -msgstr "Errore durante l'eliminazione della chiave." - msgid "Error deleting snapshot" msgstr "Errore durante l'eliminazione dell'istantanea" @@ -919,24 +848,9 @@ msgstr "Errore durante il richiamo di array, pool, SLO e carico di lavoro." msgid "Error getting chap record." msgstr "Errore durante il richiamo del record chap." -#, python-format -msgid "Error getting iSCSI target info from EVS %(evs)s." -msgstr "" -"Errore durante il richiamo delle informazioni sulla destinazione iSCSI da " -"EVS %(evs)s." - -msgid "Error getting key." -msgstr "Errore durante il richiamo della chiave." - msgid "Error getting name server info." 
msgstr "Errore durante il richiamo delle informazioni sul server dei nomi. " -msgid "Error getting secret data." -msgstr "Errore durante il richiamo dei dati segreti." - -msgid "Error getting secret metadata." -msgstr "Errore durante il richiamo dei metadati segreti." - msgid "Error getting show fcns database info." msgstr "Impossibile ottenere le informazioni sul database show fcns. " @@ -944,11 +858,6 @@ msgid "Error getting target pool name and array." msgstr "" "Errore durante il richiamo del nome del pool di destinazione e dell'array." -#, python-format -msgid "Error happened during storage pool querying, %s." -msgstr "" -"Si è verificato un errore durante la query del pool di archiviazione, %s." - #, python-format msgid "Error has occurred: %s" msgstr "Si è verificato un errore: %s" @@ -1111,14 +1020,6 @@ msgstr "" "Si è verificato un errore durante la creazione del volume: %(id)s " "dall'immagine: %(image_id)s." -#, python-format -msgid "Error on adding lun to consistency group. %s" -msgstr "Errore nell'aggiunta della lun al gruppo di coerenza. %s" - -#, python-format -msgid "Error on enable compression on lun %s." -msgstr "Errore nell'abilitazione della compressione nella lun %s" - #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" @@ -1198,9 +1099,6 @@ msgstr "" msgid "Error starting coordination backend." msgstr "Errore durante l'avvio del backend di coordinazione." -msgid "Error storing key." -msgstr "Errore durante l'archiviazione della chiave." - #, python-format msgid "Error trying to change %(opt)s from %(old)s to %(new)s" msgstr "Errore durante il tentativo di modificare %(opt)s da %(old)s a %(new)s" @@ -1780,10 +1678,6 @@ msgstr "" msgid "Failed to find %(s)s. Result %(r)s" msgstr "impossibile trovare %(s)s. Risultato %(r)s" -#, python-format -msgid "Failed to find available iSCSI targets for %s." -msgstr "Impossibile trovare le destinazioni iSCSI disponibili per %s." - msgid "Failed to get IQN!" msgstr "Impossibile ottenere IQN." @@ -1927,9 +1821,6 @@ msgstr "Impossibile eseguire il failover della replica" msgid "Failed to present volume %(name)s (%(status)d)!" msgstr "Impossibile presentare il volume %(name)s (%(status)d)!" -msgid "Failed to query migration status of LUN." -msgstr "Impossibile eseguire la query dello stato di migrazione della LUN." - msgid "Failed to re-export volume, setting to ERROR." msgstr "Impossibile riesportare il volume, impostazione in ERRORE." @@ -2125,14 +2016,6 @@ msgstr "" "Impossibile aggiornare i metadati %(volume_id)s utilizzando i metadati " "dell'istantanea fornita %(snapshot_id)s." -#, python-format -msgid "" -"Failed to update initiator data for initiator %(initiator)s and backend " -"%(backend)s" -msgstr "" -"Impossibile aggiornare i dati dell'iniziatore per l'iniziatore %(initiator)s " -"e il backend %(backend)s" - #, python-format msgid "Failed to update quota donating volume transfer id %s" msgstr "" @@ -2189,10 +2072,6 @@ msgstr "Impossibile scrivere in /etc/scst.conf." msgid "Failed to write persistence file: %(path)s." msgstr "Impossibile scrivere il file di persistenza: %(path)s." -#, python-format -msgid "Failed updating %(object_type)s %(object_id)s with %(update)s" -msgstr "Impossibile aggiornare %(object_type)s %(object_id)s con %(update)s" - #, python-format msgid "" "Failed updating %(snapshot_id)s metadata using the provided volumes " @@ -2315,10 +2194,6 @@ msgstr "Errore di richiamo metodo. " msgid "Get replication status for volume failed." 
msgstr "Richiamo dello stato delle replica per il volume non riuscito. " -#, python-format -msgid "HDP not found: %s" -msgstr "HDP non trovato: %s" - #, python-format msgid "Host PUT failed (%s)." msgstr "Comando PUT dell'host non riuscito (%s)." @@ -2361,10 +2236,6 @@ msgstr "Nome host non valido %(host)s" msgid "Invalid replication target specified for failover" msgstr "Destinazione di replica non valida specificata per il failover" -#, python-format -msgid "Invalid value for %(key)s, value is %(value)s." -msgstr "Valore non valido per %(key)s, il valore è %(value)s." - msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" @@ -2442,12 +2313,6 @@ msgstr "" msgid "Lun delete for %s failed!" msgstr "Eliminazione della lun per %s non riuscita." -#, python-format -msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!" -msgstr "" -"L'istantanea di eliminazione lun per il volume %(vol)s istantanea %(snap)s " -"non è riuscita." - msgid "Lun mapping returned null!" msgstr "L'associazione LUN ha restituito null." @@ -2486,10 +2351,6 @@ msgstr "" msgid "Message: %s" msgstr "Messaggio: %s" -#, python-format -msgid "Migration of LUN %s failed to complete." -msgstr "Impossibile completare la migrazione della LUN %s." - msgid "Model update failed." msgstr "Aggiornamento del modello non riuscito." @@ -2509,12 +2370,6 @@ msgstr "Errore di montaggio per %(share)s." msgid "Multiple replay profiles under name %s" msgstr "Più profili di risposta con nome %s" -#, python-format -msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s" -msgstr "" -"La condivisione NFS %(share)s non ha alcuna voce di servizio: %(svc)s -> " -"%(hdp)s" - msgid "No CLI output for firmware version check" msgstr "Nessun output CLI per il controllo della versione firmware" @@ -2537,14 +2392,6 @@ msgstr "" "Nessuna azione richiesta. Il volume: %(volumeName)s è già parte della " "combinazione slo/carico di lavoro: %(targetCombination)s." -#, python-format -msgid "No configuration found for service: %s" -msgstr "Nessuna configurazione trovata per il servizio: %s" - -#, python-format -msgid "No configuration found for service: %s." -msgstr "Nessuna configurazione trovata per il servizio: %s." - #, python-format msgid "" "No snapshots found in database, but %(path)s has backing file " @@ -2881,9 +2728,6 @@ msgid "" "The connector does not contain the required information: wwpns is missing" msgstr "Il connettore non contiene le informazioni necessarie: wwpns mancante" -msgid "The given extra_spec or valid_values is None." -msgstr "I parametri extra_spec o valid_values specificati sono None." - msgid "The list of iscsi_ip_addresses is empty" msgstr "L'elenco di iscsi_ip_addresses è vuoto" @@ -3126,13 +2970,6 @@ msgstr "" "Impossibile gestire il volume esistente. Driver del volume %s non " "inizializzato. " -msgid "Unable to manage_existing snapshot on a disabled service." -msgstr "" -"Impossibile gestire l'istantanea esistente su un servizio disabilitato." - -msgid "Unable to manage_existing volume on a disabled service." -msgstr "Impossibile gestire il volume esistente su un servizio disabilitato." - #, python-format msgid "Unable to map %(vol)s to %(srv)s" msgstr "Impossibile associare %(vol)s a %(srv)s" @@ -3552,9 +3389,6 @@ msgstr "" "eliminazione: %(vol_id)s non riuscito con stdout: %(out)s\n" " stderr: %(err)s" -msgid "delete_vol: provider location empty." -msgstr "delete_vol: ubicazione del fornitore vuota." 
- #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "ensure_export: Volume %s non trovato nella memoria." @@ -3569,10 +3403,6 @@ msgstr "errore durante l'aggiornamento delle statistiche del volume" msgid "horcm command timeout." msgstr "Timeout del comando horcm." -#, python-format -msgid "iSCSI portal not found for service: %s" -msgstr "Portale iSCSI non trovato per il servizio: %s" - msgid "import pywbem failed!! pywbem is necessary for this volume driver." msgstr "" "importazione di pywbem non riuscita. pywbem è necessario per questo driver " diff --git a/cinder/locale/it/LC_MESSAGES/cinder-log-info.po b/cinder/locale/it/LC_MESSAGES/cinder-log-info.po index 0d6849447..1e15fa243 100644 --- a/cinder/locale/it/LC_MESSAGES/cinder-log-info.po +++ b/cinder/locale/it/LC_MESSAGES/cinder-log-info.po @@ -8,13 +8,13 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev522\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-31 10:23+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-17 10:50+0000\n" +"PO-Revision-Date: 2016-03-17 10:49+0000\n" "Last-Translator: Alessandra \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" @@ -66,10 +66,6 @@ msgstr "" "Dati risposta: %(res)s\n" "\n" -#, python-format -msgid "%(element)s: %(val)s" -msgstr "%(element)s: %(val)s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" @@ -107,10 +103,6 @@ msgstr "" "3PAR vlun per il volume '%(name)s' è stato eliminato ma l'host '%(host)s' " "non è stato eliminato perché: %(reason)s" -#, python-format -msgid "AUTH properties: %(authProps)s" -msgstr "Proprietà AUTH: %(authProps)s" - #, python-format msgid "AUTH properties: %s." msgstr "Proprietà AUTH: %s." @@ -187,30 +179,14 @@ msgstr "" msgid "Availability Zones retrieved successfully." msgstr "Zone di disponibilità richiamate correttamente." -#, python-format -msgid "Available services: %s" -msgstr "Servizi disponibili: %s" - -#, python-format -msgid "Available services: %s." -msgstr "Servizi disponibili: %s." - #, python-format msgid "Backend name is %s." msgstr "Il nome backend è %s." -#, python-format -msgid "Backend type: %s" -msgstr "Tipo di backend: %s" - #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "VM di backup: %(backing)s ridenominata %(new_name)s." -#, python-format -msgid "Backing consistency group snapshot %s available for deletion" -msgstr "Backup istantanea gruppo di coerenza %s disponibile per l'eliminazione" - msgid "Backing not available, no operation to be performed." msgstr "Backup non disponibile, nessuna operazione da eseguire." @@ -239,10 +215,6 @@ msgstr "" msgid "Backup service: %s." msgstr "Servizio di backup: %s." -#, python-format -msgid "Bandwidth limit is: %s." -msgstr "Il limite della larghezza di banda è: %s." - #, python-format msgid "Begin backup of volume %s." msgstr "Inizio backup del volume %s." @@ -276,10 +248,6 @@ msgstr "Versione CONCERTO: %s" msgid "Calling os-brick to detach ScaleIO volume." msgstr "Chiamata os-brick per scollegare il volume ScaleIO." -#, python-format -msgid "Cancelling Migration from LUN %s." -msgstr "Annullamento della migrazione da LUN %s." 
-
 #, python-format
 msgid ""
 "Cannot provide backend assisted migration for volume: %s because cluster "
@@ -320,16 +288,6 @@ msgstr ""
 "Impossibile fornire la migrazione assistita del backend per il volume: %s in "
 "quanto il volume deriva da un backend diverso."
 
-#, python-format
-msgid ""
-"Capacity stats for SRP pool %(poolName)s on array %(arrayName)s "
-"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb="
-"%(free_capacity_gb)lu"
-msgstr ""
-"Statistiche di capacità per il pool SRP %(poolName)s sull'array "
-"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb="
-"%(free_capacity_gb)lu"
-
 #, python-format
 msgid "Cgsnapshot %s: creating."
 msgstr "Creazione di cgsnapshot %s:."
@@ -346,25 +304,12 @@ msgstr "Verifica del clone dell'immagine %s dalla condivisione glance."
 
 #, python-format
 msgid "Checking origin %(origin)s of volume %(volume)s."
 msgstr "Verifica dell'origine %(origin)s del volume %(volume)s."
 
-#, python-format
-msgid ""
-"Cinder ISCSI volume with current path %(path)s is no longer being managed. "
-"The new name is %(unm)s."
-msgstr ""
-"Il volume ISCSI Cinder con percorso corrente %(path)s non viene più gestito. "
-"Il nuovo nome è %(unm)s."
-
 #, python-format
 msgid ""
 "Cinder NFS volume with current path \"%(cr)s\" is no longer being managed."
 msgstr ""
 "Il volume NFS Cinder con percorso corrente \"%(cr)s\" non è più gestito."
 
-#, python-format
-msgid "Cinder NFS volume with current path %(cr)s is no longer being managed."
-msgstr ""
-"Il volume NFS Cinder con percorso corrente %(cr)s non viene più gestito. "
-
 msgid "Cinder secure environment indicator file exists."
 msgstr "Il file indicatore dell'ambiente sicuro Cinder esiste."
@@ -419,13 +364,6 @@ msgstr ""
 "%(agent-type)s perfpol-name=%(perfpol-name)s encryption=%(encryption)s "
 "cipher=%(cipher)s multi-initiator=%(multi-initiator)s"
 
-#, python-format
-msgid ""
-"Cloning with volume_name %(vname)s clone_name %(cname)s export_path %(epath)s"
-msgstr ""
-"Clonazione con volume_name %(vname)s clone_name %(cname)s export_path "
-"%(epath)s"
-
 #, python-format
 msgid "CloudByte API executed successfully for command [%s]."
 msgstr "API CloudByte eseguito correttamente per il comando [%s]."
@@ -444,10 +382,6 @@ msgstr "Completamento-migrazione del volume completati correttamente."
 
 #, python-format
 msgid "Completed: convert_to_base_volume: id=%s."
 msgstr "Completato: convert_to_base_volume: id=%s."
 
-#, python-format
-msgid "Configured pools: %s"
-msgstr "Pool configurati: %s"
-
 #, python-format
 msgid ""
 "Connect initialization info: {driver_volume_type: fibre_channel, data: "
@@ -469,22 +403,6 @@ msgstr ""
 
 msgid "Connector returning fcnsinfo-%s"
 msgstr "Il connettore restituisce fcnsinfo-%s"
 
-#, python-format
-msgid "Consistency group %(cg)s is created successfully."
-msgstr "Gruppo di coerenza %(cg)s creato correttamente."
-
-#, python-format
-msgid "Consistency group %s was deleted successfully."
-msgstr "Gruppo di coerenza %s eliminato correttamente."
-
-#, python-format
-msgid "Consistency group %s: created successfully"
-msgstr "Gruppo di coerenza %s: creato correttamente"
-
-#, python-format
-msgid "Consistency group %s: creating"
-msgstr "Creazione del gruppo di coerenza %s:"
-
 #, python-format
 msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s"
 msgstr "Convertita immagine di %(sz).2f MB su %(mbps).2f MB/s"
@@ -593,14 +511,6 @@ msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed."
 msgstr ""
 "Creazione volume %(volume_id)s da istantanea %(snapshot_id)s completata."
-#, python-format -msgid "" -"Create Volume: %(volume)s Size: %(size)s pool: %(pool)s provisioning: " -"%(provisioning)s tiering: %(tiering)s " -msgstr "" -"Crea volume: %(volume)s Dimensione: %(size)s pool: %(pool)s provisioning: " -"%(provisioning)s livellamento: %(tiering)s " - #, python-format msgid "" "Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " @@ -617,9 +527,6 @@ msgstr "Creazione backup completata, backup: %s." msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "Creazione backup avviata, backup: %(backup_id)s volume: %(volume_id)s." -msgid "Create consistency group completed successfully." -msgstr "Creazione del gruppo di coerenza completata correttamente." - #, python-format msgid "Create consistency group from source-%(source)s completed successfully." msgstr "" @@ -643,10 +550,6 @@ msgstr "" msgid "Create snapshot from volume %s" msgstr "Crea istantanea dal volume %s" -#, python-format -msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" -msgstr "Crea istantanea: %(snapshot)s: volume: %(volume)s" - #, python-format msgid "" "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " @@ -874,10 +777,6 @@ msgstr "Elimina gruppo di coerenza: %(group)s." msgid "Delete Snapshot %(snapshot_id)s completed." msgstr "Eliminazione istantanea %(snapshot_id)s completata." -#, python-format -msgid "Delete Snapshot: %(snapshot)s" -msgstr "Elimina istantanea: %(snapshot)s." - #, python-format msgid "Delete Snapshot: %(snapshot)s." msgstr "Elimina istantanea: %(snapshot)s." @@ -1015,10 +914,6 @@ msgstr "Eliminazione dell'istantanea: %s" msgid "Deleting stale snapshot: %s" msgstr "Eliminazione dell'istantanea obsoleta: %s" -#, python-format -msgid "Deleting unneeded host %(host_name)r." -msgstr "Eliminazione dell'host non necessario %(host_name)r." - #, python-format msgid "Deleting volume %s " msgstr "Eliminazione del volume %s" @@ -1101,10 +996,6 @@ msgstr "Inizializzazione del driver completata correttamente." msgid "Driver post RPC initialization completed successfully." msgstr "Post-inizializzazione RPC del driver completata correttamente." -#, python-format -msgid "Driver stats: %s" -msgstr "Statistiche driver: %s" - #, python-format msgid "" "E-series proxy API version %(version)s does not support full set of SSC " @@ -1132,10 +1023,6 @@ msgstr "Driver EQL: esecuzione di \"%s\"." msgid "Editing Volume %(vol)s with mask %(mask)s" msgstr "Modifica del volume %(vol)s con maschera %(mask)s" -#, python-format -msgid "Elapsed time for clear volume: %.2f sec" -msgstr "Tempo trascorso per la cancellazione del volume: %.2f sec" - msgid "Embedded mode detected." msgstr "Rilevata modalità integrata." @@ -1220,10 +1107,6 @@ msgstr "" "%(arrayName)s. total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." -#, python-format -msgid "FC Initiators %(in)s of %(ins)s need registration" -msgstr "Gli iniziatori FC %(in)s di %(ins)s necessitano la registrazione." - msgid "Failed over to replication target successfully." msgstr "Failover della destinazione di replica eseguito correttamente." 
@@ -1255,10 +1138,6 @@ msgstr "Errore generato: %s" msgid "Fetched vCenter server version: %s" msgstr "Recuperata versione server vCenter: %s" -#, python-format -msgid "Filter %(cls_name)s returned %(obj_len)d host(s)" -msgstr "Il filtro %(cls_name)s ha restituito %(obj_len)d host(s)" - #, python-format msgid "Filtered targets for SAN is: %(targets)s" msgstr "Le destinazioni filtrate per SAN sono: %(targets)s" @@ -1361,18 +1240,6 @@ msgstr "Nome zona breve dopo la formazione: %(zonename)s" msgid "Generating transfer record for volume %s" msgstr "Generazione del record di trasferimento per il volume %s" -#, python-format -msgid "Get FC targets %(tg)s to register initiator %(in)s." -msgstr "Ricevi destinazioni FC %(tg)s per registrare l'iniziatore %(in)s." - -#, python-format -msgid "Get ISCSI targets %(tg)s to register initiator %(in)s." -msgstr "Ricevi destinazioni ISCSI %(tg)s per registrare l'iniziatore %(in)s." - -#, python-format -msgid "Get Volume response: %s" -msgstr "Ricevi risposta del volume: %s" - msgid "Get all snapshots completed successfully." msgstr "Richiamo di tutte le istantanee completato correttamente." @@ -1383,10 +1250,6 @@ msgstr "Richiamo di tutti i volumi completato correttamente." msgid "Get domain by name response: %s" msgstr "Ottieni dominio dalla risposta del nome: %s" -#, python-format -msgid "Get service: %(lbl)s->%(svc)s" -msgstr "Richiama servizio: %(lbl)s->%(svc)s" - msgid "Get snapshot metadata completed successfully." msgstr "Richiamo dei metadati dell'istantanea completato correttamente." @@ -1418,10 +1281,6 @@ msgstr "Richiamo delle informazioni sul volume per vol_name=%s" msgid "Going to perform request again %s with valid token." msgstr "Eseguire di nuovo la richiesta %s con un token valido." -#, python-format -msgid "HDP list: %s" -msgstr "Elenco HDP: %s" - #, python-format msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" msgstr "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" @@ -1438,17 +1297,9 @@ msgstr "Generata eccezione HTTP: %s" msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." msgstr "ID hypermetro: %(metro_id)s. ID lun remota: %(remote_lun_id)s." -#, python-format -msgid "ISCSI properties: %(properties)s" -msgstr "Proprietà ISCSI: %(properties)s" - msgid "ISCSI provider_location not stored, using discovery." msgstr "provider_location ISCSI non archiviato, utilizzare il rilevamento." -#, python-format -msgid "ISCSI volume is: %(volume)s" -msgstr "Il volume ISCSI è: %(volume)s" - #, python-format msgid "Ignored LU creation error \"%s\" while ensuring export." msgstr "" @@ -1562,14 +1413,6 @@ msgstr "Il nome del gruppo iniziatore è %(grp)s per l'iniziatore %(iname)s" msgid "LUN %(id)s extended to %(size)s GB." msgstr "LUN %(id)s estesa a %(size)s GB." -#, python-format -msgid "LUN %(lun)s extended to %(size)s GB." -msgstr "LUN %(lun)s estesa a %(size)s GB." - -#, python-format -msgid "LUN %(lun)s of size %(sz)s MB is created." -msgstr "La LUN %(lun)s di dimensione %(sz)s MB viene creata." - #, python-format msgid "LUN with given ref %s need not be renamed during manage operation." msgstr "" @@ -1704,14 +1547,6 @@ msgstr "" "È necessario rimuovere la zona FC, creazione dell'associazione di " "destinazione dell'iniziatore." -msgid "" -"Neither security file nor plain text credentials are specified. Security " -"file under home directory will be used for authentication if present." -msgstr "" -"Non sono specificati né il file di sicurezza né le credenziali del testo " -"normale. 
Il file di sicurezza nella directory home verrà utilizzato per " -"l'autenticazione, se presente." - #, python-format msgid "" "NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " @@ -1813,10 +1648,6 @@ msgstr "" msgid "Params for add volume request: %s." msgstr "Parametri per la richiesta di aggiunta del volume: %s." -#, python-format -msgid "Parse_loc: %s" -msgstr "Parse_loc: %s" - #, python-format msgid "Performing post clone for %s" msgstr "Esecuzione del post clone per %s" @@ -1825,10 +1656,6 @@ msgstr "Esecuzione del post clone per %s" msgid "Performing secure delete on volume: %s" msgstr "Esecuzione di secure delete nel volume: %s" -msgid "Plain text credentials are being used for authentication" -msgstr "" -"Per l'autenticazione vengono utilizzare le credenziali del testo normale" - #, python-format msgid "Pool id is %s." msgstr "L'ID pool è %s." @@ -2084,14 +1911,6 @@ msgstr "Ripresa eliminazione del volume completata correttamente." msgid "Resuming delete on backup: %s." msgstr "Ripresa dell'eliminazione al backup: %s." -#, python-format -msgid "Retrieving secret for service: %s." -msgstr "Richiamo del segreto per il servizio: %s." - -#, python-format -msgid "Retrieving target for service: %s." -msgstr "Richiamo della destinazione per il servizio: %s." - #, python-format msgid "Return FC info is: %s." msgstr "Le informazioni FC restituite sono: %s." @@ -2167,24 +1986,9 @@ msgstr "Richiesta di riscrittura del volume eseguita correttamente. " msgid "Retype was to same Storage Profile." msgstr "La riscrittura è stata eseguita nello stesso profilo di archiviazione." -#, python-format -msgid "Review shares: %s" -msgstr "Rivedi condivisioni: %s" - msgid "Roll detaching of volume completed successfully." msgstr "Esecuzione dello scollegamento del volume completata correttamente." -#, python-format -msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" -msgstr "" -"Esecuzione dell'ultimo lavoro ssc cluster per %(server)s e vserver %(vs)s" - -#, python-format -msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" -msgstr "" -"Esecuzione del lavoro di aggiornamento ssc obsoleto per %(server)s e vserver " -"%(vs)s" - #, python-format msgid "Running with vmemclient version: %s" msgstr "Esecuzione con versione vmemclient: %s" @@ -2193,10 +1997,6 @@ msgstr "Esecuzione con versione vmemclient: %s" msgid "SC server created %s" msgstr "Server SC creato %s" -#, python-format -msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s" -msgstr "Salva informazioni sul servizio per %(svc)s -> %(hdp)s, %(path)s" - #, python-format msgid "" "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " @@ -2286,10 +2086,6 @@ msgstr "" msgid "Set newly managed Cinder volume name to %(name)s." msgstr "Impostare il nome volume Cinder appena gestito su %(name)s." -#, python-format -msgid "Set tgt CHAP secret for service: %s." -msgstr "Impostare il segreto tgt CHAP per il servizio: %s." - #, python-format msgid "Setting host %(host)s to %(state)s." msgstr "Impostazione dell'host %(host)s su %(state)s." @@ -2325,12 +2121,6 @@ msgstr "Ignorare l'eliminazione del volume %s perché non esiste." msgid "Skipping ensure_export. Found existing iSCSI target." msgstr "Ignorare ensure_export. Trovata destinazione iSCSI esistente." -#, python-format -msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" -msgstr "" -"ensure_export viene ignorato. 
Nessun provisioning di iscsi_target per il "
-"volume: %s"
-
 #, python-format
 msgid ""
 "Skipping image volume %(id)s because it is not accessible by current Tenant."
@@ -2407,10 +2197,6 @@ msgstr "L'istantanea %s ha cloni dipendenti, verrà eliminata successivamente."
 
 #, python-format
 msgid "Snapshot %s not found"
 msgstr "Istantanea %s non trovata"
 
-#, python-format
-msgid "Snapshot %s was deleted successfully."
-msgstr "Istantanea %s eliminata correttamente."
-
 #, python-format
 msgid "Snapshot '%(ref)s' renamed to '%(new)s'."
 msgstr "Istantanea '%(ref)s' ridenominata in '%(new)s'."
@@ -2488,10 +2274,6 @@ msgstr "Avvio del driver del volume %(driver_name)s (%(version)s)"
 
 msgid "Storage Group %(storageGroupName)s successfully deleted."
 msgstr "Gruppo di archiviazione %(storageGroupName)s eliminato correttamente."
 
-#, python-format
-msgid "Storage Group %s was empty."
-msgstr "Il gruppo di archiviazione %s è vuoto."
-
 #, python-format
 msgid "Storage group not associated with the policy. Exception is %s."
 msgstr "Gruppo di archiviazione non associato alla politica. L'eccezione è %s."
@@ -2512,17 +2294,6 @@ msgstr "Login corretto da parte dell'utente %s"
 
 msgid "Successfully added %(volumeName)s to %(sgGroupName)s."
 msgstr "Aggiunto correttamente %(volumeName)s a %(sgGroupName)s."
 
-#, python-format
-msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s"
-msgstr "Completato correttamente lavoro ssc per %(server)s e vserver %(vs)s"
-
-#, python-format
-msgid ""
-"Successfully completed stale refresh job for %(server)s and vserver %(vs)s"
-msgstr ""
-"Completato correttamente lavoro di aggiornamento obsoleto per %(server)s e "
-"vserver %(vs)s"
-
 #, python-format
 msgid "Successfully copied disk at: %(src)s to: %(dest)s."
 msgstr "Copiato correttamente disco su: %(src)s in: %(dest)s."
@@ -2682,10 +2453,6 @@ msgstr ""
 "Il sistema con indirizzi controller [%s] non è registrato con il servizio "
 "web."
 
-#, python-format
-msgid "Target is %(map)s! Targetlist = %(tgtl)s."
-msgstr "La destinazione è %(map)s! Targetlist = %(tgtl)s."
-
 #, python-format
 msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s."
 msgstr ""
@@ -2800,10 +2567,6 @@ msgstr ""
 "Non esiste alcun punto di istantanea per il volume con istantanea: %(snap)s. "
 "Non viene creato alcun backup per il volume: %(vol)s."
 
-#, python-format
-msgid "Toggle san_ip from %(current)s to %(new)s."
-msgstr "Passare a san_ip da %(current)s a %(new)s."
-
 msgid "Token is invalid, going to re-login and get a new one."
 msgstr "Il token non è valido, rieseguire il login e ottenere un nuovo token."
@@ -3018,18 +2781,6 @@ msgstr "Utilizzo della versione vmware_host_version sovrascritta da config: %s"
 
 msgid "Using pool %(pool)s instead of %(cpg)s"
 msgstr "Utilizzo del pool %(pool)s anziché %(cpg)s"
 
-#, python-format
-msgid "Using security file in %s for authentication"
-msgstr "Viene utilizzato il file di sicurezza %s per l'autenticazione"
-
-#, python-format
-msgid "Using service label: %s"
-msgstr "Utilizzo dell'etichetta di servizio: %s"
-
-#, python-format
-msgid "Using target label: %s."
-msgstr "Utilizzo dell'etichetta di destinazione: %s."
-
 msgid "VF context is changed in the session."
 msgstr "Il contesto VF viene modificato nella sessione."
@@ -3212,11 +2963,6 @@ msgstr ""
 "Il volume con un determinato riferimento %s non deve essere ridenominato "
 "durante l'operazione di gestione."
-#, python-format
-msgid "Volume with the name %s wasn't found, can't unmanage"
-msgstr ""
-"Impossibile trovare il volume con nome %s, impossibile annullare la gestione"
-
 #, python-format
 msgid ""
 "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, "
@@ -3264,10 +3010,6 @@ msgid "Zone exists in I-T mode. Skipping zone creation for %(zonename)s"
 msgstr ""
 "La zona esiste in modalità I-T. Ignorare la creazione zone per %(zonename)s"
 
-#, python-format
-msgid "Zone map to add: %(zonemap)s"
-msgstr "Associazione zone da aggiungere: %(zonemap)s"
-
 #, python-format
 msgid "Zone map to add: %s"
 msgstr "Associazione zone da aggiungere: %s"
@@ -3326,10 +3068,6 @@ msgstr ""
 "_delete_volume_setting, nome volume:%(volumename)s, volume non trovato su "
 "ETERNUS. "
 
-#, python-format
-msgid "_get_service_target hdp: %s."
-msgstr "_get_service_target hdp: %s."
-
 #, python-format
 msgid "_get_tgt_ip_from_portgroup: Get ip: %s."
 msgstr "_get_tgt_ip_from_portgroup: Richiamare ip: %s."
@@ -3374,10 +3112,6 @@ msgstr "cgsnapshot %s: eliminata correttamente"
 
 msgid "cgsnapshot %s: deleting"
 msgstr "Eliminazione di cgsnapshot %s:."
 
-#, python-format
-msgid "config[services]: %s."
-msgstr "config[services]: %s."
-
 #, python-format
 msgid "create_cloned_volume, info: %s, Exit method."
 msgstr "create_cloned_volume, info: %s, Metodo di uscita."
@@ -3422,10 +3156,6 @@ msgstr "create_volume, info: %s, Metodo di uscita."
 
 msgid "create_volume, volume id: %s, Enter method."
 msgstr "create_volume, volume id: %s, Metodo di ingresso."
 
-#, python-format
-msgid "create_volume: create_lu returns %s"
-msgstr "create_volume: create_lu restituisce %s"
-
 #, python-format
 msgid "create_volume_from_snapshot, info: %s, Exit method."
 msgstr "create_volume_from_snapshot, info: %s, Metodo di uscita."
@@ -3446,14 +3176,6 @@ msgstr ""
 "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: "
 "%(tgt_lun_id)s, copy_name: %(copy_name)s."
 
-#, python-format
-msgid "del_iscsi_conn: hlun not found %s."
-msgstr "del_iscsi_conn: hlun non trovato %s."
-
-#, python-format
-msgid "delete lun loc %s"
-msgstr "elimina loc lun %s"
-
 #, python-format
 msgid "delete_snapshot, delete: %s, Exit method."
 msgstr "delete_snapshot, delete: %s, Metodo di uscita."
@@ -3479,10 +3201,6 @@ msgstr ""
 "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: "
 "%(lun_id)s."
 
-#, python-format
-msgid "do_setup: %s"
-msgstr "do_setup: %s"
-
 #, python-format
 msgid "extend_volume, used pool name: %s, Exit method."
 msgstr "extend_volume, nome pool utilizzato: %s, Metodo di uscita."
@@ -3497,26 +3215,10 @@ msgstr ""
 "La capacità disponibile di pool %(pool)s è: %(free)s, capacità totale: "
 "%(total)s."
 
-#, python-format
-msgid "iSCSI Initiators %(in)s of %(ins)s need registration."
-msgstr "Gli iniziatori iSCSI %(in)s di %(ins)s necessitano la registrazione."
-
-#, python-format
-msgid "iSCSI portal found for service: %s"
-msgstr "Portale iSCSI trovato per il servizio: %s"
-
 #, python-format
 msgid "igroup %(grp)s found for initiator %(iname)s"
 msgstr "igroup %(grp)s trovato per l'iniziatore %(iname)s"
 
-#, python-format
-msgid "initialize volume %(vol)s connector %(conn)s"
-msgstr "inizializza volume %(vol)s connettore %(conn)s"
-
-#, python-format
-msgid "initialize_ connection: %(vol)s:%(initiator)s"
-msgstr "initialize_ connection: %(vol)s:%(initiator)s"
-
 #, python-format
 msgid "initialize_connection success. Return data: %s."
 msgstr "initialize_connection riuscita. Dati restituiti: %s."
@@ -3571,10 +3273,6 @@ msgid "" msgstr "" "initialize_connection_fc, iniziatore: %(wwpns)s, nome volume: %(volume)s." -#, python-format -msgid "initiate: connection %s" -msgstr "inizializza: connessione %s" - msgid "initiator has no password while using chap,adding it" msgstr "" "l'iniziatore non presenta alcuna password durante l'utilizzo di chap, la " @@ -3584,25 +3282,6 @@ msgstr "" msgid "initiator name: %(initiator_name)s, LUN ID: %(lun_id)s." msgstr "nome iniziatore: %(initiator_name)s, ID LUN: %(lun_id)s." -msgid "" -"initiator_auto_registration: False. Initiator auto registration is not " -"enabled. Please register initiator manually." -msgstr "" -"initiator_auto_registration: False. La registrazione automatica " -"dell'iniziatore non è abilitata. Registrare l'iniziatore manualmente." - -#, python-format -msgid "iops limit is: %s." -msgstr "Il limite iops è: %s." - -#, python-format -msgid "iscsi_initiators: %s" -msgstr "iscsi_initiators: %s" - -#, python-format -msgid "location is: %(location)s" -msgstr "location è: %(location)s" - #, python-format msgid "" "manage_existing_snapshot: snapshot %(exist)s on volume %(volume)s has been " @@ -3623,10 +3302,6 @@ msgstr "" msgid "new cloned volume: %s" msgstr "nuovo volume clonato: %s" -#, python-format -msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" -msgstr "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" - #, python-format msgid "open_connection to %(ssn)s at %(ip)s" msgstr "open_connection a %(ssn)s su %(ip)s" @@ -3645,14 +3320,6 @@ msgstr "" "impostazione del volume %s su error_restoring (era in fase di ripristino del " "backup)." -#, python-format -msgid "share: %(share)s -> %(info)s" -msgstr "condivisione: %(share)s -> %(info)s" - -#, python-format -msgid "share: %s incorrect entry" -msgstr "condivisione: %s voce non corretta" - #, python-format msgid "smis_do_iscsi_discovery is: %(out)s." msgstr "smis_do_iscsi_discovery è: %(out)s." @@ -3665,22 +3332,10 @@ msgstr "l'istantanea %s non esiste" msgid "source volume for cloning: %s" msgstr "volume di origine per la clonazione: %s" -#, python-format -msgid "stats: stats: %s." -msgstr "stats: stats: %s." - #, python-format msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." msgstr "stop_snapshot: nome istantanea: %(snapshot)s, nome volume: %(volume)s." -#, python-format -msgid "targetlist: %s" -msgstr "targetlist: %s" - -#, python-format -msgid "terminate: connection %s" -msgstr "termina: connessione %s" - #, python-format msgid "terminate_connection volume: %(volume)s, connector: %(con)s" msgstr "terminate_connection volume: %(volume)s, connettore: %(con)s" diff --git a/cinder/locale/it/LC_MESSAGES/cinder.po b/cinder/locale/it/LC_MESSAGES/cinder.po index e331760fb..65f2528e5 100644 --- a/cinder/locale/it/LC_MESSAGES/cinder.po +++ b/cinder/locale/it/LC_MESSAGES/cinder.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -263,9 +263,6 @@ msgstr "È necessario specificare 'status'." 
msgid "'volume_id' must be specified" msgstr "È necessario specificare 'volume_id'" -msgid "'{}' object has no attribute '{}'" -msgstr "L'oggetto '{}'non ha l'attributo '{}'" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -430,23 +427,9 @@ msgstr "" "La richiesta di una versione API deve essere confrontata a un oggetto " "VersionedMethod." -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "Si è verificato un errore in SheepdogDriver. (Motivo: %(reason)s) " - msgid "An error has occurred during backup operation" msgstr "Si è verificato un errore durante l'operazione di backup" -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "" -"Si è verificato un errore durante il tentativo di modificare l'istantanea " -"\"%s\". " - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "Si è verificato un errore durante la ricerca del volume \"%s\". " - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -550,18 +533,12 @@ msgstr "Dettagli gruppo aut [%s] non trovati nella memoria CloudByte." msgid "Auth user details not found in CloudByte storage." msgstr "Dettagli utente aut non trovati nella memoria CloudByte." -msgid "Authentication error" -msgstr "Errore di autenticazione" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "Autenticazione non riuscita, verificare le credenziali dello switch, codice " "di errore %s." -msgid "Authorization error" -msgstr "Errore di autorizzazione" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "L'area di disponibilità '%(s_az)s' non è valida." @@ -580,9 +557,6 @@ msgstr "" msgid "Backend doesn't exist (%(backend)s)" msgstr "Il backend non esiste (%(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." -msgstr "Failover del backend già eseguito. Impossibile eseguire il failback." - #, python-format msgid "Backend reports: %(message)s" msgstr "Report Backend: %(message)s" @@ -593,9 +567,6 @@ msgstr "Report Backend: l'elemento esiste già" msgid "Backend reports: item not found" msgstr "Report Backend: elemento non trovato" -msgid "Backend server not NaServer." -msgstr "Il server di backend non è NaServer." 
- #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "Raggiunto timeout di tentativi del servizio backend: %(timeout)s sec" @@ -699,12 +670,6 @@ msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Formato progetto non corretto: il formato del progetto non è corretto (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Richiesta non valida inviata al cluster Datera: Argomenti non validi: " -"%(args)s | %(message)s" - msgid "Bad response from Datera API" msgstr "Risposta errata dall'API Datera" @@ -721,18 +686,6 @@ msgstr "Valore binario" msgid "Blank components" msgstr "Componenti vuoti" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Schema di autenticazione API Blockbridge (token o password)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Password API Blockbridge (per schema aut 'password')" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Token API Blockbridge (per schema aut 'token')" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Utente API Blockbridge (per schema aut 'password')" - msgid "Blockbridge api host not configured" msgstr "Host api Blockbridge non configurato " @@ -848,9 +801,6 @@ msgstr "Impossibile convertire %s in un numero intero." msgid "Can't access 'scality_sofs_config': %s" msgstr "Impossibile accedere a 'scality_sofs_config': %s" -msgid "Can't attach snapshot." -msgstr "Impossibile collegare l'istantanea." - msgid "Can't decode backup record." msgstr "Impossibile decodificare il record di backup. " @@ -967,10 +917,6 @@ msgstr "" "Impossibile importare l'istantanea %s in Cinder. Lo stato dell'istantanea " "non è normal o lo stato di esecuzione non è online." -#, python-format -msgid "Can't open config file: %s" -msgstr "Impossibile aprire il file di configurazione: %s" - msgid "Can't parse backup record." msgstr "Impossibile analizzare il record di backup. " @@ -1046,13 +992,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "Impossibile effettuare la connessione al server ECOM." -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"Impossibile creare il clone della dimensione %(vol_size)s dal volume della " -"dimensione %(src_vol_size)s" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1102,13 +1041,6 @@ msgstr "" "Impossibile creare o trovare un gruppo di archiviazione con nome " "%(sgGroupName)s." -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"Impossibile creare il volume della dimensione %(vol_size)s dall'istantanea " -"della dimensione %(snap_size)s" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "Impossibile creare il volume di dimensione %s: non multiplo di 8GB. " @@ -1438,10 +1370,6 @@ msgstr "La porta RPC Coho non è configurata" msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Il comando %(cmd)s è bloccato nella CLI ed è stato annullato " -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "Timeout CommandLineHelper._wait_for_a_condition: %s " - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s timeout." 
@@ -1601,19 +1529,10 @@ msgstr "Impossibile trovare l'id cluster GPFS: %s." msgid "Could not find GPFS file system device: %s." msgstr "Impossibile trovare il dispositivo di file system GPFS: %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "" -"Impossibile trovare un host per il volume %(volume_id)s con tipo %(type_id)s." - #, python-format msgid "Could not find config at %(path)s" msgstr "Impossibile trovare la configurazione in %(path)s" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "Impossibile trovare l'esportazione iSCSI per il volume %(volumeName)s." - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Impossibile trovare l'esportazione iSCSI per il volume %s" @@ -1701,17 +1620,6 @@ msgstr "" "Creazione del backup interrotta, lo stato del volume previsto è " "%(expected_status)s ma è stato ricevuto %(actual_status)s." -msgid "Create consistency group failed." -msgstr "Creazione del gruppo di coerenza non riuscita." - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"La creazione di volumi codificati con tipo %(type)s dall'immagine %(image)s " -"non è supportata." - msgid "Create export for volume failed." msgstr "Creazione esportazione per il volume non riuscita." @@ -1801,12 +1709,6 @@ msgstr "" "L'host attualmente associato per il volume %(vol)s è in un gruppo host non " "supportato con%(group)s." -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "OBSOLETO: distribuire v1 dell'API Cinder." - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "OBSOLETO: distribuire v2 dell'API Cinder." - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1888,15 +1790,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "Impossibile estendere le lun dedup" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"Deduplication Enabler non è installato. Impossibile creare un volume " -"deduplicato" - -msgid "Default pool name if unspecified." -msgstr "Nome pool predefinito se non specificato. " - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1910,12 +1803,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "Impossibile trovare il tipo di volume predefinito." -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"Definisce la serie di pool esposti e le relative stringhe di query backend " -"associate " - msgid "Delete LUNcopy error." msgstr "Errore di eliminazione LUNcopy. " @@ -2005,9 +1892,6 @@ msgstr "" "Dispositivo di replica dell'errore di configurazione del driver Dell Cinder " "%s non trovato" -msgid "Deploy v3 of the Cinder API." -msgstr "Distribuire v3 dell'API Cinder." - msgid "Describe-resource is admin only functionality" msgstr "Describe-resource è una funzionalità solo di admin" @@ -2061,13 +1945,6 @@ msgid "Driver initialize connection failed (error: %(err)s)." msgstr "" "Inizializzazione connessione del driver non riuscita (errore: %(err)s)." -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"Il driver non è in grado di eseguire la riscrittura perché il volume (LUN " -"{}) presenta un'istantanea per cui è proibita la migrazione." 
- msgid "Driver must implement initialize_connection" msgstr "Il driver deve implementare initialize_connection" @@ -2586,12 +2463,6 @@ msgstr "" "Errore durante l'associazione del gruppo storage: %(storageGroupName)s. alla " "politica FAST: %(fastPolicyName)s con descrizione dell'errore: %(errordesc)s." -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "" -"Errore durante il col legamento del volume %s. Potrebbe esser stato " -"raggiunto il limite di destinazione. " - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2763,10 +2634,6 @@ msgstr "" msgid "Error managing volume: %s." msgstr "Errore durante la gestione del volume: %s. " -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "Errore durante l'associazione del volume %(vol)s. %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2811,17 +2678,9 @@ msgid "Error occurred when updating consistency group %s." msgstr "" "Si è verificato un errore durante l'aggiornamento del gruppo di coerenza %s." -#, python-format -msgid "Error parsing config file: %s" -msgstr "Errore durante l'analisi del file di configurazione: %s" - msgid "Error promoting secondary volume to primary" msgstr "Errore quando si promuove il volume secondario in primario" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "Errore durante la rimozione del volume %(vol)s. %(error)s." - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Errore durante la ridenominazione del volume %(vol)s: %(err)s." @@ -3116,12 +2975,6 @@ msgstr "" msgid "Extend volume not implemented" msgstr "Estensione volume non implementata" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"FAST VP Enabler non è installato. Impossibile impostare la politica a " -"livelli per il volume" - msgid "FAST is not supported on this array." msgstr "FAST non supportato su questo array." @@ -3189,10 +3042,6 @@ msgstr "" "Impossibile acquisire un blocco risorsa. (serial: %(serial)s, inst: " "%(inst)s, ret: %(ret)s, stderr: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "Impossibile aggiungere %(vol)s in %(sg)s dopo %(retries)s tentativi." - msgid "Failed to add the logical device." msgstr "Impossibile aggiungere l'unità logica." @@ -3278,9 +3127,6 @@ msgstr "Impossibile creare il GC %(cgName)s dall'istantanea %(cgSnapshot)s." msgid "Failed to create IG, %s" msgstr "Impossibile creare IG, %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "Impossibile creare il volume dell'immagine di SolidFire" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Impossibile creare il gruppo volume: %(vg_name)s" @@ -3394,9 +3240,6 @@ msgstr "Impossibile creare il flusso del volume del gestore scheduler" msgid "Failed to create snapshot %s" msgstr "Impossibile creare istantanea %s" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "Impossibile creare l'istantanea poiché non è specificato alcun ID LUN" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "Impossibile creare l'istantanea per cg: %(cgName)s." @@ -3547,9 +3390,6 @@ msgstr "" "Impossibile garantire l'area risorsa istantanea, impossibile individuare il " "volume per l'ID %s " -msgid "Failed to establish SSC connection." 
-msgstr "Impossibile stabilire la connessione SSC. " - msgid "Failed to establish connection with Coho cluster" msgstr "Impossibile stabilire la connessione con il cluster Coho" @@ -3604,11 +3444,6 @@ msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" "Impossibile trovare il gruppo di iniziatori iSCSI contenente %(initiator)s." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "" -"Impossibile trovare il pool di archiviazione per il volume di origine %s." - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Non è stato possibile ottenere i dettagli dell'account CloudByte [%s]." @@ -3837,28 +3672,6 @@ msgstr "" "Impossibile gestire il volume esistente %(name)s, poiché la dimensione " "%(size)s riportata non è un numero a virgola mobile." -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"Impossibile gestire il volume esistente poiché il pool del tipo di volume " -"scelto non corrisponde alla condivisione NFS trasmessa nel riferimento " -"volume. " - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"Impossibile gestire il volume esistente poiché il pool del tipo di volume " -"scelto non corrisponde al file system trasmesso nel riferimento volume. " - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"Impossibile gestire il volume esistente poiché il pool del tipo di volume " -"scelto non corrisponde al pool dell'host. " - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -4213,9 +4026,6 @@ msgstr "Errore di rilevamento id lun host." msgid "Find lun group from mapping view error." msgstr "Errore di rilevamento gruppo lun dalla vista associazione." -msgid "Find lun number error." -msgstr "Errore di rilevamento numero lun." - msgid "Find mapping view error." msgstr "Errore di rilevamento vista associazione." @@ -4603,9 +4413,6 @@ msgstr "" msgid "HBSD error occurs." msgstr "Si è verificato un errore HBSD." -msgid "HNAS has disconnected SSC" -msgstr "HNAS ha disconnesso SSC" - msgid "HPELeftHand url not found" msgstr "URL HPELeftHand non trovato" @@ -4645,14 +4452,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "L'host %s non ha iniziatori FC" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "L'host %s non ha un iniziatore iSCSI" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "Impossibile trovare l'host '%s'." - #, python-format msgid "Host group with name %s not found" msgstr "Gruppo host con nome %s non trovato" @@ -4667,9 +4466,6 @@ msgstr "L'host NON è bloccato." msgid "Host is already Frozen." msgstr "L'host è già bloccato." -msgid "Host not found" -msgstr "Host non trovato" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Host non trovato. Impossibile rimuovere %(service)s su %(host)s." @@ -4700,9 +4496,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "Indirizzo IP o nome host dell'API Blockbridge. " - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." 
msgstr "" @@ -4801,12 +4594,6 @@ msgstr "" "Eccezione CLI Infortrend: %(err)s Param: %(param)s (Codice di ritorno: " "%(rc)s) (Output: %(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "Livello iniziale: {}, politica: {} non valido." - -msgid "Input type {} is not supported." -msgstr "Tipo di input {} non supportato." - msgid "Input volumes or snapshots are invalid." msgstr "Istantanee o volumi di input non validi." @@ -4823,15 +4610,6 @@ msgstr "Spazio libero disponibile insufficiente per estendere il volume." msgid "Insufficient privileges" msgstr "Privilegi insufficienti" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "" -"Valore di intervallo (in secondi) tra i tentativi di connessione al cluster " -"ceph. " - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "Porte %(protocol)s %(port)s non valide specificate per io_port_list." - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Dominio 3PAR non valido: %(err)s" @@ -4881,10 +4659,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Destinazione di replica non valida: %(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "Tipo di autenticazione VNX non valido: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -4931,14 +4705,6 @@ msgstr "Chiave di autenticazione non valida: %(reason)s" msgid "Invalid backup: %(reason)s" msgstr "Backup non valido: %(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"URL API Barbican non valido: è richiesta la versione, ad esempio 'http[s]://" -"|[:port]/', l'url specificato è: %s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "Trovati dettagli utente chap non validi nella memoria CloudByte." @@ -5115,12 +4881,6 @@ msgstr "Specificato un pool dell'archivio %s non valido." msgid "Invalid storage pool is configured." msgstr "Configurato pool di archiviazione non valido." -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "" -"Specificata modalità di sincronizzazione non valida, la modalità consentita " -"è %s." - msgid "Invalid transport type." msgstr "Tipo di trasporto non valido." @@ -5128,14 +4888,6 @@ msgstr "Tipo di trasporto non valido." msgid "Invalid update setting: '%s'" msgstr "Impostazione di aggiornamento non valida: '%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"URL non valido: deve essere nel formato 'http[s]://|[:port]/" -"', l'url specificato è: %s" - #, python-format msgid "Invalid value '%s' for force." msgstr "Valore non valido '%s' per force." @@ -5283,9 +5035,6 @@ msgstr "" "Esecuzione del failover non riuscita perché la replica non è configurata " "correttamente." -msgid "Item not found" -msgstr "Elemento non trovato" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" @@ -5320,9 +5069,6 @@ msgstr "LUN non esiste per il volume: %s" msgid "LUN export failed!" msgstr "Esportazione LUN non riuscita. " -msgid "LUN id({}) is not valid." -msgstr "ID LUN ({}) non valido." - msgid "LUN map overflow on every channel." msgstr "Eccedenza mappa LUN su ogni canale. " @@ -5330,9 +5076,6 @@ msgstr "Eccedenza mappa LUN su ogni canale. " msgid "LUN not found with given ref %s." 
msgstr "LUN non trovata con il riferimento fornito %s." -msgid "LUN number ({}) is not an integer." -msgstr "Il numero LUN ({}) non è un numero intero." - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "Il numero LUN è fuori dai limiti sul canale id: %(ch_id)s. " @@ -5523,33 +5266,9 @@ msgstr "Il backup dei metadati esiste già per questo volume" msgid "Metadata backup object '%s' already exists" msgstr "L'oggetto di backup di metadati '%s' esiste già" -msgid "Metadata item was not found" -msgstr "L'elemento metadati non è stato trovato" - -msgid "Metadata item was not found." -msgstr "L'elemento metadati non è stato trovato." - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "La chiave della proprietà dei metadati %s supera i 255 caratteri" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "" -"Il valore della chiave della proprietà dei metadati %s supera i 255 caratteri" - -msgid "Metadata property key blank" -msgstr "La chiave della proprietà dei metadati è vuota" - msgid "Metadata property key blank." msgstr "Chiave della proprietà dei metadati vuota." -msgid "Metadata property key greater than 255 characters." -msgstr "La chiave della proprietà dei metadati contiene più di 255 caratteri." - -msgid "Metadata property value greater than 255 characters." -msgstr "Valore della proprietà dei metadati maggiore di 255 caratteri." - msgid "Metadata restore failed due to incompatible version" msgstr "" "Il ripristino dei metadati non è riuscito a causa di una versione non " @@ -5559,23 +5278,6 @@ msgid "Metadata restore failed due to incompatible version." msgstr "" "Ripristino dei metadati non riuscito a causa di una versione non compatibile." -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "Migrazione volume %(src)s non riuscita. " - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"Migrazione volume non riuscita tra vol origine %(src)s e vol destinazione " -"%(dst)s. " - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "La migrazione di LUN %s è stata arrestata o è in errore." - -msgid "MirrorView/S enabler is not installed." -msgstr "L'abilitatore MirrorView/S non è installato." - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5605,9 +5307,6 @@ msgid "Missing required element 'consistencygroup' in request body." msgstr "" "Manca l'elemento 'consistencygroup' richiesto nel corpo della richiesta. " -msgid "Missing required element 'host' in request body." -msgstr "Manca l'elemento 'host' richiesto nel corpo della richiesta." - msgid "Missing required element quota_class_set in request body." msgstr "" "Elemento quota_class_set obbligatorio mancante nel corpo della richiesta." @@ -5734,9 +5433,6 @@ msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "È necessario specificare i pool di archiviazione. Opzione: sio_storage_pools." -msgid "Must supply a positive value for age" -msgstr "È necessario fornire un valore positivo per l'età" - msgid "Must supply a positive, non-zero value for age" msgstr "È necessario fornire un valore positivo, diverso da zero per l'età" @@ -6142,9 +5838,6 @@ msgstr "" "Ricevuta risposta null durante l'interrogazione per il lavoro basato su " "[%(operation)s][%(job)s] nella memoria CloudByte." -msgid "Number of retries if connection to ceph cluster failed." 
-msgstr "Numero di tentativi se la connessione al cluster ceph non riesce. " - msgid "Object Count" msgstr "Numero oggetti" @@ -6205,16 +5898,10 @@ msgstr "L'opzione gpfs_images_share_mode non è impostata correttamente." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "L'opzione gpfs_mount_point_base non è impostata correttamente." -msgid "Option map (cls._map) is not defined." -msgstr "Mappa delle opzioni (cls._map) non definita." - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "%(res)s %(prop)s di origine deve essere uno dei valori '%(vals)s'" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "Sovrascrivere porta HTTPS per connettersi al server API Blockbridge. " - #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" @@ -6474,15 +6161,6 @@ msgstr "La risposta del server RPC è incompleta" msgid "Raid did not have MCS Channel." msgstr "Il Raid non ha il canale MCS. " -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"Raggiunto il limite impostato dall'opzione di configurazione " -"max_luns_per_storage_group. L'operazione per aggiungere %(vol)s al gruppo di " -"archiviazione %(sg)s è rifiutata." - #, python-format msgid "Received error string: %s" msgstr "Ricevuta stringa di errore: %s" @@ -6666,9 +6344,6 @@ msgstr "Configurazione richiesta non trovata" msgid "Required flag %s is not set" msgstr "L'indicatore richiesto %s non è impostato" -msgid "Requires an NaServer instance." -msgstr "Richiede un'istanza NaServer." - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -6867,10 +6542,6 @@ msgstr "Impossibile trovare il servizio %(service_id)s sull'host %(host)s." msgid "Service %(service_id)s could not be found." msgstr "Impossibile trovare il servizio %(service_id)s." -#, python-format -msgid "Service %s not found." -msgstr "Il servizio %s non è stato trovato." - msgid "Service is too old to fulfil this request." msgstr "Il servizio è troppo vecchio per soddisfare la richiesta." @@ -6968,10 +6639,6 @@ msgstr "" "L'istantanea %(snapshot_id)s non contiene metadati con la chiave " "%(metadata_key)s." -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "Lo snapshot %s non può far parte del gruppo di consistenza. " - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "L'istantanea '%s' non esiste sull'array." @@ -6998,9 +6665,6 @@ msgstr "Istantanea del volume non supportata nello stato: %s." msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "Risorsa dell'istantanea \"%s\" non distribuita?" -msgid "Snapshot size must be multiple of 1 GB." -msgstr "La dimensione dell'istantanea deve essere un multiplo di 1 GB. " - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" @@ -7157,11 +6821,6 @@ msgstr "" "Impossibile trovare il sistema di archiviazione per il pool " "%(poolNameInStr)s." -msgid "Storage-assisted migration failed during manage volume." -msgstr "" -"La migrazione assistita dall'archiviazione non è riuscita durante la " -"gestione del volume." - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "StorageSystem %(array)s non è stato trovato" @@ -7209,10 +6868,6 @@ msgstr "" msgid "Target volume type is still in use." 
msgstr "Il tipo di volume di destinazione è ancora in uso." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "ID titolare: %s non esiste." - msgid "Terminate connection failed" msgstr "Interrompi connessione non riuscito" @@ -7308,10 +6963,6 @@ msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "" "L'ora di fine (%(end)s) deve essere successiva all'ora di inizio (%(start)s)." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "La extra_spec: %s non è valida." - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "Specifica supplementare: %(extraspec)s non valida." @@ -7363,14 +7014,6 @@ msgstr "" msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "L'utente iSCSI CHAP %(user)s non esiste." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"La lun importata %(lun_id)s è nel pool %(lun_pool)s che non è gestito " -"dall'host %(host)s." - msgid "The key cannot be None." msgstr "La chiave non può essere Nessuno." @@ -7452,11 +7095,6 @@ msgstr "" "L'istantanea non può essere creata quando il volume è in modalità di " "manutenzione. " -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "Il volume di origine %s non è nel pool gestito dall'host corrente. " - msgid "The source volume for this WebDAV operation not found." msgstr "Volume di origine per questa operazione WebDAV non trovato." @@ -7634,10 +7272,6 @@ msgstr "" msgid "There are no valid ESX hosts." msgstr "Non vi sono host ESX validi. " -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "Nessun archivio dati valido collegato a %s." - msgid "There are no valid datastores." msgstr "Nessun archivio dati valido." @@ -7738,11 +7372,6 @@ msgstr "" msgid "Thin provisioning not supported on this version of LVM." msgstr "Thin provisioning non supportato in questa versione di LVM." -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "" -"ThinProvisioning Enabler non è installato. Impossibile creare un volume " -"sottile" - msgid "This driver does not support deleting in-use snapshots." msgstr "Questo driver non supporta l'eliminazione di istantanee in uso." @@ -7778,14 +7407,6 @@ msgstr "" "Timeout in attesa dell'aggiornamento di Nova per l'eliminazione " "dell'istantanea %(id)s." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"Il valore di timeout (in secondi) utilizzato quando ci si connette al " -"cluster ceph. Se il valore è < 0, nessun timeout viene impostato e viene " -"utilizzato il valore librados predefinito." - #, python-format msgid "Timeout while calling %s " msgstr "Timeout durante la chiamata di %s " @@ -7873,9 +7494,6 @@ msgstr "Impossibile completare il failover di %s." msgid "Unable to connect or find connection to host" msgstr "Impossibile connettersi o trovare la connessione all'host" -msgid "Unable to create Barbican Client without project_id." -msgstr "Impossibile creare il client Barbican senza project_id." - #, python-format msgid "Unable to create consistency group %s" msgstr "Impossibile creare il gruppo di coerenza %s" @@ -7975,9 +7593,6 @@ msgstr "" "Impossibile eseguire la replica con la versione API REST Purity " "%(api_version)s, richiesta una tra le versioni %(required_versions)s." 
-msgid "Unable to enable replication and snapcopy at the same time." -msgstr "Impossibile abilitare replica e snapcopy contemporaneamente." - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Impossibile stabilire la relazione con il cluster Storwize %s." @@ -8383,9 +7998,6 @@ msgstr "Protocollo sconosciuto: %(protocol)s. " msgid "Unknown quota resources %(unknown)s." msgstr "Risorse quota sconosciute %(unknown)s." -msgid "Unknown service" -msgstr "Servizio sconosciuto" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'" @@ -8505,9 +8117,6 @@ msgstr "Identificativo Utente" msgid "User does not have admin privileges" msgstr "L'utente non ha i privilegi dell'amministratore" -msgid "User is not authorized to use key manager." -msgstr "L'utente non è autorizzato ad utilizzare il gestore chiavi." - msgid "User not authorized to perform WebDAV operations." msgstr "L'utente non è autorizzato ad eseguire le operazioni WebDAV." @@ -8731,14 +8340,6 @@ msgstr "" "Il volume %s è online. Impostare il volume su offline per la gestione " "tramite OpenStack." -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "" -"Il volume %s non deve essere in fase di migrazione, essere collegato, far " -"parte di un gruppo di coerenza o avere istantanee." - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Il volume %s non deve fare parte di un gruppo di coerenza." @@ -8767,12 +8368,6 @@ msgstr "Il gruppo del volume %s non esiste" msgid "Volume Type %(id)s already exists." msgstr "Il tipo di volume %(id)s esiste già." -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"Il tipo di volume %(type_id)s non contiene specifica supplementare con la " -"chiave %(id)s." - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -8979,9 +8574,6 @@ msgstr "" msgid "Volume size must be a multiple of 1 GB." msgstr "La dimensione del volume deve essere un multiplo di 1 GB." -msgid "Volume size must be multiple of 1 GB." -msgstr "La dimensione del volume deve essere un multiplo di 1 GB." - msgid "Volume size must multiple of 1 GB." msgstr "La dimensione del volume deve essere un multiplo di 1 GB. " @@ -9060,10 +8652,6 @@ msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" "Impossibile trovare il tipo di volume con il nome %(volume_type_name)s." -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "Il volume con id volume %s non esiste. " - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -9078,18 +8666,10 @@ msgstr "" "Il volume: %(volumeName)s non è stato aggiunto al gruppo di archiviazione " "%(sgGroupName)s." -#, python-format -msgid "Volume: %s could not be found." -msgstr "Impossibile trovare il volume %s." - #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Il volume: %s è già in fase di importazione da Cinder." -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "" -"I volumi verranno suddivisi in oggetti di questa dimensione (in megabyte)." - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "Volumi/account superati sugli account SolidFire primario e secondario." 
@@ -9727,13 +9307,6 @@ msgstr "" "create_consistencygroup_from_src supporta solo un'origine istantanea cg o " "un'origine gruppo di coerenza. Non possono essere utilizzate più origini." -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src supporta un'origine cgsnapshot o un'origine " -"del gruppo di coerenza. Non possono essere utilizzate più origini." - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: vdisk origine %(src)s (%(src_id)s) non esiste." @@ -9839,11 +9412,6 @@ msgstr "" "create_volume_from_snapshot: Lo stato dell'istantanea deve essere \"available" "\" per la creazione del volume. Lo stato non valido è: %s." -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "" -"create_volume_from_snapshot: la dimensione dell'origine e della destinazione " -"sono differenti." - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." @@ -9851,10 +9419,6 @@ msgstr "" "create_volume_from_snapshot: Dimensione del volume diversa dal volume basato " "sull'istantanea." -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "" -"Non è possibile abilitare entrambi il livellamento automatico e deduplicato." - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -9899,9 +9463,6 @@ msgstr "scollegamento istantanea dal nodo remoto " msgid "do_setup: No configured nodes." msgstr "do_setup: Nessun nodo configurato." -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries deve essere maggiore o uguale a 0" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -10099,22 +9660,12 @@ msgstr "Esecuzione di iscsiadm non riuscita. " msgid "key manager error: %(reason)s" msgstr "errore gestore chiavi: %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key non definito" - msgid "limit param must be an integer" msgstr "parametro limite deve essere un numero intero" msgid "limit param must be positive" msgstr "parametro limite deve essere positivo" -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -"manage_existing non può gestire un volume connesso agli host. Disconnettere " -"questo volume dagli host esistenti prima dell'importazione" - msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing richiede una chiave 'name' per identificare un volume " @@ -10163,10 +9714,6 @@ msgstr "Rilevate più risorse con ID istantanea %s" msgid "name cannot be None" msgstr "il nome non può essere None" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path: Impossibile trovare lo strumento NAVISECCLI %(path)s." - #, python-format msgid "no REPLY but %r" msgstr "nessuna RISPOSTA ma %r" @@ -10229,14 +9776,6 @@ msgstr "le librerie python rados e rbd non sono state trovate" msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted può essere solo 'no', 'yes' o 'only', non %r" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "replication_device deve essere configurato sul backend: %s." 
- -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "replication_device con backend_id [%s] mancante." - #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover non riuscito. %s non trovato." @@ -10296,9 +9835,6 @@ msgstr "san_ip non impostato." msgid "san_ip must be set" msgstr "san_ip deve essere impostato" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip: Configurazione campo obbligatorio. san_ip non è impostato." - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." @@ -10310,16 +9846,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "il servizio() può essere chiamato solo una volta" -msgid "service not found" -msgstr "servizio non trovato" - -msgid "snapshot does not exist" -msgstr "l'istantanea non esiste" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "ID istantanea:%s non trovato" - #, python-format msgid "snapshot-%s" msgstr "istantanea-%s " @@ -10330,10 +9856,6 @@ msgstr "istantanee assegnate" msgid "snapshots changed" msgstr "istantanee modificate" -#, python-format -msgid "source vol id:%s not found" -msgstr "ID volume di origine: %s non trovato" - #, python-format msgid "source volume id:%s is not replicated" msgstr "ID volume origine:%s non replicato" @@ -10431,9 +9953,6 @@ msgstr "volume assegnato" msgid "volume changed" msgstr "volume modificato" -msgid "volume does not exist" -msgstr "il volume non esiste" - msgid "volume is already attached" msgstr "il volume è già collegato" @@ -10451,9 +9970,6 @@ msgstr "" msgid "volume size %d is invalid." msgstr "la dimensione del volume %d non è valida." -msgid "volume_type cannot be None" -msgstr "volume_type non può essere None" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" @@ -10489,6 +10005,3 @@ msgid "" msgstr "" "La proprietà zfssa_manage_policy deve essere impostata su 'strict' o " "'loose'. Il valore corrente è : %s." - -msgid "{} is not a valid option." -msgstr "{} non è un'opzione valida." diff --git a/cinder/locale/ja/LC_MESSAGES/cinder.po b/cinder/locale/ja/LC_MESSAGES/cinder.po index eee589444..1a385a943 100644 --- a/cinder/locale/ja/LC_MESSAGES/cinder.po +++ b/cinder/locale/ja/LC_MESSAGES/cinder.po @@ -8,16 +8,17 @@ # Ryo Fujita , 2013 # Tomoyuki KATO , 2013 # Andreas Jaeger , 2016. #zanata +# Yoshiki Eguchi , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-04 06:28+0000\n" -"Last-Translator: Tsutomu Kimura \n" +"PO-Revision-Date: 2016-08-31 02:37+0000\n" +"Last-Translator: Yoshiki Eguchi \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" @@ -28,6 +29,22 @@ msgstr "" msgid "\t%s" msgstr "\t%s" +#, python-format +msgid "" +"\n" +"CoprHD Exception: %(msg)s\n" +msgstr "" +"\n" +"CoprHD の例外: %(msg)s\n" + +#, python-format +msgid "" +"\n" +"General Exception: %(exec_info)s\n" +msgstr "" +"\n" +"一般的な例外: %(exec_info)s\n" + #, python-format msgid "" "\n" @@ -75,6 +92,14 @@ msgstr "" msgid "%(error_message)s" msgstr "%(error_message)s" +#, python-format +msgid "%(error_msg)s Error description: %(error_description)s" +msgstr "%(error_msg)s エラーの説明: %(error_description)s" + +#, python-format +msgid "%(error_msg)s Error details: %(error_details)s" +msgstr "%(error_msg)s エラー詳細: %(error_details)s" + #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" @@ -114,13 +139,17 @@ msgid "" "Body: %(_body)s" msgstr "" "%(message)s\n" -"状況コード: %(_status)s\n" +"ステータスコード: %(_status)s\n" "本体: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s、subjectAltName: %(sanList)s。" +#, python-format +msgid "%(msg)s And %(num)s services from the cluster were also removed." +msgstr "クラスターの %(msg)s と %(num)s サービスも削除されました。" + #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " @@ -129,6 +158,14 @@ msgstr "" "NetworkPortal の作成に関する %(msg_type)s: 他のサービスが IP %(ip)s 上のポー" "ト %(port)d を使用していないことを確認してください。" +#, python-format +msgid "%(name)s cannot be all spaces." +msgstr "%(name)s は全て空白にはできません。" + +#, python-format +msgid "%(new_size)s < current size %(size)s" +msgstr "%(new_size)s < 現在のサイズ %(size)s" + #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " @@ -139,6 +176,10 @@ msgstr "" "アップオブジェクトが予期しないモードです。イメージまたはファイルのバックアッ" "プがサポートされています。実際のモードは %(vol_mode)s です。" +#, python-format +msgid "%(reason)s" +msgstr "%(reason)s" + #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" @@ -147,11 +188,11 @@ msgstr "" #, python-format msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s は %(max_value)d 以下である必要があります" +msgstr "%(value_name)s は %(max_value)d 以下である必要があります。" #, python-format msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s は %(min_value)d 以上である必要があります" +msgstr "%(value_name)s は %(min_value)d 以上である必要があります。" #, python-format msgid "" @@ -168,6 +209,10 @@ msgstr "%s" msgid "%s \"data\" is not in result." msgstr "結果内に %s \"data\" がありません。" +#, python-format +msgid "%s assigned" +msgstr "割り当てられた %s" + #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." @@ -188,6 +233,10 @@ msgstr "" "%s は圧縮ボリューム上でホストされているため、複製操作を使用してサイズ変更する" "ことはできません" +#, python-format +msgid "%s changed" +msgstr "変更された %s" + #, python-format msgid "%s configuration option is not set." 
msgstr "%s の設定オプションが設定されていません。" @@ -202,7 +251,7 @@ msgstr "%s はディレクトリーではありません。" #, python-format msgid "%s is not installed" -msgstr "%s がインストールされていません" +msgstr "%s がインストールされていません。" #, python-format msgid "%s is not installed." @@ -242,6 +291,10 @@ msgstr "%s が cinder.conf に設定されていません" msgid "%s not set." msgstr "%s が設定されていません。" +#, python-format +msgid "'%(key)s = %(value)s'" +msgstr "'%(key)s = %(value)s'" + #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " @@ -254,7 +307,7 @@ msgid "'active' must be present when writing snap_info." msgstr "snap_info の書き込み時には 'active' が存在しなければなりません。" msgid "'consistencygroup_id' must be specified" -msgstr "'consistencygroup_id' を指定する必要があります" +msgstr "'consistencygroup_id' を指定する必要があります。" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' の解析に失敗しました。" @@ -265,9 +318,6 @@ msgstr "'status' を指定する必要があります。" msgid "'volume_id' must be specified" msgstr "'volume_id' を指定する必要があります" -msgid "'{}' object has no attribute '{}'" -msgstr "'{}' オブジェクトに属性 '{}' がありません" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -276,6 +326,18 @@ msgstr "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" +msgid "400 Bad Request" +msgstr "413 Request entity too large" + +msgid "401 Unauthorized Error" +msgstr "401 Unauthorized エラー" + +msgid "404 Not Found Error" +msgstr "404 Not Found エラー" + +msgid "413 Request entity too large" +msgstr "413 Request entity too large" + #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "LUN (HLUN) が見つかりませんでした。(LDEV: %(ldev)s)" @@ -331,9 +393,12 @@ msgstr "パラメーター値が無効です。(%(meta)s)" msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "プールが見つかりませんでした。(プール ID: %(pool_id)s)" +msgid "A readonly volume must be attached as readonly." +msgstr "読み取り専用ボリュームは、読み取り専用として接続する必要があります。" + #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "スナップショット状況が無効です。(状況: %(status)s)" +msgstr "スナップショットの状態況が無効です。(状況: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" @@ -345,11 +410,11 @@ msgstr "ボリューム ID またはシェアが指定されませんでした #, python-format msgid "A volume status is invalid. (status: %(status)s)" -msgstr "ボリューム状況が無効です。(状況: %(status)s)" +msgstr "ボリュームの状態が無効です。(状況: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s がエラー文字列 %(err)s で失敗しました" +msgstr "API %(name)s がエラー文字列 %(err)s で失敗しました。" #, python-format msgid "" @@ -384,6 +449,14 @@ msgstr "" "ゼロでないクォータを持つ子プロジェクトを削除しようとしています。これは実施す" "べきではありません。" +msgid "Access forbidden: Authentication required" +msgstr "アクセス不許可: 認証が必要です。" + +msgid "" +"Access forbidden: You don't have sufficient privileges to perform this " +"operation" +msgstr "アクセス不許可: このオペレーションを行うための十分な権限がありません。" + msgid "Access list not available for public volume types." msgstr "パブリックボリュームタイプではアクセスリストを使用できません。" @@ -430,20 +503,12 @@ msgstr "" "API バージョンのリクエストは VersionedMethod オブジェクトと比較する必要があり" "ます。" -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "SheepdogDriver でエラーが発生しました。(理由: %(reason)s)" - msgid "An error has occurred during backup operation" -msgstr "バックアップ操作中にエラーが発生しました" +msgstr "バックアップ操作中にエラーが発生しました。" #, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." 
-msgstr "スナップショット '%s' を変更しようとしたときにエラーが発生しました。" - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "ボリューム \"%s\" の検出中にエラーが発生しました。" +msgid "An error has occurred in SheepdogDriver. (Reason: %(reason)s)" +msgstr "SheepdogDriver でエラーが発生しました。(理由: %(reason)s)" #, python-format msgid "" @@ -458,6 +523,10 @@ msgstr "" msgid "An error occurred while reading volume \"%s\"." msgstr "ボリューム \"%s\" の読み取り中にエラーが発生しました。" +#, python-format +msgid "An error occurred while seeking for volume \"%s\"." +msgstr "ボリューム \"%s\" のシーク中にエラーが発生しました。" + #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "ボリューム \"%s\" への書き込み中にエラーが発生しました。" @@ -486,6 +555,9 @@ msgstr "" "iSCSI ターゲットを削除できませんでした。(ポート: %(port)s、tno: %(tno)s、別" "名: %(alias)s)" +msgid "An unknown error occurred." +msgstr "不明なエラーが発生しました。" + msgid "An unknown exception occurred." msgstr "不明な例外が発生しました。" @@ -533,13 +605,33 @@ msgstr "マッピングビューへの LUN グループの関連付けのエラ msgid "Associate portgroup to mapping view error." msgstr "マッピングビューへのポートグループの関連付けのエラー。" +#, python-format +msgid "Async error: Unable to retrieve %(obj)s method %(method)s result" +msgstr "" +"非同期エラー: オブジェクト %(obj)s 、メソッド %(method)s の結果を取得でき" +"ません。" + msgid "At least one valid iSCSI IP address must be set." msgstr "有効な iSCSI IP アドレスを 1 つ以上設定する必要があります。" +#, python-format +msgid "" +"Attach volume (%(name)s) to host (%(hostname)s) initiator " +"(%(initiatorport)s) failed:\n" +"%(err)s" +msgstr "" +"ホスト (%(hostname)s) 、イニシエーター (%(initiatorport)s) へのボリュー" +"ム (%(name)s) の接続が失敗しました。\n" +"%(err)s" + #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "無効な認証キーを使用して %s を転送しようとしています。" +#, python-format +msgid "Attribute: %s not found." +msgstr "属性 %s が見つかりません。" + #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "CloudByte のストレージで認証グループ [%s] の詳細が見つかりません。" @@ -547,18 +639,12 @@ msgstr "CloudByte のストレージで認証グループ [%s] の詳細が見 msgid "Auth user details not found in CloudByte storage." msgstr "CloudByte のストレージで認証ユーザーの詳細が見つかりません。" -msgid "Authentication error" -msgstr "認証エラー" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "認証が失敗しました。スイッチのクレデンシャルを検証してください。エラーコード " "%s。" -msgid "Authorization error" -msgstr "許可エラー" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "アベイラビリティーゾーン '%(s_az)s' は無効です。" @@ -577,11 +663,6 @@ msgstr "" msgid "Backend doesn't exist (%(backend)s)" msgstr "バックエンドが存在しません(%(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." -msgstr "" -"バックエンドで既にフェイルオーバーが完了しました。フェイルバックすることはで" -"きません。" - #, python-format msgid "Backend reports: %(message)s" msgstr "バックエンドレポート: %(message)s" @@ -592,9 +673,6 @@ msgstr "バックエンドレポート: 項目は既に存在します" msgid "Backend reports: item not found" msgstr "バックエンドレポート: 項目が見つかりません" -msgid "Backend server not NaServer." -msgstr "バックエンドサーバーが NaServer ではありません。" - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" @@ -615,7 +693,7 @@ msgid "Backup %(backup_id)s could not be found." msgstr "バックアップ %(backup_id)s が見つかりませんでした。" msgid "Backup RBD operation failed" -msgstr "バックアップ RBD 操作が失敗しました" +msgstr "バックアップ RBD 操作が失敗しました。" msgid "Backup already exists in database." msgstr "データベースのバックアップが既に存在しています。" @@ -671,21 +749,25 @@ msgstr "" "個含まれています" msgid "Backup status must be available" -msgstr "バックアップ状況は「使用可能」でなければなりません" +msgstr "バックアップの状態は「使用可能」でなければなりません。" #, python-format msgid "Backup status must be available and not %s." 
-msgstr "バックアップ状況は %s ではなく「使用可能」でなければなりません。" +msgstr "バックアップの状態は %s ではなく「使用可能」でなければなりません。" msgid "Backup status must be available or error" -msgstr "バックアップ状況は「使用可能」または「エラー」でなければなりません" +msgstr "バックアップの状態は「使用可能」または「エラー」でなければなりません。" msgid "Backup to be restored has invalid size" -msgstr "復元するバックアップのサイズが無効です" +msgstr "復元するバックアップのサイズが無効です。" + +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "不正な HTTP レスポンスステータス : %(status)s" #, python-format msgid "Bad Status line returned: %(arg)s." -msgstr "正しくない状況表示行が返されました: %(arg)s。" +msgstr "正しくないステータス表示行が返されました: %(arg)s。" #, python-format msgid "Bad key(s) in quota set: %s" @@ -702,12 +784,6 @@ msgstr "" msgid "Bad project format: project is not in proper format (%s)" msgstr "不正なプロジェクト形式: プロジェクトの形式が正しくありません (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Datera クラスターに不正な要求 (無効な引数) が送信されました: %(args)s | " -"%(message)s" - msgid "Bad response from Datera API" msgstr "Datera API からの正しくない応答" @@ -724,27 +800,16 @@ msgstr "バイナリー" msgid "Blank components" msgstr "空白コンポーネント" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Blockbridge API の認証スキーム (トークンまたはパスワード)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Blockbridge API のパスワード ('password' の認証スキーム)" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Blockbridge API のトークン ('token' の認証スキーム )" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Blockbridge API のユーザー ('password' の認証スキーム)" - msgid "Blockbridge api host not configured" -msgstr "Blockbridge API のホストが設定されていません" +msgstr "Blockbridge API のホストが設定されていません。" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "Blockbridge に無効な認証スキーム '%(auth_scheme)s' が設定されています" +msgstr "" +"Blockbridge に無効な認証スキーム '%(auth_scheme)s' が設定されています。" msgid "Blockbridge default pool does not exist" -msgstr "Blockbridge のデフォルトプールが存在しません" +msgstr "Blockbridge のデフォルトプールが存在しません。" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" @@ -753,7 +818,7 @@ msgstr "" "要)" msgid "Blockbridge pools not configured" -msgstr "Blockbridge プールが設定されていません" +msgstr "Blockbridge プールが設定されていません。" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" @@ -763,6 +828,9 @@ msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Blockbridge のユーザーが設定されていません ('password' の認証スキームに必要)" +msgid "Bourne internal server error" +msgstr "Bourne 内部サーバーエラー" + #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "BrocadeファイバーチャネルゾーニングCLIエラー:%(reason)s" @@ -829,7 +897,7 @@ msgid "Cache volume %s does not have required properties" msgstr "キャッシュボリューム %s に必須のプロパティーがありません" msgid "Call returned a None object" -msgstr "呼び出しが None オブジェクトを返しました" +msgstr "呼び出しが None オブジェクトを返しました。" msgid "Can not add FC port to host." msgstr "ホストに FC ポートを追加できません。" @@ -839,14 +907,49 @@ msgid "Can not find cache id by cache name %(name)s." msgstr "" "キャッシュ名 %(name)s によってキャッシュ ID を見つけることができません。" +#, python-format +msgid "Can not find cinder volume - %(volumeName)s" +msgstr "Cinder ボリュームが見つかりません - %(volumeName)s" + +#, python-format +msgid "Can not find cinder volume - %(volumeName)s." +msgstr "Cinder ボリュームが見つかりません - %(volumeName)s" + +#, python-format +msgid "Can not find cinder volume - %s" +msgstr "Cinder ボリュームが見つかりません - %s" + +#, python-format +msgid "Can not find cinder volume - %s." 
+msgstr "Cinder ボリュームが見つかりません - %s" + +#, python-format +msgid "Can not find client id. The connection target name is %s." +msgstr "クライアント ID が見つかりません。接続ターゲット名は %s です。" + +#, python-format +msgid "Can not find consistency group: %s." +msgstr "整合性グループが見つかりません: %s" + #, python-format msgid "Can not find partition id by name %(name)s." msgstr "名前 %(name)s によってパーティション ID を見つけることができません。" +#, python-format +msgid "Can not find this error code:%s." +msgstr "エラーコード %s が見つかりません。" + #, python-format msgid "Can not get pool info. pool: %s" msgstr "プール情報を取得できません。プール: %s" +msgid "" +"Can not set tiering policy for a deduplicated volume. Set the tiering policy " +"on the pool where the deduplicated volume locates." +msgstr "" +"重複排除されたボリュームには階層化ポリシーを設定できません。重複排除されたボ" +"リュームのあるプールに対して階層化ポリシーを設定します。" + #, python-format msgid "Can not translate %s to integer." msgstr "%s を整数に変換できません。" @@ -855,9 +958,6 @@ msgstr "%s を整数に変換できません。" msgid "Can't access 'scality_sofs_config': %s" msgstr "'scality_sofs_config' にアクセスできません: %s" -msgid "Can't attach snapshot." -msgstr "スナップショットを追加できません。" - msgid "Can't decode backup record." msgstr "バックアップレコードを復号化できません。" @@ -980,10 +1080,6 @@ msgstr "" "Cinder にスナップショット %s をインポートできません。スナップショットの状態が" "正常ではないか、実行状態がオンラインではありません。" -#, python-format -msgid "Can't open config file: %s" -msgstr "構成ファイルを開くことができません: %s" - msgid "Can't parse backup record." msgstr "バックアップレコードを解析できません。" @@ -1036,6 +1132,54 @@ msgstr "" "れていないため、ボリューム %(volume_id)s をこの整合性グループに追加できませ" "ん。" +#, python-format +msgid "" +"Cannot add volume %(volume_id)s to group %(group_id)s because it has no " +"volume type." +msgstr "" +"ボリューム %(volume_id)s にはボリュームタイプがないため、このボリュームをグ" +"ループ %(group_id)s に追加できません。" + +#, python-format +msgid "" +"Cannot add volume %(volume_id)s to group %(group_id)s because it is already " +"in group %(orig_group)s." +msgstr "" +"ボリューム %(volume_id)s が既にグループ %(orig_group)s 内に存在するため、この" +"ボリュームをグループ %(group_id)s に追加することはできません。" + +#, python-format +msgid "" +"Cannot add volume %(volume_id)s to group %(group_id)s because volume cannot " +"be found." +msgstr "" +"ボリューム %(volume_id)s は見つからないため、グループ %(group_id)s に追加でき" +"ません。" + +#, python-format +msgid "" +"Cannot add volume %(volume_id)s to group %(group_id)s because volume does " +"not exist." +msgstr "" +"ボリューム %(volume_id)s は存在しないため、グループ %(group_id)s に追加できま" +"せん。" + +#, python-format +msgid "" +"Cannot add volume %(volume_id)s to group %(group_id)s because volume is in " +"an invalid state: %(status)s. Valid states are: %(valid)s." +msgstr "" +"ボリューム %(volume_id)s は無効な状態 %(status)s であるため、グルー" +"プ%(group_id)s に追加できません。有効な状態は %(valid)s です。" + +#, python-format +msgid "" +"Cannot add volume %(volume_id)s to group %(group_id)s because volume type " +"%(volume_type)s is not supported by the group." +msgstr "" +"ボリュームタイプ %(volume_type)s はグループ %(group_id)s ではサポートされてい" +"ないため、ボリューム %(volume_id)s をこのグループに追加できません。" + #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " @@ -1058,13 +1202,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "ECOM サーバーに接続できません。" -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"サイズ %(src_vol_size)s のボリュームからサイズ %(vol_size)s の複製を作成でき" -"ません" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1088,6 +1225,12 @@ msgstr "ディレクトリー %s を作成できません。" msgid "Cannot create encryption specs. Volume type in use." 
msgstr "暗号化仕様を作成できません。ボリュームタイプは使用中です。" +#, python-format +msgid "Cannot create group_type with name %(name)s and specs %(group_specs)s" +msgstr "" +"名前 %(name)s および仕様 %(group_specs)s を使用して group_type を作成できませ" +"ん。" + #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." @@ -1112,13 +1255,6 @@ msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "名前が %(sgGroupName)s のストレージグループを作成または検出できません。" -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"サイズ %(snap_size)s のスナップショットからサイズ %(vol_size)s のボリュームを" -"作成できません" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "サイズが %s のボリュームを作成できません: 8GB の倍数ではありません。" @@ -1127,7 +1263,7 @@ msgstr "サイズが %s のボリュームを作成できません: 8GB の倍 msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "名前 %(name)s および仕様 %(extra_specs)s を使用して volume_type を作成できま" -"せん" +"せん。" #, python-format msgid "Cannot delete LUN %s while snapshots exist." @@ -1151,6 +1287,14 @@ msgstr "" "新されたこのキャッシュボリュームには 現在 %(numclones)s のボリュームインスタ" "ンスがあります。" +#, python-format +msgid "" +"Cannot delete consistency group %(id)s. %(reason)s, and it cannot be the " +"source for an ongoing CG or CG Snapshot creation." +msgstr "" +"整合性グループ %(id)s を削除できません。理由 : %(reason)s 。 これを進行中の " +"CG または CG スナップショットのソースとすることはできません。" + msgid "Cannot delete encryption specs. Volume type in use." msgstr "暗号化仕様を削除できません。ボリュームタイプは使用中です。" @@ -1158,7 +1302,7 @@ msgid "Cannot determine storage pool settings." msgstr "ストレージプールの設定を決定できません。" msgid "Cannot execute /sbin/mount.sofs" -msgstr "/sbin/mount.sofs を実行できません" +msgstr "/sbin/mount.sofs を実行できません。" #, python-format msgid "Cannot find CG group %s." @@ -1212,6 +1356,10 @@ msgstr "システム %s でレプリケーションサービスが見つかり msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "ソース CG のインスタンスが見つかりません。consistencygroup_id: %s。" +#, python-format +msgid "Cannot get iSCSI ipaddresses or multipath flag. Exception is %(ex)s. " +msgstr "iSCSI IP アドレスか、マルチパスフラグを取得できません。例外: %(ex)s" + #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "チャンネル ID %(channel_id)s によって mcs_id を取得できません。" @@ -1263,10 +1411,10 @@ msgstr "マスキングビューからポートグループを取得できませ msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -"Scality SOFS をマウントできません。syslog でエラーについて確認してください" +"Scality SOFS をマウントできません。syslog でエラーについて確認してください。" msgid "Cannot ping DRBDmanage backend" -msgstr "DRBDmanage のバックエンドに ping を送信できません" +msgstr "DRBDmanage のバックエンドに ping を送信できません。" #, python-format msgid "Cannot place volume %(id)s on %(host)s" @@ -1281,10 +1429,10 @@ msgstr "" "'source_cgid' の両方を提供することができません。" msgid "Cannot register resource" -msgstr "リソースを登録できません" +msgstr "リソースを登録できません。" msgid "Cannot register resources" -msgstr "リソースを登録できません" +msgstr "リソースを登録できません。" #, python-format msgid "" @@ -1303,6 +1451,22 @@ msgstr "" "ボリューム %(volume_id)s は無効な状態 %(status)s であるため、整合性グルー" "プ%(group_id)s から削除できません。有効な状態は %(valid)s です。" +#, python-format +msgid "" +"Cannot remove volume %(volume_id)s from group %(group_id)s because it is not " +"in the group." +msgstr "" +"ボリューム %(volume_id)s はグループ %(group_id)s 内に存在しないため、このグ" +"ループから削除できません。" + +#, python-format +msgid "" +"Cannot remove volume %(volume_id)s from group %(group_id)s because volume is " +"in an invalid state: %(status)s. Valid states are: %(valid)s." 
+msgstr "" +"ボリューム %(volume_id)s は無効な状態 %(status)s であるため、グループ " +"%(group_id)s から削除できません。有効な状態は %(valid)s です。" + #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "HPE3PARDriver から %s にタイプ変更することはできません。" @@ -1342,12 +1506,32 @@ msgstr "" "有効な名前、説明、add_volumes、または remove_volumes が指定されなかったため、" "整合性グループ %(group_id)s を更新できません。" +#, python-format +msgid "" +"Cannot update consistency group %s, status must be available, and it cannot " +"be the source for an ongoing CG or CG Snapshot creation." +msgstr "" +"整合性グループ %s を更新できません。状態は「使用可能」である必要があります。 " +"これを進行中の CG または CG スナップショットのソースとすることはできません。" + msgid "Cannot update encryption specs. Volume type in use." msgstr "暗号化仕様を更新できません。ボリュームタイプは使用中です。" +#, python-format +msgid "" +"Cannot update group %(group_id)s because no valid name, description, " +"add_volumes, or remove_volumes were provided." +msgstr "" +"有効な名前、説明、add_volumes 、または remove_volumes が指定されなかったた" +"め、グループ %(group_id)s を更新できません。" + +#, python-format +msgid "Cannot update group_type %(id)s" +msgstr "group_type %(id)s を更新できません。" + #, python-format msgid "Cannot update volume_type %(id)s" -msgstr "volume_type %(id)s を更新できません" +msgstr "volume_type %(id)s を更新できません。" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." @@ -1357,6 +1541,13 @@ msgstr "オブジェクト %(instanceName)s の存在を確認できません。 msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "CgSnapshot %(cgsnapshot_id)s が見つかりませんでした。" +msgid "" +"CgSnapshot status must be available or error, and no CG can be currently " +"using it as source for its creation." +msgstr "" +"cgsnapshot の状態は「使用可能」または「エラー」でなければなりません。またこれ" +"をソースとして用いて CG を作成することは現在できません。" + msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost が空です。整合性グループは作成されません。" @@ -1409,6 +1600,16 @@ msgstr "" msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "CiscoファイバーチャネルゾーニングCLIエラー:%(reason)s" +#, python-format +msgid "Client with ip %s wasn't found " +msgstr "IP %s のクライアントが見つかりませんでした。" + +msgid "" +"Clone can't be taken individually on a volume that is part of a Consistency " +"Group" +msgstr "" +"整合性グループの一部のボリュームのクローンを個別に取得することはできません。" + #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "%(storageSystem)s では複製フィーチャーはライセンス交付されていません。" @@ -1421,6 +1622,25 @@ msgstr "" "クローンタイプ '%(clone_type)s' は無効です。有効な値は '%(full_clone)s' およ" "び '%(linked_clone)s' です。" +msgid "Cluster" +msgstr "クラスター" + +#, python-format +msgid "Cluster %(id)s could not be found." +msgstr "クラスター %(id)s が見つかりませんでした。" + +#, python-format +msgid "Cluster %(id)s still has hosts." +msgstr "クラスター %(id)s はまだホストを持っています。" + +#, python-format +msgid "Cluster %(name)s already exists." +msgstr "クラスター %(name)s は既に存在します。" + +#, python-format +msgid "Cluster %s successfully removed." +msgstr "クラスター %s は正常に削除されました。" + msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." 
msgstr "" @@ -1432,15 +1652,11 @@ msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Coho Data Cinder ドライバーの失敗: %(message)s" msgid "Coho rpc port is not configured" -msgstr "Coho の rpc ポートが設定されていません" +msgstr "Coho の rpc ポートが設定されていません。" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "CLI でブロックされたコマンド %(cmd)s を取り消しました" - -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition: %s タイムアウト" +msgstr "CLI でブロックされたコマンド %(cmd)s を取り消しました。" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." @@ -1498,15 +1714,61 @@ msgstr "Swift との接続に失敗しました: %(reason)s" #, python-format msgid "Connector does not provide: %s" -msgstr "コネクターが %s を提供しません" +msgstr "コネクターが %s を提供しません。" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "コネクターは必要な情報を持っていません: %(missing)s" +#, python-format +msgid "" +"Consistency Group %(cg_uri)s: update failed\n" +"%(err)s" +msgstr "" +"整合性グループ %(cg_uri)s: 更新に失敗しました。\n" +"%(err)s" + +#, python-format +msgid "" +"Consistency Group %(name)s: create failed\n" +"%(err)s" +msgstr "" +"整合性グループ %(name)s: 作成に失敗しました。\n" +"%(err)s" + +#, python-format +msgid "" +"Consistency Group %(name)s: delete failed\n" +"%(err)s" +msgstr "" +"整合性グループ %(name)s: 削除に失敗しました。\n" +"%(err)s" + +#, python-format +msgid "Consistency Group %s not found" +msgstr "整合性グループ %s が見つかりません。" + +#, python-format +msgid "Consistency Group %s: not found" +msgstr "整合性グループ %s: 見つかりませんでした。" + msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "整合性グループが空です。cgsnapshot は作成されません。" +msgid "" +"Consistency group must not have attached volumes, volumes with snapshots, or " +"dependent cgsnapshots" +msgstr "" +"整合性グループは、接続されたボリューム、スナップショットが含まれるボリュー" +"ム、従属 cgsnapshot を持つことができません。" + +msgid "" +"Consistency group status must be available or error and must not have " +"volumes or dependent cgsnapshots" +msgstr "" +"整合性グループの状態は「使用可能」または「エラー」でなければならず、ボリュー" +"ムや従属 cgsnapshot を持つことができません。" + #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "ConsistencyGroup %(consistencygroup_id)s が見つかりませんでした。" @@ -1555,6 +1817,10 @@ msgstr "ローに変換されましたが、現在の形式は %s です。" msgid "Coordinator uninitialized." msgstr "初期化されていないコーディネーター。" +#, python-format +msgid "CoprHD internal server error. Error details: %s" +msgstr "CoprHD 内部サーバーエラー。エラー詳細: %s" + #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" @@ -1603,24 +1869,18 @@ msgstr "GPFS クラスター ID が見つかりませんでした: %s。" msgid "Could not find GPFS file system device: %s." msgstr "GPFS ファイルシステムデバイスが見つかりませんでした: %s。" -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "" -"タイプ %(type_id)s を持つボリューム %(volume_id)s のホストが見つかりませんで" -"した。" - #, python-format msgid "Could not find config at %(path)s" -msgstr "%(path)s で config が見つかりませんでした" +msgstr "%(path)s で config が見つかりませんでした。" #, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." +msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "" "ボリューム %(volumeName)s の iSCSI エクスポートが見つかりませんでした。" #, python-format msgid "Could not find iSCSI export for volume %s" -msgstr "ボリューム %s の iSCSI エクスポートが見つかりませんでした" +msgstr "ボリューム %s の iSCSI エクスポートが見つかりませんでした。" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." 
@@ -1635,11 +1895,11 @@ msgstr "" #, python-format msgid "Could not find parameter %(param)s" -msgstr "パラメーター %(param)s が見つかりませんでした" +msgstr "パラメーター %(param)s が見つかりませんでした。" #, python-format msgid "Could not find target %s" -msgstr "ターゲット %s が見つかりませんでした" +msgstr "ターゲット %s が見つかりませんでした。" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." @@ -1659,7 +1919,7 @@ msgstr "システム名を取得できませんでした。" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -"paste アプリケーション '%(name)s' を %(path)s からロードできませんでした" +"paste アプリケーション '%(name)s' を %(path)s からロードできませんでした。" #, python-format msgid "Could not read %s. Re-running with sudo" @@ -1685,6 +1945,19 @@ msgstr "設定を %(file_path)s に保存できませんでした: %(exc)s" msgid "Could not start consistency group snapshot %s." msgstr "整合性グループのスナップショット %s を開始できませんでした。" +#, python-format +msgid "Couldn't find ORM model for Persistent Versioned Object %s." +msgstr "" +"バージョンが設定された永続オブジェクト %s 用のORMモデルが見つかりません。" + +#, python-format +msgid "Couldn't remove cluster %s because it doesn't exist." +msgstr "クラスター %s は存在しないため削除できませんでした。" + +#, python-format +msgid "Couldn't remove cluster %s because it still has hosts." +msgstr "クラスター %s はまだホストを持っているため削除できませんでした。" + #, python-format msgid "Counter %s not found" msgstr "カウンター %s が見つかりません" @@ -1697,7 +1970,7 @@ msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" -"バックアップの作成が中止しました。予期していたバックアップ状況は " +"バックアップの作成が中止しました。予期していたバックアップの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format @@ -1705,20 +1978,9 @@ msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" -"バックアップの作成が中止しました。予期していたボリューム状況は " +"バックアップの作成が中止しました。予期していたボリュームの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" -msgid "Create consistency group failed." -msgstr "整合性グループの作成に失敗しました。" - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"イメージ %(image)s からのタイプ %(type)s の暗号化されたボリュームの作成はサ" -"ポートされません。" - msgid "Create export for volume failed." msgstr "ボリュームのエクスポートの作成に失敗しました。" @@ -1742,7 +2004,7 @@ msgid "Create lungroup error." msgstr "LUN グループ作成のエラー。" msgid "Create manager volume flow failed." -msgstr "マネージャーボリュームフローの作成が失敗しました" +msgstr "マネージャーボリュームフローの作成が失敗しました。" msgid "Create port group error." msgstr "ポートグループ作成のエラー。" @@ -1806,19 +2068,13 @@ msgstr "" "現在、ボリューム %(vol)s のマップ済みホストは、サポート対象ではない " "%(group)s のホストグループ内にあります。" -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "非推奨: Cinder API の v1 をデプロイしてください。" - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "提供を終了しています: Cinder API の v2 を実装してください。" - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "DRBDmanage ドライバーのエラー: 予期されたキー \"%s\" が答えに含まれていませ" -"ん。DRBDmanage のバージョンが間違っていませんか。" +"ん。DRBDmanage のバージョンが間違っている可能性があります。" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " @@ -1831,7 +2087,7 @@ msgstr "" msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "" "DRBDmanage が 1 つのリソース (\"%(res)s\") を予期しましたが、%(n)d が得られま" -"した" +"した。" #, python-format msgid "" @@ -1896,13 +2152,13 @@ msgid "Dedup luns cannot be extended" msgstr "Dedup luns は拡張できません" msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" +"Deduplication Enabler is not installed. 
Can not create deduplicated volume." msgstr "" "重複排除イネーブラーがインストールされていません。重複排除されたボリュームを" -"作成できません" +"作成できません。" -msgid "Default pool name if unspecified." -msgstr "デフォルトのプール名 (プール名を指定していない場合)" +msgid "Default group type can not be found." +msgstr "デフォルトのグループ種別が見つかりません。" #, python-format msgid "" @@ -1917,12 +2173,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "デフォルトのボリュームタイプが見つかりません。" -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"一連の影響を受けるプールに加え関連するバックエンドのクエリー文字列を定義しま" -"す。" - msgid "Delete LUNcopy error." msgstr "LUN コピー削除のエラー。" @@ -1984,12 +2234,18 @@ msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" -"バックアップの削除が中止しました。予期していたバックアップ状況は " +"バックアップの削除が中止しました。予期していたバックアップの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" msgid "Deleting volume from database and skipping rpc." msgstr "データベースからボリュームを作成中。rpc をスキップします。" +#, python-format +msgid "Deleting volume metadata is not allowed for volumes in %s status." +msgstr "" +"ボリュームの状態が %s である場合は、ボリュームメタデータの削除は許可されませ" +"ん。" + #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "ゾーンの削除に失敗しました: (command=%(cmd)s error=%(err)s)。" @@ -2006,10 +2262,8 @@ msgstr "" #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" -"Dell Cinder ドライバーの設定エラー の replication_device %s が見つかりません" - -msgid "Deploy v3 of the Cinder API." -msgstr "Cinder API の v3 を実装してください。" +"Dell Cinder ドライバーの設定エラー の replication_device %s が見つかりませ" +"ん。" msgid "Describe-resource is admin only functionality" msgstr "Describe-resource は管理者専用の機能です" @@ -2032,9 +2286,15 @@ msgstr "" msgid "Detach volume from instance and then try again." msgstr "ボリュームをインスタンスから切り離して、再試行してください。" +#, python-format +msgid "Detaching volume %(volumename)s from host %(hostname)s failed: %(err)s" +msgstr "" +"ホスト %(hostname)s からの ボリューム%(volumename)s の切断に失敗しました : " +"%(err)s" + #, python-format msgid "Detected more than one volume with name %(vol_name)s" -msgstr "名前 %(vol_name)s を持つ複数のボリュームが検出されました" +msgstr "名前 %(vol_name)s を持つ複数のボリュームが検出されました。" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." @@ -2051,6 +2311,9 @@ msgstr "「無効理由」に無効な文字が含まれているか、理由が msgid "Domain with name %s wasn't found." msgstr "名前が %s のドメインが見つかりませんでした。" +msgid "Down Hosts" +msgstr "停止中のホスト" + #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " @@ -2064,15 +2327,8 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "ドライバーの初期化接続に失敗しました (エラー: %(err)s)。" -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"ボリューム (LUN {}) にマイグレーションが禁止されているスナップショットが含ま" -"れているため、ドライバーのタイプを変更することができません。" - msgid "Driver must implement initialize_connection" -msgstr "ドライバーは initialize_connection を実装する必要があります" +msgstr "ドライバーは initialize_connection を実装する必要があります。" #, python-format msgid "" @@ -2082,6 +2338,30 @@ msgstr "" "ドライバーがインポートされたバックアップデータを正常に復号化しましたが、欠け" "ているフィールド (%s) があります。" +msgid "Dsware config file not exists!" +msgstr "Dsware 設定ファイルが見つかりません。" + +#, python-format +msgid "Dsware create volume failed! Result is: %s." +msgstr "Dsware でボリュームの作成に失敗しました。 結果は %s です。" + +#, python-format +msgid "Dsware fails to start cloning volume %s." +msgstr "Dsware がボリューム %s のクローニングの開始に失敗しました。" + +msgid "Dsware get manager ip failed, volume provider_id is None!" 
+msgstr "" +"Dsware が管理 ip の取得に失敗しました。 volume provider_id が None です。" + +#, python-format +msgid "Dsware: create volume from snap failed. Result: %s." +msgstr "" +"Dsware: スナップショットからのボリュームの作成に失敗しました。 結果: %s" + +msgid "Dsware: volume size can not be less than snapshot size." +msgstr "" +"Dsware: ボリュームのサイズはスナップショットのサイズより小さくはできません。" + #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " @@ -2561,6 +2841,10 @@ msgstr "" "イニシエーターのグループへの追加エラー: %(groupName)s。戻りコード: " "%(rc)lu。 エラー: %(error)s。" +#, python-format +msgid "Error adding volume %(vol)s to %(sg). %(error)s." +msgstr " %(sg) へのボリューム %(vol)s の追加エラー。%(error)s。" + #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "複合ボリュームへのボリュームの追加エラー。エラー: %(error)s。" @@ -2578,10 +2862,6 @@ msgstr "" "ストレージグループ %(storageGroupName)s の FAST ポリシー %(fastPolicyName)sへ" "の関連付けエラーです。エラーの説明: %(errordesc)s。" -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "ボリューム %s の接続エラー。ターゲットの制限に達します。" - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2642,7 +2922,7 @@ msgstr "" "CloudByte API [%(cmd)s] の実行中にエラーが発生しました。エラー: %(err)s。" msgid "Error executing EQL command" -msgstr "EQL コマンドを実行するときにエラーが発生しました" +msgstr "EQL コマンドを実行するときにエラーが発生しました。" #, python-format msgid "Error executing command via ssh: %s" @@ -2721,6 +3001,11 @@ msgstr "" "レプリケーションターゲットの詳細の取得中にエラーが発生しました。戻りコード: " "%(ret.status)d メッセージ: %(ret.data)s。" +#, python-format +msgid "Error getting sdc id from ip %(sdc_ip)s: %(sdc_id_message)s" +msgstr "" +"IP %(sdc_ip)s からの sdc id の取得でエラーが発生しました: %(sdc_id_message)s" + #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " @@ -2744,7 +3029,7 @@ msgstr "SolidFire API 応答にエラーがあります: data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "%(size)d GB のサイズの %(space)s のスペースの作成のエラー" +msgstr "%(size)d GB のサイズの %(space)s のスペースの作成のエラーです。" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" @@ -2754,10 +3039,6 @@ msgstr "追加の %(size)d GB のボリューム %(space)s のスペース拡張 msgid "Error managing volume: %s." msgstr "ボリュームの管理中にエラーが発生しました: %s。" -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "ボリューム %(vol)s のマッピングエラー。%(error)s。" - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2801,16 +3082,13 @@ msgstr "cgsnapshot %s を削除中にエラーが発生しました。" msgid "Error occurred when updating consistency group %s." msgstr "整合性グループ %s を更新中にエラーが発生しました。" -#, python-format -msgid "Error parsing config file: %s" -msgstr "構成ファイルの解析エラー: %s" - msgid "Error promoting secondary volume to primary" -msgstr "2 次ボリュームの 1 次ボリュームへのプロモート中にエラーが発生しました" +msgstr "" +"2 次ボリュームの 1 次ボリュームへのプロモート中にエラーが発生しました。" #, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "ボリューム %(vol)s の削除エラー。%(error)s。" +msgid "Error removing volume %(vol)s from %(sg). %(error)s." +msgstr " %(sg) からのボリューム %(vol)s の削除エラー。%(error)s。" #, python-format msgid "Error renaming volume %(vol)s: %(err)s." @@ -2881,7 +3159,7 @@ msgstr "ファームウェアバージョン %s の検査中にエラーが発 #, python-format msgid "Error while checking transaction status: %s" -msgstr "トランザクション状況の検査中にエラーが発生しました: %s" +msgstr "トランザクション状態の検査中にエラーが発生しました: %s" #, python-format msgid "Error while checking whether VF is available for management %s." 
@@ -2988,6 +3266,10 @@ msgstr "" msgid "Error writing field to database" msgstr "データベースへのフィールドの書き込みに失敗しました" +#, python-format +msgid "Error: Failed to %(operation_type)s %(component)s" +msgstr "エラー: 失敗しました。%(operation_type)s %(component)s" + #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "ボリューム ID の取得中にエラーが発生しました [%(stat)s - %(res)s]。" @@ -3069,16 +3351,24 @@ msgstr "" #, python-format msgid "Expected volume size was %d" -msgstr "予期されたボリュームサイズは %d でした" +msgstr "予期されたボリュームサイズは %d でした。" + +#, python-format +msgid "Export Group %s: not found" +msgstr "エクスポートグループ %s: 見つかりませんでした。" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" -"バックアップのエクスポートが中止しました。予期していたバックアップ状況は " +"バックアップのエクスポートが中止しました。予期していたバックアップの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" +#, python-format +msgid "Export group with name %s already exists" +msgstr "名前 %s を持つエクスポートグループはすでに存在します。" + #, python-format msgid "" "Export record aborted, the backup service currently configured " @@ -3099,13 +3389,13 @@ msgstr "" "対してサポートされます。" msgid "Extend volume not implemented" -msgstr "ボリュームの拡張が実装されていません" +msgstr "ボリュームの拡張が実装されていません。" msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" +"FAST VP Enabler is not installed. Can not set tiering policy for the volume." msgstr "" "FAST VP イネーブラーがインストールされていません。ボリュームの階層化ポリシー" -"を設定できません" +"を設定できません。" msgid "FAST is not supported on this array." msgstr "FAST はこのアレイでサポートされていません。" @@ -3113,6 +3403,36 @@ msgstr "FAST はこのアレイでサポートされていません。" msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC がプロトコルですが、wwpns が OpenStack によって提供されていません。" +msgid "FSS cinder volume driver not ready: Unable to determine session id." +msgstr "" +"FSS cinder ボリュームドライバーが準備できていません: セッション ID を判別でき" +"ません。" + +msgid "FSS do not support multipathing." +msgstr "FSS はマルチパスをサポートしていません。" + +#, python-format +msgid "FSS get mirror sync timeout on vid: %s " +msgstr "FSS でミラー同期がタイムアウトしました。vid : %s " + +#, python-format +msgid "FSS get timemark copy timeout on vid: %s" +msgstr "FSS でタイムマークコピーがタイムアウトしました。vid : %s " + +#, python-format +msgid "" +"FSS rest api return failed, method=%(method)s, uri=%(url)s, response=" +"%(response)s" +msgstr "" +"FSS rest API が失敗を返しました。メソッド = %(method)s 、 url = %(url)s 、 " +"レスポンス = %(response)s" + +msgid "" +"FSSISCSIDriver manage_existing requires vid to identify an existing volume." +msgstr "" +"FSSISCSI ドライバーで既存のボリュームを特定するには、 manage_existing で vid " +"が必要です。" + #, python-format msgid "Faield to unassign %(volume)s" msgstr "%(volume)s の割り当て解除に失敗しました" @@ -3126,7 +3446,7 @@ msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "ファブリック %(fabric)s の接続の追加に失敗しました。エラー: %(err)s" msgid "Failed cgsnapshot" -msgstr "cgsnapshot が失敗しました" +msgstr "cgsnapshot が失敗しました。" #, python-format msgid "Failed creating snapshot for group: %(response)s." @@ -3151,7 +3471,7 @@ msgstr "ファブリック %(fabric)s の接続の削除に失敗しました。 #, python-format msgid "Failed to Extend Volume %(volname)s" -msgstr "ボリューム %(volname)s を拡張できませんでした" +msgstr "ボリューム %(volname)s を拡張できませんでした。" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" @@ -3172,10 +3492,6 @@ msgstr "" "リソースロックを獲得できませんでした。(シリアル: %(serial)s、inst: %(inst)s、" "ret: %(ret)s、stderr: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." 
-msgstr "%(vol)s の %(sg)s への追加が、%(retries)s 回の再試行後に失敗しました。" - msgid "Failed to add the logical device." msgstr "論理デバイスを追加できませんでした。" @@ -3247,6 +3563,13 @@ msgstr "イメージをボリュームにコピーできませんでした: %(re msgid "Failed to copy metadata to volume: %(reason)s" msgstr "メタデータをボリュームにコピーできませんでした: %(reason)s" +msgid "" +"Failed to copy volume to image as image quota has been met. Please delete " +"images or have your limit increased, then try again." +msgstr "" +"イメージのクォータに到達したため、ボリュームのイメージへのコピーが失敗しまし" +"た。イメージを削除するか、上限値を増やして再試行してください。" + msgid "Failed to copy volume, destination device unavailable." msgstr "ボリュームのコピーに失敗しました。宛先デバイスが使用できません。" @@ -3262,9 +3585,6 @@ msgstr "" msgid "Failed to create IG, %s" msgstr "IG を作成できませんでした。%s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "SolidFire イメージボリュームの作成に失敗しました" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "ボリュームグループを作成できませんでした: %(vg_name)s" @@ -3377,10 +3697,7 @@ msgstr "スケジューラーマネージャーのボリュームフローを作 #, python-format msgid "Failed to create snapshot %s" -msgstr "スナップショット %s の作成に失敗しました" - -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "LUN ID が指定されていないため、スナップショットの作成に失敗しました" +msgstr "スナップショット %s の作成に失敗しました。" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." @@ -3427,7 +3744,7 @@ msgstr "シンプールの作成に失敗しました。エラーメッセージ #, python-format msgid "Failed to create volume %s" -msgstr "ボリューム %s の作成に失敗しました" +msgstr "ボリューム %s の作成に失敗しました。" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." @@ -3482,7 +3799,7 @@ msgstr "複製の削除に失敗しました。" #, python-format msgid "Failed to delete snapshot %s" -msgstr "スナップショット %s の削除に失敗しました" +msgstr "スナップショット %s の削除に失敗しました。" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." @@ -3531,19 +3848,19 @@ msgstr "" "スナップショットリソースエリアの確認に失敗しました。ID %s のボリュームを見つ" "けることができませんでした" -msgid "Failed to establish SSC connection." -msgstr "SSC 接続の確立に失敗しました。" +msgid "Failed to establish a stable connection" +msgstr "安定した接続の確立に失敗しました。" msgid "Failed to establish connection with Coho cluster" -msgstr "Coho クラスターとの接続に失敗しました" +msgstr "Coho クラスターとの接続に失敗しました。" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" -"CloudByte API [%(cmd)s] の実行に失敗しました。HTTP 状況: %(status)s、エラー: " -"%(error)s。" +"CloudByte API [%(cmd)s] の実行に失敗しました。HTTP ステータス: %(status)s、エ" +"ラー: %(error)s。" msgid "Failed to execute common command." msgstr "共通のコマンドの実行に失敗しました。" @@ -3557,7 +3874,7 @@ msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "ボリューム %(name)s の拡張に失敗しました。エラーメッセージ: %(msg)s。" msgid "Failed to find QoSnode" -msgstr "QoSNode が見つかりません" +msgstr "QoSNode が見つかりません。" msgid "Failed to find Storage Center" msgstr "ストレージセンターが見つかりませんでした" @@ -3566,7 +3883,15 @@ msgid "Failed to find a vdisk copy in the expected pool." msgstr "予期されるプールに vdisk コピーが見つかりませんでした。" msgid "Failed to find account for volume." -msgstr "ボリュームのアカウントが見つかりませんでした" +msgstr "ボリュームのアカウントが見つかりませんでした。" + +#, python-format +msgid "Failed to find available FC targets for %s." +msgstr "%s で利用可能な FCターゲットを見つけることに失敗しました。" + +#, python-format +msgid "Failed to find available iSCSI targets for %s." +msgstr "%s で利用可能な iSCSI ターゲットを見つけることに失敗しました。" #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." 
@@ -3576,7 +3901,7 @@ msgstr "" #, python-format msgid "Failed to find group snapshot named: %s" -msgstr "%s という名前のグループスナップショットが見つかりませんでした" +msgstr "%s という名前のグループスナップショットが見つかりませんでした。" #, python-format msgid "Failed to find host %s." @@ -3588,10 +3913,6 @@ msgstr "" "%(initiator)s を含む iSCSI イニシエーターグループを見つけることに失敗しまし" "た。" -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "ソースボリューム %s のストレージプールの検出に失敗しました。" - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "アカウント [%s] の CloudByte アカウント詳細を取得できませんでした。" @@ -3637,7 +3958,7 @@ msgstr "" #, python-format msgid "Failed to get all associations of qos specs %s" -msgstr "qos 仕様 %s のすべての関連付けは取得できませんでした" +msgstr "qos 仕様 %s のすべての関連付けは取得できませんでした。" msgid "Failed to get channel info." msgstr "チャンネル情報の取得に失敗しました。" @@ -3653,6 +3974,14 @@ msgstr "デバイス情報の取得に失敗しました。" msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "CPG (%s) がアレイ上に存在しないため、ドメインを取得できませんでした。" +#, python-format +msgid "Failed to get iSCSI target info for the LUN: %s" +msgstr "LUN の iSCSI ターゲット情報の取得に失敗しました: %s" + +#, python-format +msgid "Failed to get iSCSI target info for the LUN: %s." +msgstr "LUN の iSCSI ターゲット情報の取得に失敗しました: %s" + msgid "Failed to get image snapshots." msgstr "イメージ のスナップショットの獲得に失敗しました。" @@ -3678,7 +4007,7 @@ msgid "Failed to get migration task." msgstr "マイグレーションタスクの取得に失敗しました。" msgid "Failed to get model update from clone" -msgstr "複製からのモデル更新の取得に失敗しました" +msgstr "複製からのモデル更新の取得に失敗しました。" msgid "Failed to get name server info." msgstr "ネームサーバー情報の取得に失敗しました。" @@ -3817,27 +4146,6 @@ msgstr "" "変更されたサイズ %(size)s が浮動小数点数ではなかったため、既存ボリューム " "%(name)s の管理に失敗しました。" -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"選択されたボリューム種別のプールがボリューム参照で渡された NFS 共有と一致しな" -"いため、既存のボリュームの管理に失敗しました。" - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"選択されたボリューム種別のプールが、ボリューム参照で渡されたファイルシステム" -"と一致しないため、既存のボリュームの管理に失敗しました。" - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"選択されたボリューム種別のプールがホストのプールと一致しないため、既存のボ" -"リュームの管理に失敗しました。" - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -3905,6 +4213,17 @@ msgstr "" "論理デバイスをマッピングできませんでした。(LDEV: %(ldev)s、LUN: %(lun)s、ポー" "ト: %(port)s、ID: %(id)s)" +#, python-format +msgid "Failed to migrate volume %(src)s." +msgstr "ボリューム %(src)s のマイグレーションに失敗しました。" + +#, python-format +msgid "" +"Failed to migrate volume between source vol %(src)s and dest vol %(dst)s." +msgstr "" +"ソースボリューム %(src)s と宛先ボリューム %(dst)s の間のボリュームのマイグ" +"レーションが失敗しました。" + msgid "Failed to migrate volume for the first time." 
msgstr "ボリュームのマイグレーションに失敗しました (初回)。" @@ -3937,6 +4256,13 @@ msgstr "" " stdout: %(out)s\n" " stderr: %(err)s。" +msgid "" +"Failed to parse the configuration option 'glance_catalog_info', must be in " +"the form ::" +msgstr "" +"構成オプション 'glance_catalog_info' の解析に失敗しました。:" +": という形式でなければなりません。" + msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" @@ -3960,6 +4286,14 @@ msgstr "" "ゼロページレクラメーションを実行できませんでした。(LDEV: %(ldev)s、理由: " "%(reason)s)" +#, python-format +msgid "" +"Failed to recognize JSON payload:\n" +"[%s]" +msgstr "" +"JSON ペイロードの認識に失敗しました:\n" +"[%s]" + #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "ボリューム %(volume)s のエクスポートを削除できませんでした: %(reason)s" @@ -4006,6 +4340,10 @@ msgstr "" msgid "Failed to retrieve active zoning configuration %s" msgstr "アクティブなゾーニング構成 %s の取得に失敗しました" +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "ボリューム %(name)s の接続を取得できません。" + #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" @@ -4096,7 +4434,7 @@ msgstr "スナップショットの更新に失敗しました。" #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "" -"ドライバーで指定されたモデル %(model)s によるモデルの更新に失敗しました" +"ドライバーで指定されたモデル %(model)s によるモデルの更新に失敗しました。" #, python-format msgid "" @@ -4106,6 +4444,10 @@ msgstr "" "指定の %(src_type)s %(src_id)s メタデータを使用してボリューム %(vol_id)s メタ" "データを更新することができませんでした" +msgid "Failover requested on non replicated backend." +msgstr "" +"複製されていないバックエンド上でフェイルオーバーがリクエストされました。" + #, python-format msgid "Failure creating volume %s." msgstr "ボリューム %s の作成に失敗しました。" @@ -4187,9 +4529,6 @@ msgstr "ホスト LUN ID 検索のエラー。" msgid "Find lun group from mapping view error." msgstr "マッピングビューからの LUN グループ検索のエラー。" -msgid "Find lun number error." -msgstr "LUN 番号検索のエラー。" - msgid "Find mapping view error." msgstr "マッピングビュー検索のエラー。" @@ -4400,6 +4739,10 @@ msgstr "Flexvisor ボリューム %(id)s はグループ %(vgid)s の結合に msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "フォルダー %s は Nexenta Store アプライアンスに存在しません" +#, python-format +msgid "GET method is not supported by resource: %s" +msgstr "GET メソッドはリソースではサポートされていません: %s" + #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS が実行されていません。状態: %s。" @@ -4483,6 +4826,12 @@ msgstr "LUN ID による LUN グループ ID 取得のエラー。" msgid "Get lungroup information error." msgstr "LUN グループの情報取得のエラー。" +msgid "Get manageable snapshots not implemented." +msgstr "管理可能スナップショットの取得は実装されていません。" + +msgid "Get manageable volumes not implemented." +msgstr "管理可能ボリュームの取得は実装されていません。" + msgid "Get migration task error." msgstr "マイグレーションタスク取得のエラー。" @@ -4568,17 +4917,120 @@ msgstr "Google Cloud Storage の oauth2 エラー: %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "DRBDmanage (%s) から不正なパスの情報が提供されました" +msgstr "DRBDmanage (%s) から不正なパスの情報が提供されました。" + +#, python-format +msgid "Group %(group_id)s could not be found." +msgstr "グループ %(group_id)s が見つかりませんでした。" + +#, python-format +msgid "" +"Group %s still contains volumes. The delete-volumes flag is required to " +"delete it." +msgstr "" +"グループ %s にはまだボリュームがあります。これを削除するには delete-volumes " +"フラグが必要です。" + +#, python-format +msgid "" +"Group Type %(group_type_id)s deletion is not allowed with groups present " +"with the type." +msgstr "" +"グループ種別 %(group_type_id)s を持つグループでは、そのグループ種別は削除でき" +"ません。" + +#, python-format +msgid "Group Type %(group_type_id)s has no specs with key %(group_specs_key)s." 
+msgstr "" +"グループ種別 %(group_type_id)s にはキー %(group_specs_key)s を持つスペックは" +"ありません。" + +#, python-format +msgid "Group Type %(id)s already exists." +msgstr "グループ種別 %(id)s は既に存在します。" + +#, python-format +msgid "Group Type %(type_id)s has no extra spec with key %(id)s." +msgstr "" +"グループ種別 %(type_id)s には、キー %(id)s に関する追加の仕様がありません。" + +#, python-format +msgid "Group status must be available or error, but current status is: %s" +msgstr "" +"グループの状態は「使用可能」または「エラー」でなければなりませんが、現在の状" +"態は %s です。" + +#, python-format +msgid "Group status must be available, but current status is: %s." +msgstr "" +"グループの状態は「使用可能」でなければなりませんが、現在の状態は %s です。" + +#, python-format +msgid "Group type %(group_type_id)s could not be found." +msgstr "グループ種別 %(group_type_id)s が見つかりませんでした。" + +#, python-format +msgid "" +"Group type access for %(group_type_id)s / %(project_id)s combination already " +"exists." +msgstr "" +"%(group_type_id)s / %(project_id)s の組み合わせのグループ種別アクセスは既に存" +"在します。" + +#, python-format +msgid "" +"Group type access not found for %(group_type_id)s / %(project_id)s " +"combination." +msgstr "" +"%(group_type_id)s / %(project_id)s の組み合わせのグループ種別アクセスが見つか" +"りません。" + +#, python-format +msgid "Group type encryption for type %(type_id)s already exists." +msgstr "タイプ %(type_id)s のグループ種別暗号化は既に存在します。" + +#, python-format +msgid "Group type encryption for type %(type_id)s does not exist." +msgstr "タイプ %(type_id)s に対するグループ種別暗号化は存在しません。" + +msgid "Group type name can not be empty." +msgstr "グループ種別名を空にすることはできません" + +#, python-format +msgid "Group type with name %(group_type_name)s could not be found." +msgstr "名前 %(group_type_name)s を持つグループ種別が見つかりませんでした。" + +#, python-format +msgid "" +"Group volume type mapping for %(group_id)s / %(volume_type_id)s combination " +"already exists." +msgstr "" +" %(group_id)s / %(volume_type_id)s の組み合わせのグループボリューム種別のマッ" +"ピングはすでに存在します。" + +#, python-format +msgid "GroupSnapshot %(group_snapshot_id)s could not be found." +msgstr "" +"グループスナップショット %(group_snapshot_id)s は見つかりませんでした。" msgid "HBSD error occurs." msgstr "HBSD エラーが発生しました。" -msgid "HNAS has disconnected SSC" -msgstr "HNAS に接続解除された SSC があります" - msgid "HPELeftHand url not found" msgstr "HPELeftHand url が見つかりません" +#, python-format +msgid "HTTP code: %(status_code)s, %(reason)s [%(error_msg)s]" +msgstr "HTTP コード: %(status_code)s, %(reason)s [%(error_msg)s]" + +#, python-format +msgid "HTTP code: %(status_code)s, response: %(reason)s [%(error_msg)s]" +msgstr "HTTP コード: %(status_code)s 、レスポンス: %(reason)s [%(error_msg)s]" + +#, python-format +msgid "HTTP exit code: [%(code)s]" +msgstr "HTTP 終了コード : [%(code)s]" + #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " @@ -4592,6 +5044,9 @@ msgstr "" msgid "Have not created %(tier_levels)s tier(s)." msgstr "%(tier_levels)s 層が作成されていません。" +msgid "Heartbeat" +msgstr "ハートビート" + #, python-format msgid "Hint \"%s\" not supported." msgstr "ヒント「%s」はサポートされていません。" @@ -4615,14 +5070,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "ホスト %s に FC イニシエーターがありません" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "ホスト %s に iSCSI イニシエーターがありません" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "ホスト '%s' が見つかりませんでした。" - #, python-format msgid "Host group with name %s not found" msgstr "名前が %s のホストグループが見つかりません" @@ -4637,8 +5084,8 @@ msgstr "ホストは固定化されていません。" msgid "Host is already Frozen." 
msgstr "ホストは既に固定化されています。" -msgid "Host not found" -msgstr "ホストが見つかりません" +msgid "Host must be specified in query parameters" +msgstr "クエリーパラメーターではホストの指定が必要です。" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." @@ -4655,10 +5102,17 @@ msgstr "" msgid "Host type %s not supported." msgstr "ホストタイプ %s はサポートされていません。" +#, python-format +msgid "Host with name: %s not found" +msgstr "名前 %s を持つホスト: 見つかりませんでした。" + #, python-format msgid "Host with ports %(ports)s not found." msgstr "ポート %(ports)s が設定されたホストが見つかりません。" +msgid "Hosts" +msgstr "ホスト" + msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "Hypermetro とレプリケーションは、同一の volume_type で使用できません。" @@ -4671,9 +5125,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "Blockbridge API の IP アドレスとホスト名。" - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" @@ -4684,6 +5135,12 @@ msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "nofmtdisk が True に設定される場合、rsize も -1 に設定しなければなりません。" +msgid "" +"If you want to create a thin provisioning volume, this param must be True." +msgstr "" +"シンプロビジョニングボリュームを作成したい場合は、このパラメーターは True で" +"なければいけません。" + #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " @@ -4729,6 +5186,9 @@ msgstr "イメージ %(image_id)s は受け入れられません: %(reason)s" msgid "Image location not present." msgstr "イメージロケーションが存在しません。" +msgid "Image quota exceeded" +msgstr "イメージのクォータを超えました。" + #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " @@ -4754,8 +5214,16 @@ msgstr "" "レコードのインポートに失敗しました。インポートを実行するバックアップサービス" "が見つかりません。要求サービス %(service)s" +#, python-format +msgid "" +"Incorrect port number. Load balanced port is: %(lb_api_port)s, api service " +"port is: %(apisvc_port)s" +msgstr "" +"正しくないポート番号です。ロードバランス用ポート: %(lb_api_port)s 、APIサービ" +"ス用ポート: %(apisvc_port)s" + msgid "Incorrect request body format" -msgstr "要求本体の形式が正しくありません" +msgstr "要求本体の形式が正しくありません。" msgid "Incorrect request body format." msgstr "要求本体の形式が正しくありません。" @@ -4771,11 +5239,8 @@ msgstr "" "Infortrend CLI の例外: %(err)s。パラメーター: %(param)s (戻りコード: %(rc)s) " "(出力: %(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "当初の層 {} のポリシー {} が無効です。" - -msgid "Input type {} is not supported." -msgstr "入力タイプ {} はサポートされません。" +msgid "Initiators of host cannot be empty." +msgstr "ホストのイニシエーターは空にできません。" msgid "Input volumes or snapshots are invalid." msgstr "入力ボリュームまたはスナップショットが無効です。" @@ -4793,13 +5258,6 @@ msgstr "ボリュームを拡張するために十分な空きスペースがあ msgid "Insufficient privileges" msgstr "不十分な権限" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "ceph クラスターへの接続と接続の間の間隔の値 (秒)。" - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "無効な %(protocol)s ポート %(port)s が io_port_list に指定されました。" - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "無効な 3PAR ドメイン: %(err)s" @@ -4808,7 +5266,7 @@ msgid "Invalid ALUA value. ALUA value must be 1 or 0." 
msgstr "無効な ALUA 値。ALUA 値は、1 または 0 でなければなりません。" msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "バックアップ rbd 操作に指定された Ceph 引数が無効です" +msgstr "バックアップ RBD 操作に指定された Ceph 引数が無効です。" #, python-format msgid "Invalid CgSnapshot: %(reason)s" @@ -4818,9 +5276,37 @@ msgstr "無効な CgSnapshot: %(reason)s" msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "無効な ConsistencyGroup: %(reason)s" +#, python-format +msgid "" +"Invalid ConsistencyGroup: Cannot delete consistency group %(id)s. " +"%(reason)s, and it cannot be the source for an ongoing CG or CG Snapshot " +"creation." +msgstr "" +"無効な整合性グループ : 整合性グループ %(id)s を削除できません。理由 : " +"%(reason)s 。 これを進行中の CG または CG スナップショットのソースとすること" +"はできません。" + +#, python-format +msgid "" +"Invalid ConsistencyGroup: Cannot update consistency group %s, status must be " +"available, and it cannot be the source for an ongoing CG or CG Snapshot " +"creation." +msgstr "" +"無効な整合性グループ : 整合性グループ %s を更新できません。状態は「使用可能」" +"である必要があります。 これを進行中の CG または CG スナップショットのソースと" +"することはできません。" + msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" -"無効な ConsistencyGroup: 整合性グループを作成するためのホストがありません。" +"無効な整合性グループ : 整合性グループを作成するためのホストがありません。" + +#, python-format +msgid "Invalid Group: %(reason)s" +msgstr "無効なグループ: %(reason)s" + +#, python-format +msgid "Invalid GroupSnapshot: %(reason)s" +msgstr "無効なグループスナップショット: %(reason)s" #, python-format msgid "" @@ -4844,10 +5330,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "無効なレプリケーションターゲット: %(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "無効な VNX 認証タイプ: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -4867,14 +5349,14 @@ msgstr "" "以下のプロジェクトのクォータに定義した割り当て済みのクォータが無効です: %s" msgid "Invalid argument" -msgstr "引数が無効です" +msgstr "引数が無効です。" msgid "Invalid argument - negative seek offset." msgstr "引数が無効です。シークオフセットが負の値です。" #, python-format msgid "Invalid argument - whence=%s not supported" -msgstr "引数が無効です: whence=%s はサポートされていません" +msgstr "引数が無効です: whence=%s はサポートされていません。" #, python-format msgid "Invalid argument - whence=%s not supported." @@ -4884,6 +5366,10 @@ msgstr "引数が無効です。whence=%s はサポートされません。" msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "接続モード '%(mode)s' はボリューム %(volume_id)s には無効です。" +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "ボリューム %(name)s の接続が無効です : %(reason)s" + #, python-format msgid "Invalid auth key: %(reason)s" msgstr "認証キーが無効です: %(reason)s" @@ -4893,12 +5379,10 @@ msgid "Invalid backup: %(reason)s" msgstr "無効なバックアップ: %(reason)s" #, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" +msgid "Invalid body provided for creating volume. Request API version: %s." msgstr "" -"Barbican API の URL が無効です: バージョンが必要です ('http[s]://|" -"[:port]/' など)。指定された URL は %s です。" +"ボリューム作成に、無効な本文が指定されました。リクエストされた API のバージョ" +"ン: %s" msgid "Invalid chap user details found in CloudByte storage." msgstr "CloudByte のストレージで無効な chap ユーザーの詳細が見つかりました。" @@ -4939,6 +5423,22 @@ msgstr "ディスクタイプが無効です: %(disk_type)s。" msgid "Invalid disk type: %s." msgstr "ディスクタイプが無効です: %s。" +#, python-format +msgid "" +"Invalid disk-format '%(disk_format)s' is specified. Allowed disk-formats are " +"%(allowed_disk_formats)s." 
+msgstr "" +"無効なディスクフォーマット '%(disk_format)s' が指定されました。許可される" +"ディスクフォーマットは %(allowed_disk_formats)s です。" + +#, python-format +msgid "Invalid filter keys: %s" +msgstr "無効なフィルターキー : %s" + +#, python-format +msgid "Invalid group type: %(reason)s" +msgstr "無効なグループ種別: %(reason)s" + #, python-format msgid "Invalid host: %(reason)s" msgstr "無効なホスト: %(reason)s" @@ -5054,9 +5554,17 @@ msgstr "シープドッグクラスターの状態が無効です。" msgid "Invalid snapshot: %(reason)s" msgstr "無効なスナップショット: %(reason)s" +#, python-format +msgid "Invalid sort dirs passed: %s" +msgstr "無効なソート方向が渡されました : %s" + +#, python-format +msgid "Invalid sort keys passed: %s" +msgstr "無効なソートキーが渡されました : %s" + #, python-format msgid "Invalid status: '%s'" -msgstr "無効な状況: '%s'" +msgstr "無効な状態: '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." @@ -5069,10 +5577,6 @@ msgstr "無効なストレージプール %s が指定されました。" msgid "Invalid storage pool is configured." msgstr "無効なストレージプールが設定されています。" -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "無効な同期モードが指定されました。許可されるモードは %s です。" - msgid "Invalid transport type." msgstr "無効なトランスポートタイプ。" @@ -5081,12 +5585,8 @@ msgid "Invalid update setting: '%s'" msgstr "無効な更新設定: '%s'" #, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"URL が無効です: 'http[s]://|[:port]/' の形式である必要" -"があります。指定された URL は %s です" +msgid "Invalid value '%s' for delete-volumes flag." +msgstr "delete-volumes フラッグの値 '%s' が無効です。" #, python-format msgid "Invalid value '%s' for force." @@ -5233,9 +5733,6 @@ msgstr "" "レプリケーションが適切に設定されていないため、fail-over の発行が失敗しまし" "た。" -msgid "Item not found" -msgstr "項目が見つかりません" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" @@ -5248,6 +5745,14 @@ msgstr "" "CloudByte のボリューム [%s] 削除に関するレスポンスにジョブ ID が見つかりませ" "ん。" +#, python-format +msgid "Kaminario retryable exception: %(reason)s" +msgstr "再試行可能な Kaminario 例外: %(reason)s" + +#, python-format +msgid "KaminarioCinderDriver failure: %(reason)s" +msgstr "Kaminario Cinder ドライバー障害です: %(reason)s" + msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." @@ -5268,22 +5773,23 @@ msgstr "" msgid "LU does not exist for volume: %s" msgstr "ボリューム: %s 用の LU は存在しません" +msgid "LUN doesn't exist." +msgstr "LUN が存在しません。" + msgid "LUN export failed!" msgstr "LUN のエクスポートが失敗しました。" -msgid "LUN id({}) is not valid." -msgstr "LUN id({}) は無効です。" - msgid "LUN map overflow on every channel." msgstr "すべてのチャンネルでの LUN マップのオーバーフロー。" +#, python-format +msgid "LUN not found by UUID: %(uuid)s." +msgstr "UUID %(uuid)s の LUN が見つかりません。" + #, python-format msgid "LUN not found with given ref %s." msgstr "指定された参照 %s を持つ LUN が見つかりません。" -msgid "LUN number ({}) is not an integer." -msgstr "LUN 番号 ({}) が整数ではありません。" - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 番号がチャンネル ID: %(ch_id)s の境界を越えています。" @@ -5313,6 +5819,10 @@ msgstr "" msgid "Lock acquisition failed." msgstr "ロックの取得に失敗しました。" +#, python-format +msgid "Login failure code: %(statuscode)s Error: %(responsetext)s" +msgstr "ログイン失敗 コード: %(statuscode)s エラー: %(responsetext)s" + msgid "Logout session error." msgstr "ログアウトセッションのエラー。" @@ -5424,6 +5934,9 @@ msgstr "" "FAST が有効化されている場合、ボリュームの管理はサポートされません。FAST ポリ" "シー: %(fastPolicyName)s。" +msgid "Manage volume type invalid." +msgstr "管理対象のボリューム種別が不正です。" + msgid "Managing of snapshots to failed-over volumes is not allowed." 
msgstr "" "フェイルオーバーされたボリュームへのスナップショットを管理対象にすることは許" @@ -5446,13 +5959,16 @@ msgstr "" msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "マスキングビュー %(maskingViewName)s は正常に削除されませんでした" +msgid "Maximum age is count of days since epoch." +msgstr "最長存続時間は、エポック以降の日数です。" + #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "バックアップの許容最大数 (%(allowed)d) を超えました" +msgstr "バックアップの許容最大数 (%(allowed)d) を超えました。" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "スナップショットの許容最大数 (%(allowed)d) を超えました" +msgstr "スナップショットの許容最大数 (%(allowed)d) を超えました。" #, python-format msgid "" @@ -5466,38 +5982,27 @@ msgstr "" msgid "May specify only one of %s" msgstr "指定できる %s は 1 つのみです" +#, python-format +msgid "Message %(message_id)s could not be found." +msgstr "メッセージ %(message_id)s が見つかりませんでした。" + msgid "Metadata backup already exists for this volume" -msgstr "このボリュームのメタデータバックアップは既に存在します" +msgstr "このボリュームのメタデータバックアップは既に存在します。" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "メタデータのバックアップオブジェクト '%s' は既に存在します" -msgid "Metadata item was not found" -msgstr "メタデータ項目が見つかりませんでした" - -msgid "Metadata item was not found." -msgstr "メタデータ項目が見つかりませんでした。" +#, python-format +msgid "Metadata property key %s greater than 255 characters." +msgstr "メタデータプロパティーのキー %s の文字数が255文字を超えています。" #, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "メタデータのプロパティーキー %s が 255 文字を超えています" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "メタデータのプロパティーキー %s 値が 255 文字を超えています" - -msgid "Metadata property key blank" -msgstr "メタデータプロパティーキーがブランクです" +msgid "Metadata property key %s value greater than 255 characters." +msgstr "メタデータプロパティーのキー %s の値の文字数が255文字を超えています。" msgid "Metadata property key blank." -msgstr "メタデータプロパティーのキーがブランクです。" - -msgid "Metadata property key greater than 255 characters." -msgstr "メタデータプロパティーのキーが 255 文字を超えています。" - -msgid "Metadata property value greater than 255 characters." -msgstr "メタデータプロパティー値が 255 文字を超えています。" +msgstr "メタデータプロパティーのキーが空です。" msgid "Metadata restore failed due to incompatible version" msgstr "バージョンの非互換のため、メタデータのリストアに失敗しました" @@ -5506,21 +6011,8 @@ msgid "Metadata restore failed due to incompatible version." msgstr "バージョンの非互換のため、メタデータのリストアに失敗しました。" #, python-format -msgid "Migrate volume %(src)s failed." -msgstr "ボリューム %(src)s のマイグレーションが失敗しました。" - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"ソースボリューム %(src)s と宛先ボリューム %(dst)s の間のボリュームのマイグ" -"レーションが失敗しました。" - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "LUN %s のマイグレーションが停止したか、障害が発生しました。" - -msgid "MirrorView/S enabler is not installed." -msgstr "MirrorView/S イネーブラーがインストールされていません。" +msgid "Method %(method)s is not defined" +msgstr "メソッド %(method)s は定義されていません。" msgid "" "Missing 'purestorage' python module, ensure the library is installed and " @@ -5534,14 +6026,18 @@ msgstr "" "Fibre Channel の SAN 構成パラメーターの fc_fabric_names が欠落しています" msgid "Missing request body" -msgstr "要求本体がありません" +msgstr "要求本体がありません。" msgid "Missing request body." msgstr "要求本体がありません。" +#, python-format +msgid "Missing required element '%(element)s' in request body." 
+msgstr "リクエストの本文に必要な要素 '%(element)s' がありません。" + #, python-format msgid "Missing required element '%s' in request body" -msgstr "要求本体に必須要素 '%s' がありません" +msgstr "要求本体に必須要素 '%s' がありません。" #, python-format msgid "Missing required element '%s' in request body." @@ -5550,8 +6046,8 @@ msgstr "リクエストの本文に必要な要素 '%s' がありません。" msgid "Missing required element 'consistencygroup' in request body." msgstr "リクエストの本文に必要な要素の 'consistencygroup' がありません。" -msgid "Missing required element 'host' in request body." -msgstr "リクエストの本文に必要な要素 'host' がありません。" +msgid "Missing required element 'delete' in request body." +msgstr "リクエストの本文に必要な要素の 'delete' がありません。" msgid "Missing required element quota_class_set in request body." msgstr "要求本体に必須要素 quota_class_set がありません。" @@ -5617,10 +6113,10 @@ msgid "Must specify 'host'." msgstr "'host' を指定する必要があります。" msgid "Must specify 'new_volume'" -msgstr "'new_volume' を指定する必要があります" +msgstr "'new_volume' を指定する必要があります。" msgid "Must specify 'status'" -msgstr "'status' を指定する必要があります" +msgstr "'status' を指定する必要があります。" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." @@ -5629,10 +6125,10 @@ msgstr "" "必要があります。" msgid "Must specify a valid attach status" -msgstr "有効な接続状況を指定してください" +msgstr "有効な接続状態を指定してください。" msgid "Must specify a valid migration status" -msgstr "有効なマイグレーション状況を指定してください" +msgstr "有効なマイグレーション状態を指定してください。" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." @@ -5648,7 +6144,7 @@ msgstr "" "'%(prov)s' は無効です。" msgid "Must specify a valid status" -msgstr "有効な状況を指定してください" +msgstr "有効な状態を指定してください。" msgid "Must specify an ExtensionManager class" msgstr "ExtensionManager クラスを指定する必要があります" @@ -5656,6 +6152,13 @@ msgstr "ExtensionManager クラスを指定する必要があります" msgid "Must specify bootable in request." msgstr "要求にブート可能を指定する必要があります。" +msgid "" +"Must specify one or more of the following keys to update: name, description, " +"add_volumes, remove_volumes." +msgstr "" +"更新を行うには、次のキーを一つ以上指定する必要があります : 名前、説明、 " +"add_volumes 、 remove_volumes" + msgid "Must specify protection domain name or protection domain id." msgstr "保護ドメインの名前か ID を指定しなければなりません。" @@ -5676,9 +6179,6 @@ msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "ストレージプールを指定しなければなりません。オプション: sio_storage_pools。" -msgid "Must supply a positive value for age" -msgstr "年齢には正の値を提供する必要があります" - msgid "Must supply a positive, non-zero value for age" msgstr "存続期間には正の非ゼロ値を指定してください" @@ -5687,11 +6187,11 @@ msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "NAS 構成「%(name)s=%(value)s」は無効です。「auto」、「true」、「false」のいず" -"れかでなければなりません" +"れかでなければなりません。" #, python-format msgid "NFS config file at %(config)s doesn't exist" -msgstr "NFS 構成ファイルが %(config)s に存在しません" +msgstr "NFS 構成ファイルが %(config)s に存在しません。" #, python-format msgid "NFS file %s not discovered." @@ -5700,6 +6200,9 @@ msgstr "NFS ファイル %s は検出されていません。" msgid "NFS file could not be discovered." msgstr "NFS ファイルを検出できませんでした。" +msgid "NULL host not allowed for volume backend lookup." +msgstr "NULL ホストはボリューム・バックエンド検索では許可されません。" + msgid "NaElement name cannot be null." msgstr "NaElement 名は NULL にできません。" @@ -5757,6 +6260,9 @@ msgstr "ホストには FC イニシエーターを追加できません。" msgid "No FC port connected to fabric." msgstr "ファブリックに接続された FC ポートはありません。" +msgid "No FC targets found" +msgstr "FC ターゲットが見つかりません。" + msgid "No FCP targets found" msgstr "FCP ターゲットが見つかりません" @@ -5781,7 +6287,7 @@ msgid "No backups available to do an incremental backup." 
msgstr "増分バックアップを実行するために使用可能なバックアップがありません。" msgid "No big enough free disk" -msgstr "十分な大きさの空きディスクがありません" +msgstr "十分な大きさの空きディスクがありません。" #, python-format msgid "No cgsnapshot with id %s" @@ -5790,6 +6296,9 @@ msgstr "ID %s の cgsnapshot は存在しません" msgid "No cinder entries in syslog!" msgstr "cinder 項目が syslog にありません。" +msgid "No clients in vdev information." +msgstr "vdev 情報にクライアント情報がありません。" + #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "複製された %s という名前の LUN はファイラーで見つかりません" @@ -5801,6 +6310,9 @@ msgstr "設定ノードが見つかりません。" msgid "No consistency group with id %s" msgstr "ID %s の整合性グループは存在しません" +msgid "No data information in return info." +msgstr "返された情報の中に、データ情報がありません。" + #, python-format msgid "No element by given name %s." msgstr "指定された名前 %s の要素はありません。" @@ -5808,10 +6320,19 @@ msgstr "指定された名前 %s の要素はありません。" msgid "No errors in logfiles!" msgstr "ログファイル内にエラーはありません。" +msgid "No fcdevices in given data." +msgstr "与えられたデータに FC デバイス情報がありません。" + +msgid "No fcdevices information in given data." +msgstr "与えられたデータに fc デバイス情報がありません。" + #, python-format msgid "No file found with %s as backing file." msgstr "バッキングファイルとして %s を持つファイルが見つかりません。" +msgid "No free FC initiator can be assigned to host." +msgstr "ホストに割り当て可能な、空いている FC イニシエーターがありません。" + #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " @@ -5821,7 +6342,7 @@ msgstr "" "数を超過しています。" msgid "No free disk" -msgstr "空きディスクはありません" +msgstr "空きディスクはありません。" #, python-format msgid "No good iscsi portal found in supplied list for %s." @@ -5831,6 +6352,10 @@ msgstr "正しい iscsi ポータルが %s の指定されたリストに見つ msgid "No good iscsi portals found for %s." msgstr "%s の正しい iscsi ポータルが見つかりません。" +#, python-format +msgid "No group with id %s" +msgstr "ID %s のグループは存在しません。" + #, python-format msgid "No host to create consistency group %s." msgstr "整合性グループ %s を作成するためのホストがありません。" @@ -5846,7 +6371,7 @@ msgstr "ファブリックに接続されたイニシエーターはありませ #, python-format msgid "No initiator group found for initiator %s" -msgstr "イニシエーター %s のイニシエーターグループが見つかりません" +msgstr "イニシエーター %s のイニシエーターグループが見つかりません。" msgid "No initiators found, cannot proceed" msgstr "イニシエーターが見つからないため、続行できません" @@ -5855,6 +6380,9 @@ msgstr "イニシエーターが見つからないため、続行できません msgid "No interface found on cluster for ip %s" msgstr "ip %s のクラスター上にインターフェースが見つかりませんでした" +msgid "No iocluster information in given data." +msgstr "与えられたデータに iocluster 情報がありません。" + msgid "No ip address found." msgstr "IP アドレスが見つかりません。" @@ -5871,6 +6399,15 @@ msgstr "CloudByte ボリューム [%s] の iscsi サービスが見つかりま msgid "No iscsi services found in CloudByte storage." msgstr "iscsi サービスが CloudByte ストレージに見つかりません。" +msgid "No iscsidevices information in given data." +msgstr "与えられたデータに iSCSI デバイス情報がありません。" + +msgid "No iscsitargets for target." +msgstr "ターゲット用の iSCSI ターゲットがありません、" + +msgid "No iscsitargets in return info." +msgstr "返された情報の中に、iSCSI ターゲット情報がありません。" + #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "" @@ -5942,6 +6479,13 @@ msgstr "" "整合性グループ %s を作成するためのソーススナップショットが提供されていませ" "ん。" +msgid "" +"No storage could be allocated for this volume request. You may be able to " +"try another size or volume type." +msgstr "" +"このボリュームのリクエストに対して、ストレージを割り当てられませんでした。サ" +"イズやボリューム種別を変更して試すことができるかもしれません。" + #, python-format msgid "No storage path found for export path %s" msgstr "エクスポートパス %s 用のストレージパスが見つかりません" @@ -5951,7 +6495,7 @@ msgid "No such QoS spec %(specs_id)s." 
msgstr "そのような QoS 仕様 %(specs_id)s は存在しません。" msgid "No suitable discovery ip found" -msgstr "適切なディスカバリー ip が見つかりません" +msgstr "適切なディスカバリー ip が見つかりません。" #, python-format msgid "No support to restore backup version %s" @@ -5961,6 +6505,12 @@ msgstr "バックアップバージョン %s をリストアすることがで msgid "No target id found for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s のターゲット ID が見つかりません。" +msgid "No target in given data." +msgstr "与えられたデータにターゲット情報がありません。" + +msgid "No target information in given data." +msgstr "与えられたデータに ターゲット情報がありません。" + msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." @@ -5976,6 +6526,18 @@ msgstr "有効なホストが見つかりませんでした。%(reason)s" msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "タイプ %(type)s のボリューム %(id)s に対して有効なホストがありません" +msgid "No valid ports." +msgstr "有効なポートがありません。" + +msgid "No vdev information in given data" +msgstr "与えられたデータに vdev 情報がありません。" + +msgid "No vdev information in given data." +msgstr "与えられたデータに vdev 情報がありません。" + +msgid "No vdev sizemb in given data." +msgstr "与えられたデータに vdev sizemb 情報がありません。" + #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "参照 %s によって指定された UID を持つ vdisk がありません。" @@ -6003,9 +6565,19 @@ msgstr "テストレプリカの作成時に volume_type を指定してはな msgid "No volumes found in CloudByte storage." msgstr "ボリュームが CloudByte ストレージに見つかりません。" +#, python-format +msgid "No volumes or consistency groups exist in cluster %(current)s." +msgstr "クラスター %(current)s にはボリュームも整合性グループも存在しません。" + msgid "No weighed hosts available" msgstr "重み付けを設定したホストが存在しません" +msgid "None numeric BWS QoS limitation" +msgstr "BWS QoS 制限の数値がありません。" + +msgid "None numeric IOPS QoS limitation" +msgstr "IOPS QoS 制限の数値がありません。" + #, python-format msgid "Not a valid string: %s" msgstr "有効な文字列ではありません: %s" @@ -6018,7 +6590,7 @@ msgid "Not able to find a suitable datastore for the volume: %s." msgstr "ボリューム %s に適したデータストアが見つかりません。" msgid "Not an rbd snapshot" -msgstr "rbd スナップショットではありません" +msgstr "rbd スナップショットではありません。" #, python-format msgid "Not authorized for image %(image_id)s." @@ -6036,10 +6608,11 @@ msgstr "" "この操作を実行するために十分なストレージスペースが ZFS 共有にありません。" msgid "Not stored in rbd" -msgstr "rbd 内に保管されていません" +msgstr "rbd 内に保管されていません。" msgid "Nova returned \"error\" status while creating snapshot." -msgstr "スナップショットの作成時に Nova から「エラー」状況が返されました。" +msgstr "" +"スナップショットの作成時に Nova から「エラー」ステータスが返されました。" msgid "Null response received from CloudByte's list filesystem." msgstr "" @@ -6076,9 +6649,6 @@ msgstr "" "CloudByte のストレージで [%(operation)s] に 関するジョブ [%(job)s] の検索中" "に Null のレスポンスを受信しました。" -msgid "Number of retries if connection to ceph cluster failed." -msgstr "ceph クラスターへの接続が失敗した場合の再接続の回数。" - msgid "Object Count" msgstr "オブジェクト数" @@ -6123,7 +6693,7 @@ msgstr "非管理に設定できるのは、OpenStack が管理するボリュ #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "状況=%(status)s で操作が失敗しました。フルダンプ: %(data)s" +msgstr "状態=%(status)s で操作が失敗しました。フルダンプ: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." @@ -6138,18 +6708,11 @@ msgstr "gpfs_images_share_mode オプションが正しく設定されていま msgid "Option gpfs_mount_point_base is not set correctly." msgstr "gpfs_mount_point_base オプションが正しく設定されていません。" -msgid "Option map (cls._map) is not defined." 
-msgstr "オプションマップ (cls._map) が定義されていません。" - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "" "作成元の %(res)s %(prop)s は '%(vals)s' 値のいずれかでなければなりません。" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "" -"Blockbridge API サーバーにアクセスするために HTTPS ポートを上書きする。" - #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" @@ -6177,6 +6740,14 @@ msgstr "事前に %(pool_list)s プールを作成してください。" msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "事前にプール %(pool)s に %(tier_levels)s 層を作成してください。" +#, python-format +msgid "Please provide at least one volume for parameter %s" +msgstr "パラメーター %s には、少なくとも1つのボリューム を指定してください。" + +#, python-format +msgid "Please provide valid format volume: lun for parameter %s" +msgstr "パラメーター %s に、正しい形式で ボリューム: lun を指定してください。" + msgid "Please re-run cinder-manage as root." msgstr "cinder-manage を root として再実行してください。" @@ -6203,6 +6774,9 @@ msgstr "ボリュームのプール ['host'] %(host)s が見つかりません msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "ボリュームのプール ['host'] が以下のため失敗しました: %(ex)s。" +msgid "Pool is not available in the cinder configuration fields." +msgstr "プールが cinder 設定フィールドにありません。" + msgid "Pool is not available in the volume host field." msgstr "プールがボリュームホストフィールドにありません。" @@ -6242,15 +6816,26 @@ msgstr "プール名が設定されていません。" msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "プライマリーコピーの状態: %(status)s および同期済み: %(sync)s。" +#, python-format +msgid "Programming error in Cinder: %(reason)s" +msgstr "Cinder でのプログラミングエラー : %(reason)s" + msgid "Project ID" msgstr "プロジェクト ID" +msgid "Project name not specified" +msgstr "プロジェクト名が指定されていません。" + #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "" "ネストされたクォータに対してプロジェクトのクォータが適切に設定されていませ" "ん: %(reason)s" +#, python-format +msgid "Project: %s not found" +msgstr "プロジェクト %s: 見つかりませんでした。" + msgid "Protection Group not ready." msgstr "保護グループの準備ができていません。" @@ -6285,6 +6870,9 @@ msgstr "" msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Pure Storage Cinder ドライバー障害です: %(reason)s" +msgid "Purge command failed, check cinder-manage logs for more details." +msgstr "Purge コマンドが失敗しました。詳細はログを確認して下さい。" + #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "QoS 仕様 %(specs_id)s は既に存在します。" @@ -6332,6 +6920,10 @@ msgstr "" msgid "Qos specs still in use." msgstr "Qos 仕様はまだ使用中です。" +#, python-format +msgid "Query Dsware version failed! Retcode is %s." +msgstr "Dsware でバージョンのクエリーに失敗しました。 Retcode は %s です。" + msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." @@ -6351,7 +6943,7 @@ msgid "Quota class %(class_name)s could not be found." msgstr "クォータクラス %(class_name)s が見つかりませんでした。" msgid "Quota could not be found" -msgstr "クォータが見つかりませんでした" +msgstr "クォータが見つかりませんでした。" #, python-format msgid "Quota exceeded for resources: %(overs)s" @@ -6389,6 +6981,9 @@ msgstr "RBD diff 操作が失敗しました: (ret=%(ret)s stderr=%(stderr)s)" msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" +msgid "REST Async Error: Command not accepted." +msgstr "REST 非同期エラー: コマンドが許可されていません。" + msgid "REST server IP must by specified." msgstr "REST サーバーの IP を指定しなければなりません。" @@ -6402,19 +6997,11 @@ msgid "RPC Version" msgstr "RPC のバージョン" msgid "RPC server response is incomplete" -msgstr "RPC サーバーの応答が完了していません" +msgstr "RPC サーバーの応答が完了していません。" msgid "Raid did not have MCS Channel." 
msgstr "RAID に MCS チャンネルがありません。" -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"構成オプション max_luns_per_storage_group によって設定された制限に達しまし" -"た。%(vol)s をストレージグループ %(sg)s に追加する操作は拒否されます。" - #, python-format msgid "Received error string: %s" msgstr "エラー文字列を受信しました: %s" @@ -6528,6 +7115,12 @@ msgstr "複製サービス機能が %(storageSystemName)s に見つかりませ msgid "Replication Service not found on %(storageSystemName)s." msgstr "複製サービスが %(storageSystemName)s に見つかりません。" +msgid "" +"Replication is configured, but no MirrorView/S enabler installed on VNX." +msgstr "" +"レプリケーションが設定されていますが、 VNX にMirrorView/S イネーブラーがイン" +"ストールされていません。" + msgid "Replication is not enabled" msgstr "複製が有効になっていません" @@ -6537,21 +7130,35 @@ msgstr "ボリュームの複製が有効になっていません" msgid "Replication not allowed yet." msgstr "まだレプリケーションを行うことはできません。" +msgid "" +"Replication setup failure: replication has been enabled but no replication " +"target has been specified for this backend." +msgstr "" +"レプリケーションセットアップ失敗: レプリケーションが有効になっていますが、レ" +"プリケーションターゲットがこのバックエンド用に指定されていません。" + +msgid "" +"Replication setup failure: replication:livevolume has been enabled but more " +"than one replication target has been specified for this backend." +msgstr "" +"レプリケーションセットアップ失敗: ライブボリュームが有効になっていますが、1つ" +"以上のレプリケーションターゲットがこのバックエンド用に指定されています。" + #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" -"ボリュームの複製状況は「アクティブ」または「アクティブ - 停止」でなければなり" -"ませんが、現在の状況は %s です" +"ボリュームの複製の状態は「アクティブ」または「アクティブ - 停止」でなければな" +"りませんが、現在の状態は %s です。" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" -"ボリュームの複製状況は「非アクティブ」、「アクティブ - 停止」、または「エ" -"ラー」でなければなりませんが、現在の状況は %s です" +"ボリュームの複製の状態は「非アクティブ」、「アクティブ - 停止」、または「エ" +"ラー」でなければなりませんが、現在の状態は %s です。" msgid "Request body and URI mismatch" msgstr "要求本体と URI の不一致" @@ -6568,7 +7175,7 @@ msgstr "要求本体が空です" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" -"Datera クラスターに対する要求から、正しくない状況が返されました: %(status)s " +"Datera クラスターに対する要求から、正しくない状態が返されました: %(status)s " "| %(reason)s" #, python-format @@ -6579,6 +7186,9 @@ msgstr "" "要求されたバックアップが許容バックアップクォータ (ギガバイト) を超えていま" "す。要求量 %(requested)s G、クォータ %(quota)s G、消費量 %(consumed)s. G。" +msgid "Requested resource is currently unavailable" +msgstr "要求されたリソースは現在利用できません。" + #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " @@ -6603,18 +7213,15 @@ msgstr "必要な構成が見つかりません" msgid "Required flag %s is not set" msgstr "必須フラグ %s が設定されていません" -msgid "Requires an NaServer instance." -msgstr "NaServer インスタンスが必要です。" - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" -"バックアップ状況のリセットが中止しました。現在構成されているバックアップサー" -"ビス [%(configured_service)s] は、このバックアップの作成に使用されたバック" -"アップサービス [%(backup_service)s] ではありません。" +"バックアップの状態のリセットを中止しました。現在構成されているバックアップ" +"サービス [%(configured_service)s] は、このバックアップの作成に使用されたバッ" +"クアップサービス [%(backup_service)s] ではありません。" #, python-format msgid "Resizing clone %s failed." @@ -6648,7 +7255,7 @@ msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." 
msgstr "" -"バックアップのリストアが中止しました。予期していたボリューム状況は " +"バックアップのリストアが中止しました。予期していたボリュームの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format @@ -6666,7 +7273,7 @@ msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" -"バックアップのリストアが中止しました。予期していたバックアップ状況は " +"バックアップのリストアが中止しました。予期していたバックアップの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format @@ -6683,12 +7290,18 @@ msgid "" "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "指定された Cinder ボリュームについて異なる量の SolidFire ボリュームを検出しま" -"した。%(ret)s を検出しましたが、%(des)s を期待していました" +"した。%(ret)s を検出しましたが、%(des)s を期待していました。" #, python-format msgid "Retry count exceeded for command: %s" msgstr "コマンドの再試行回数を超過しました: %s" +msgid "Retryable Dell Exception encountered" +msgstr "再試行可能な Dell 例外が発生しました" + +msgid "Retryable Pure Storage Exception encountered" +msgstr "再試行可能な Pure Storage 例外が発生しました" + msgid "Retryable SolidFire Exception encountered" msgstr "再試行可能な SolidFire 例外が発生しました" @@ -6771,6 +7384,13 @@ msgstr "" msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "スケジューラーホスト Weigher %(weigher_name)s が見つかりませんでした。" +#, python-format +msgid "" +"Search URI %s is not in the expected format, it should end with ?tag={0}" +msgstr "" +"検索 URI %s は期待される形式ではありません。 URI は ?tag={0} で終わる必要が" +"あります。" + #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " @@ -6803,16 +7423,19 @@ msgstr "ホスト %(host)s でサービス%(service_id)s が見つかりませ msgid "Service %(service_id)s could not be found." msgstr "サービス %(service_id)s が見つかりませんでした。" -#, python-format -msgid "Service %s not found." -msgstr "サービス %s が見つかりません。" - msgid "Service is too old to fulfil this request." msgstr "サービスが古すぎるため、このリクエストに対応できません。" msgid "Service is unavailable at this time." msgstr "現在サービスは使用できません。" +msgid "" +"Service temporarily unavailable: The server is temporarily unable to service " +"your request" +msgstr "" +"サービス一時利用不可: サーバーは一時的にリクエストに対するサービスを提供でき" +"ません。" + msgid "Set pair secondary access error." msgstr "ペアのセカンダリーアクセス設定のエラー。" @@ -6833,6 +7456,14 @@ msgstr "" "ファイル qos ポリシーグループの設定は、このストレージファミリーおよびontap " "バージョンではサポートされていません。" +#, python-format +msgid "" +"Share %s ignored due to invalid format. Must be of form address:/export. " +"Please check the nas_host and nas_share_path settings." +msgstr "" +"形式が無効であるため、共有 %s は無視されました。address:/export 形式でなけれ" +"ばなりません。nas_host および nas_share_path の設定を確認してください。" + #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " @@ -6875,6 +7506,16 @@ msgstr "" "指定されたイメージのサイズ %(image_size)s GB がボリュームサイズ " "%(volume_size)s GB を上回っています。" +#, python-format +msgid "" +"Snapshot %(cgsnapshot_id)s: for Consistency Group %(cg_name)s: delete " +"failed\n" +"%(err)s" +msgstr "" +"整合性グループ %(cg_name)s のスナップショット %(cgsnapshot_id)s: 作成に失敗し" +"ました。\n" +"%(err)s" + #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " @@ -6902,25 +7543,55 @@ msgstr "" "ありません。" #, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "スナップショット %s は整合性グループの一部であってはなりません。" +msgid "" +"Snapshot %(src_snapshot_name)s: clone failed\n" +"%(err)s" +msgstr "" +"スナップショット %(src_snapshot_name)s: クローンが失敗しました。\n" +"%(err)s" + +#, python-format +msgid "Snapshot %s : Delete Failed\n" +msgstr "スナップショット %s : 削除が失敗しました。\n" #, python-format msgid "Snapshot '%s' doesn't exist on array." 
msgstr "スナップショット '%s' はアレイに存在しません。" +msgid "" +"Snapshot can't be taken individually on a volume that is part of a " +"Consistency Group" +msgstr "" +"整合性グループの一部のボリュームのスナップショットを個別に取得することはでき" +"ません。" + #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "ボリューム %(vol_id)s が「使用可能」ではないため、スナップショットを作成でき" -"ません。現在のボリューム状況は %(vol_status)s です。" +"ません。現在のボリュームの状態は %(vol_status)s です。" msgid "Snapshot cannot be created while volume is migrating." msgstr "" "ボリュームのマイグレーション中にスナップショットを作成することはできません。" +msgid "" +"Snapshot delete can't be done individually on a volume that is part of a " +"Consistency Group" +msgstr "" +"整合性グループの一部のボリュームのスナップショットを個別に削除することはでき" +"ません。" + +#, python-format +msgid "" +"Snapshot for Consistency Group %(cg_name)s: create failed\n" +"%(err)s" +msgstr "" +"整合性グループ %(cg_name)s のスナップショット: 作成に失敗しました。\n" +"%(err)s" + msgid "Snapshot of secondary replica is not allowed." msgstr "2 次レプリカのスナップショットは許可されません。" @@ -6930,19 +7601,18 @@ msgstr "状態 %s でのボリュームのスナップショットはサポー #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "スナップショットリソース \"%s\" が実装されていますか" - -msgid "Snapshot size must be multiple of 1 GB." -msgstr "スナップショットのサイズは 1 GB の倍数である必要があります。" +msgstr "" +"スナップショットリソース \"%s\" がどこかにデプロイされていないかを確認してく" +"ださい。" #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -"スナップショット状況 %(cur)s は update_snapshot_status には許可されません" +"スナップショットの状態 %(cur)s は update_snapshot_status には許可されません。" msgid "Snapshot status must be \"available\" to clone." msgstr "" -"複製を行うには、スナップショット状況が「使用可能」でなければなりません。" +"複製を行うには、スナップショットの状態が「使用可能」でなければなりません。" #, python-format msgid "" @@ -6955,6 +7625,20 @@ msgstr "" msgid "Snapshot with id of %s could not be found." msgstr "%s の ID を持つスナップショットを見つけることができませんでした。" +#, python-format +msgid "Snapshot with name %(snaplabel)s already exists under %(typename)s" +msgstr "" +"名前 %(snaplabel)s を持つスナップショットは、 %(typename)s の下にすでに存在し" +"ます。" + +#, python-format +msgid "" +"Snapshot: %(snapshotname)s, create failed\n" +"%(err)s" +msgstr "" +"スナップショット %(snapshotname)s: 作成が失敗しました。\n" +"%(err)s" + #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " @@ -6977,6 +7661,13 @@ msgstr "SolidFire Cinder Driver 例外" msgid "Sort direction array size exceeds sort key array size." msgstr "ソート方向の配列サイズがソートキーの配列サイズを超えています。" +msgid "" +"Source CG cannot be empty or in 'creating' or 'updating' state. No " +"cgsnapshot will be created." +msgstr "" +"ソース CG が空もしくは 'creating' や 'updating' の状態ではいけません。 " +"cgsnapshot は作成されません。" + msgid "Source CG is empty. No consistency group will be created." msgstr "ソース CG が空です。整合性グループは作成されません。" @@ -6990,7 +7681,7 @@ msgid "Source volume not mid-migration." msgstr "ソースボリュームはマイグレーション中ではありません" msgid "SpaceInfo returned byarray is invalid" -msgstr "アレイによって返された SpaceInfo が無効です" +msgstr "アレイによって返された SpaceInfo が無効です。" #, python-format msgid "" @@ -7009,7 +7700,10 @@ msgstr "" "ID %s の指定されたスナップショットグループを見つけることができませんでした。" msgid "Specify a password or private_key" -msgstr "パスワードまたは private_key を指定してください" +msgstr "パスワードまたは private_key を指定してください。" + +msgid "Specify group type name, description or a combination thereof." 
+msgstr "グループ種別の名前、説明、またはこれらの組み合わせを指定してください。" msgid "Specify san_password or san_private_key" msgstr "san_password または san_private_key を指定してください" @@ -7070,7 +7764,7 @@ msgstr "ストレージグループ %(storageGroupName)s は正常に削除さ #, python-format msgid "Storage host %(svr)s not detected, verify name" -msgstr "ストレージホスト %(svr)s が検出されません。名前を検証してください" +msgstr "ストレージホスト %(svr)s が検出されません。名前を検証してください。" msgid "Storage pool is not configured." msgstr "ストレージプールが設定されていません。" @@ -7089,15 +7783,18 @@ msgstr "ストレージシステム ID が設定されていません。" msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "プール %(poolNameInStr)s のストレージシステムが見つかりません。" -msgid "Storage-assisted migration failed during manage volume." -msgstr "" -"ボリュームの管理中にストレージによってサポートされるマイグレーションが失敗し" -"ました。" - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "ストレージシステム %(array)s が見つかりません。" +#, python-format +msgid "" +"Successfully renamed %(num_vols)s volumes and %(num_cgs)s consistency groups " +"from cluster %(current)s to %(new)s" +msgstr "" +"%(num_vols)s ボリュームと %(num_cgs)s 整合性グループの、クラスター " +"%(current)s から %(new)s への名前変更が正常に行われました。" + #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " @@ -7117,13 +7814,17 @@ msgstr "ペア同期のエラー。" msgid "Synchronizing secondary volume to primary failed." msgstr "2 次ボリュームの 1 次ボリュームへの同期に失敗しました。" +#, python-format +msgid "Synology driver authentication failed: %(reason)s." +msgstr "Synology ドライバーの認証が失敗しました: %(reason)s" + #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "正しくない状態 - %(pass_status)s のシステム %(id)s が見つかりました。" #, python-format msgid "System %(id)s found with bad status - %(status)s." -msgstr "システム %(id)s で正しくない状況 %(status)s が見つかりました。" +msgstr "システム %(id)s で正しくない状態 %(status)s が見つかりました。" msgid "System does not support compression." msgstr "システムは圧縮をサポートしません。" @@ -7138,15 +7839,30 @@ msgstr "" "アカウント [%(account)s] の CloudByte ストレージで、TSM [%(tsm)s] が見つかり" "ませんでした。" +msgid "Target group type is still in use." +msgstr "ターゲットグループ種別はまだ使用中です。" + msgid "Target volume type is still in use." msgstr "ターゲットボリュームタイプはまだ使用中です。" #, python-format -msgid "Tenant ID: %s does not exist." -msgstr "テナント ID %s が存在しません。" +msgid "" +"Task did not complete in %d secs. Operation timed out. Task in CoprHD will " +"continue" +msgstr "" +"タスクが %d 秒以内に完了しませんでした。オペレーションはタイムアウトしまし" +"た。 CoprHD 内のタスクは継続されます。" + +#, python-format +msgid "Task: %(task_id)s is failed with error: %(error_message)s" +msgstr "タスク: %(task_id)s が失敗しました。エラー: %(error_message)s" + +#, python-format +msgid "Tenant %s: not found" +msgstr "テナント %s: 見つかりませんでした。" msgid "Terminate connection failed" -msgstr "接続を強制終了できませんでした" +msgstr "接続を強制終了できませんでした。" msgid "Terminate connection unable to connect to backend." msgstr "バックエンドに接続できない接続を強制終了します。" @@ -7198,6 +7914,9 @@ msgstr "" "このコマンドを実行する前に、'cinder-manage db sync' を使用してデータベースを" "作成してください。" +msgid "The allocated size must less than total size." 
+msgstr "割り当てサイズは合計のサイズよりも少ない必要があります。" + #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " @@ -7206,6 +7925,16 @@ msgstr "" "配列が SLO %(slo)s とワークロード %(workload)s のストレージプール設置をサポー" "トしません。配列で有効な SLO とワークロードを確認してください。" +msgid "" +"The authentication service failed to provide the location of the service URI " +"when redirecting back" +msgstr "" +"Authentication サービスで、リダイレクトで戻るサービス URI の提供に失敗しまし" +"た。" + +msgid "The authentication service failed to reply with 401" +msgstr "Authentication サービスが失敗し、 401 エラーが返されました。" + msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "" @@ -7232,6 +7961,9 @@ msgstr "" "装飾されたメソッドは、ボリュームとスナップショットオブジェクトのいずれもを受" "け付けることができません。" +msgid "The decorated method must accept image_meta." +msgstr "装飾されたメソッドは、 image_meta を受け入れる必要があります。" + #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "パス %(path)s のデバイスは使用不可です: %(reason)s" @@ -7240,10 +7972,6 @@ msgstr "パス %(path)s のデバイスは使用不可です: %(reason)s" msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "終了時刻 (%(end)s) は開始時刻 (%(start)s) より後でなければなりません。" -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "extra_spec: %s が無効です。" - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "追加仕様: %(extraspec)s は無効です。" @@ -7264,6 +7992,13 @@ msgstr "" "以下の移行にはダウングレードがありますが、これは許容されません: \n" "\t%s" +msgid "" +"The given pool info must include the storage pool and naming start with " +"OpenStack-" +msgstr "" +"与えられるプール情報にはストレージプールが含まれていなければならず、名前は " +"Openstack から始まる必要があります。" + msgid "The host group or iSCSI target could not be added." msgstr "ホストグループまたは iSCSI ターゲットを追加できませんでした。" @@ -7297,11 +8032,15 @@ msgstr "iSCSI CHAP ユーザー %(user)s は存在しません。" #, python-format msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." +"The imported lun is in pool %(lun_pool)s which is not managed by the host " +"%(host)s." msgstr "" -"インポートされた LUN %(lun_id)s はホスト %(host)s が管理しないプール " -"%(lun_pool)s にあります。" +"インポートした LUN はホスト %(host)s に管理されていないプール %(lun_pool)s に" +"あります。" + +#, python-format +msgid "The job has not completed and is in a %(state)s state." +msgstr "ジョブが完了していません。状態は %(state)s です。" msgid "The key cannot be None." msgstr "キーは None に設定することはできません。" @@ -7338,6 +8077,9 @@ msgstr "" "指定されたスナップショット '%s' は指定されたボリュームのスナップショットでは" "ありません。" +msgid "The redirect location of the authentication service is not provided" +msgstr "Authentication サービスのリダイレクトの場所が指定されていません。" + msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" @@ -7374,6 +8116,12 @@ msgstr "" msgid "The resource %(resource)s was not found." msgstr "リソース %(resource)s が見つかりませんでした。" +msgid "The resource is a FSS thin device, minimum size is 10240 MB." +msgstr "リソースは FSS ボリュームです。最小のサイズは 10240 MB です。" + +msgid "The resource is a thin device, thin size is invalid." +msgstr "リソースは thin デバイスです。 thin サイズが正しくありません。" + msgid "The results are invalid." msgstr "結果が無効です。" @@ -7381,14 +8129,33 @@ msgstr "結果が無効です。" msgid "The retention count must be %s or less." msgstr "保存数は %s 以下でなければなりません。" +msgid "The san_secondary_ip param is null." +msgstr " san_secondary_ip パラメーターが null です。" + +msgid "The snapshot cannot be created when the volume is in error status." +msgstr "" +"ボリュームの状態が「エラー」である場合は、スナップショットを作成できません。" + msgid "The snapshot cannot be created when the volume is in maintenance mode." 
msgstr "" "ボリュームがメンテナンスモードの場合は、スナップショットを作成できません。" #, python-format +msgid "The snapshot is unavailable: %(data)s" +msgstr "スナップショットは使用できません : %(data)s" + +#, python-format +msgid "The snapshot's parent in ScaleIO is %(ancestor)s and not %(volume)s." +msgstr "" +"Scale IO 内のこのスナップショットの親は %(ancestor)s で、 %(volume)s ではあ" +"りません。" + msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "ソースボリューム %s は現在のホストが管理するプールにありません。" +"The snapshot's parent is not the original parent due to deletion or revert " +"action, therefore this snapshot cannot be managed." +msgstr "" +"このスナップショットの親は、削除中やアクションの取り消し中などの理由で、オリ" +"ジナルのものではありません。このため、このスナップショットは管理できません。" msgid "The source volume for this WebDAV operation not found." msgstr "この WebDAV 操作のソースボリュームが見つかりません。" @@ -7480,6 +8247,9 @@ msgstr "" "ポートするように設定するか、別のプロトコルを使用するドライバーに切り替えてく" "ださい。" +msgid "The storage pool information is empty or not correct" +msgstr "ストレージプールの情報が空か、正しくありません。" + #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " @@ -7488,6 +8258,14 @@ msgstr "" "%(memberCount)s のストライプメタ数がボリュームに対して小さすぎます: " "%(volumeSize)s のサイズを持つ %(volumeName)s。" +#, python-format +msgid "The token is not generated by authentication service. %s" +msgstr "トークンが authentication サービスで生成されませんでした。 %s" + +#, python-format +msgid "The token is not generated by authentication service.%s" +msgstr "トークンが authentication サービスで生成されませんでした。 %s" + #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " @@ -7496,6 +8274,10 @@ msgstr "" "メタデータのタイプ: ボリューム/スナップショット %(id)s の %(metadata_type)s " "が無効です。" +#, python-format +msgid "The value %(value)s for key %(key)s in extra specs is invalid." +msgstr "追加スペックのキー %(key)s の値 %(value)s が無効です。" + #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " @@ -7554,6 +8336,14 @@ msgstr "" msgid "The volume label is required as input." msgstr "入力としてボリュームラベルが必要です。" +#, python-format +msgid "" +"The volume to be managed is a %(provision)s LUN and the tiering setting is " +"%(tier)s. This doesn't match with the type %(type)s." +msgstr "" +"管理対象となるボリュームは %(provision)s LUN で、ティアリングの設定は " +"%(tier)s です。これはタイプ %(type)s と適合していません。" + #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "使用できるリソースがありません。(リソース: %(resource)s)" @@ -7561,10 +8351,6 @@ msgstr "使用できるリソースがありません。(リソース: %(resourc msgid "There are no valid ESX hosts." msgstr "有効な ESX ホストがありません。" -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "%s に接続された有効なデータストアがありません。" - msgid "There are no valid datastores." msgstr "有効なデータストアがありません。" @@ -7659,10 +8445,10 @@ msgid "Thin provisioning not supported on this version of LVM." msgstr "" "このバージョンの LVM ではシンプロビジョニングはサポートされていません。" -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" +msgid "ThinProvisioning Enabler is not installed. Can not create thin volume." msgstr "" "シンプロビジョニングイネーブラーがインストールされていません。シンボリューム" -"を作成できません" +"を作成できません。" msgid "This driver does not support deleting in-use snapshots." msgstr "" @@ -7701,16 +8487,13 @@ msgstr "" "スナップショット %(id)s を削除するために Nova の更新を待機している間にタイム" "アウトになりました。" -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." 
-msgstr "" -"ceph クラスターへの接続時にタイムアウト値 (秒) が使用されます。値が 0 より小" -"さい場合、タイムアウトは設定されず、デフォルトの librados 値が使用されます。" +#, python-format +msgid "Timeout waiting for %(condition_name)s in wait_until." +msgstr "wait_until 中の %(condition_name)s のタイムアウトを待っています。" #, python-format msgid "Timeout while calling %s " -msgstr " %s の呼び出し中にタイムアウトが発生しました" +msgstr " %s の呼び出し中にタイムアウトが発生しました。" #, python-format msgid "Timeout while requesting %(service)s API." @@ -7755,7 +8538,11 @@ msgid "" "%(qos_specs_id)s" msgstr "" "タイプ %(type_id)s は既に別の qos 仕様 %(qos_specs_id)s に関連付けられていま" -"す" +"す。" + +msgid "Type access modification is not applicable to public group type." +msgstr "" +"パブリックなグループタイプでは、タイプアクセスの変更を行うことはできません。" msgid "Type access modification is not applicable to public volume type." msgstr "" @@ -7769,6 +8556,9 @@ msgstr "タイプは NaElement に変換できません。" msgid "TypeError: %s" msgstr "TypeError: %s" +msgid "URI should end with /tag" +msgstr "URIは /tag で終わらなければいけません。" + #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUID %s が、ボリュームの追加リストと削除リストの両方に存在します。" @@ -7787,21 +8577,18 @@ msgstr "パス %(path)s を介してバックエンドストレージにアク #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "" -"スペース %(space)s のスナップショットに Cinder のホストを追加できません" +"スペース %(space)s のスナップショットに Cinder のホストを追加できません。" #, python-format msgid "Unable to complete failover of %s." msgstr "%s のフェイルオーバーを完了できません。" msgid "Unable to connect or find connection to host" -msgstr "ホストに接続できないか、ホストへの接続が見つかりません" - -msgid "Unable to create Barbican Client without project_id." -msgstr "project_id なしでは Barbican Client を作成できません。" +msgstr "ホストに接続できないか、ホストへの接続が見つかりません。" #, python-format msgid "Unable to create consistency group %s" -msgstr "整合性グループ %s を作成できません" +msgstr "整合性グループ %s を作成できません。" msgid "Unable to create lock. Coordination backend not started." msgstr "" @@ -7820,6 +8607,10 @@ msgstr "" msgid "Unable to create replica clone for volume %s." msgstr "ボリューム %s のレプリカ複製を作成できません。" +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "イニシエーター %(name)s 用にサーバーオブジェクトを作成できません。" + #, python-format msgid "Unable to create the relationship for %s." msgstr "%s の関係を作成できません。" @@ -7834,18 +8625,18 @@ msgstr "%(vol)s からボリューム %(name)s を作成できません。" #, python-format msgid "Unable to create volume %s" -msgstr "ボリューム %s を作成できません" +msgstr "ボリューム %s を作成できません。" msgid "Unable to create volume. Backend down." msgstr "ボリュームを作成できません。バックエンドがダウンしています。" #, python-format msgid "Unable to delete Consistency Group snapshot %s" -msgstr "整合性グループ %s を削除できません" +msgstr "整合性グループ %s を削除できません。" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "スナップショット %(id)s を削除できません。状況: %(status)s。" +msgstr "スナップショット %(id)s を削除できません。状態: %(status)s。" #, python-format msgid "Unable to delete snapshot policy on volume %s." @@ -7861,7 +8652,7 @@ msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" -"ボリュームを切り離すことができません。切り離すには、ボリューム状況が「使用" +"ボリュームを切り離すことができません。切り離すには、ボリュームの状態が「使用" "中」で、attach_status が「接続済み」でなければなりません。" #, python-format @@ -7898,16 +8689,13 @@ msgstr "" "Purity REST API のバージョン %(api_version)s ではレプリケーションを行うことが" "できません。%(required_versions)s のうちのいずれかのバージョンが必要です。" -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "レプリケーションとスナップコピーを同時に有効化することはできません。" - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." 
msgstr "Storwize クラスター %s とのパートナーシップを確立できません。" #, python-format msgid "Unable to extend volume %s" -msgstr "ボリューム %s を拡張できません" +msgstr "ボリューム %s を拡張できません。" #, python-format msgid "" @@ -7924,6 +8712,9 @@ msgstr "" "デフォルトにフェイルバックすることできません。フェイルバックができるのは、" "フェイルオーバーの完了後に限られます。" +msgid "Unable to failback. Backend is misconfigured." +msgstr "フェイルバックを行えません。バックエンドの設定に誤りがあります。" + #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "" @@ -7939,12 +8730,15 @@ msgstr "バックエンドから接続情報を取り出すことができませ #, python-format msgid "Unable to find Purity ref with name=%s" -msgstr "名前 %s を持つ Purity 参照が見つかりません" +msgstr "名前 %s を持つ Purity 参照が見つかりません。" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "ボリュームグループが見つかりません: %(vg_name)s" +msgid "Unable to find any active VPSA controller" +msgstr "アクティブな VPSA コントローラーが見つかりません。" + msgid "Unable to find failover target, no secondary targets configured." msgstr "" "フェイルオーバーのターゲットが見つかりません。セカンダリーターゲットが設定さ" @@ -7953,6 +8747,10 @@ msgstr "" msgid "Unable to find iSCSI mappings." msgstr "iSCSI のマッピングが見つかりません。" +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "イニシエーター %(name)s 用のサーバーオブジェクトが見つかりません。" + #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "ssh_hosts_key_file が見つかりません: %s" @@ -7977,11 +8775,15 @@ msgstr "" #, python-format msgid "Unable to find volume %s" -msgstr "ボリューム %s が見つかりません" +msgstr "ボリューム %s が見つかりません。" + +#, python-format +msgid "Unable to find volume with FSS vid =%s." +msgstr "FSS vid が %s のボリュームが見つかりませんでした。" #, python-format msgid "Unable to get a block device for file '%s'" -msgstr "ファイル '%s' のブロックデバイスを取得できません" +msgstr "ファイル '%s' のブロックデバイスを取得できません。" #, python-format msgid "" @@ -8039,6 +8841,12 @@ msgstr "マスキングビューの名前を取得できません。" msgid "Unable to get the name of the portgroup." msgstr "ポートグループの名前を取得できません。" +msgid "Unable to get the name of the storage group" +msgstr "ストレージグループの名前を取得できません。" + +msgid "Unable to get the name of the storage group." +msgstr "ストレージグループの名前を取得できません。" + #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "ボリューム %s のレプリケーション関係を取得できません。" @@ -8082,7 +8890,8 @@ msgstr "%(cert)s %(e)s から鍵をロードできません。" #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -"Solidfire デバイス上でアカウント %(account_name)s を見つけることができません" +"Solidfire デバイス上でアカウント %(account_name)s を見つけることができませ" +"ん。" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" @@ -8097,7 +8906,7 @@ msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "既に管理されているボリュームが存在するため、ボリューム %(volume_ref)s の管理" -"に失敗しました" +"に失敗しました。" #, python-format msgid "Unable to manage volume %s" @@ -8133,19 +8942,22 @@ msgstr "" #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "ボリューム %(existing)s の名前を %(newname)s に変更できません" +msgstr "ボリューム %(existing)s の名前を %(newname)s に変更できません。" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "%s の ID を持つスナップショットグループを取得できません。" +msgid "Unable to retrieve volume stats." +msgstr "ボリュームの統計を取得できません。" + #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. 
Value received: %(spec)s" msgstr "" "%(specname)s を再入力できません。要求した最新の %(spectype)s の値を受信するこ" -"とを予期していたものの、%(spec)s の値を受信しました" +"とを予期していたものの、%(spec)s の値を受信しました。" #, python-format msgid "" @@ -8163,6 +8975,10 @@ msgstr "" "種別変更ができません: 現行アクションにはボリュームコピーが必要ですが、新しい" "タイプが複製の場合は許可されません。ボリューム = %s" +#, python-format +msgid "Unable to send requests: %s" +msgstr "リクエストを送信できません: %s" + #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." @@ -8172,7 +8988,7 @@ msgstr "" #, python-format msgid "Unable to snap Consistency Group %s" -msgstr "整合性グループ %s を移動できません" +msgstr "整合性グループ %s を移動できません。" msgid "Unable to terminate volume connection from backend." msgstr "バックエンドからのボリューム接続を終了することができません。" @@ -8183,7 +8999,7 @@ msgstr "ボリューム接続を終了することができません: %(err)s" #, python-format msgid "Unable to update consistency group %s" -msgstr "整合性グループ %s を更新できません" +msgstr "整合性グループ %s を更新できません。" #, python-format msgid "" @@ -8203,6 +9019,9 @@ msgid "" msgstr "" " マッピング %(id)s の予期されないマッピング状態 %(status)s。属性: %(attr)s。" +msgid "Unexpected 'disabled_reason' found on enable request." +msgstr "enable request に予期しない 'disabled_reason' が見つかりました。" + #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " @@ -8210,6 +9029,12 @@ msgid "" msgstr "" "予期しない CLI 応答: ヘッダー/行の不一致。ヘッダー: %(header)s、行: %(row)s。" +msgid "Unexpected exception during get pools info." +msgstr "プール情報の取得中に予期しない例外が発生しました。" + +msgid "Unexpected exception during pool checking." +msgstr "プールのチェック中に予期しない例外が発生しました。" + #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." @@ -8222,6 +9047,10 @@ msgstr "" "予期しない出力。[%(expected)s] が予期されましたが、[%(output)s] を受け取りま" "した" +#, python-format +msgid "Unexpected over quota on %(name)s." +msgstr "%(name)s で、予期せずクォータを超過しました。" + msgid "Unexpected response from Nimble API" msgstr "Nimble API からの予期しない応答" @@ -8229,7 +9058,7 @@ msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Tegile IntelliFlash API からの予期しない応答" msgid "Unexpected status code" -msgstr "予期しない状況コード" +msgstr "予期しないステータスコード" #, python-format msgid "" @@ -8237,7 +9066,7 @@ msgid "" "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "URL %(page)s 用にプロトコル %(protocol)s を指定したスイッチ%(switch_id)s から" -"返された予期しない状況コード。エラー: %(error)s" +"返された予期しないステータスコード。エラー: %(error)s" msgid "Unknown Gluster exception" msgstr "不明な Gluster 例外" @@ -8291,7 +9120,7 @@ msgstr "不明な処理 %s。" #, python-format msgid "Unknown or unsupported command %(cmd)s" -msgstr "不明またはサポートされないコマンド (%(cmd)s) です" +msgstr "不明またはサポートされないコマンド (%(cmd)s) です。" #, python-format msgid "Unknown protocol: %(protocol)s." @@ -8301,15 +9130,16 @@ msgstr "不明なプロトコル: %(protocol)s。" msgid "Unknown quota resources %(unknown)s." msgstr "不明なクォータリソース %(unknown)s。" -msgid "Unknown service" -msgstr "不明なサービス" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません。" +#, python-format +msgid "Unknown/Unsupported HTTP method: %s" +msgstr "未知/未サポートの HTTP メソッド: %s" + msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "削除オプションの非管理とカスケーディングを同時に行うことはできません。" @@ -8338,6 +9168,14 @@ msgstr "認識されないバッキングフォーマット: %s" msgid "Unrecognized read_deleted value '%s'" msgstr "認識されない read_deleted 値 '%s'" +#, python-format +msgid "" +"Unrecoverable Error: Versioned Objects in DB are capped to unknown version " +"%(version)s." 
+msgstr "" +"リカバリー不可能なエラー : DB 内のバージョニングされたオブジェクトは、不明な" +"バージョン %(version)s でキャップされています。" + #, python-format msgid "Unset gcs options: %s" msgstr "gcs オプションの設定を解除します: %s" @@ -8361,10 +9199,14 @@ msgid "Unsupported backup metadata version (%s)" msgstr "サポートされないバックアップのメタデータバージョン (%s)" msgid "Unsupported backup metadata version requested" -msgstr "サポートされないバックアップメタデータバージョンが要求されました" +msgstr "サポートされないバックアップメタデータバージョンが要求されました。" msgid "Unsupported backup verify driver" -msgstr "サポートされないバックアップ検証ドライバー" +msgstr "サポートされないバックアップ検証ドライバーです。" + +#, python-format +msgid "Unsupported fields %s." +msgstr "サポートされないフィールド: %s" #, python-format msgid "" @@ -8374,6 +9216,10 @@ msgstr "" "スイッチ %s でサポートされないファームウェアです。スイッチでファームウェア " "v6.4 以上が実行されていることを確認してください" +#, python-format +msgid "Unsupported volume format %s" +msgstr "ボリューム形式はサポートされていません: %s " + #, python-format msgid "Unsupported volume format: %s " msgstr "ボリューム形式はサポートされていません: %s " @@ -8401,6 +9247,12 @@ msgstr "リストを更新します。volume_id が含まれません。" msgid "Updated At" msgstr "最終更新" +#, python-format +msgid "Updating volume metadata is not allowed for volumes in %s status." +msgstr "" +"ボリュームの状態が %s である場合は、ボリュームメタデータの更新は許可されませ" +"ん。" + msgid "Upload to glance of attached volume is not supported." msgstr "" "接続されたボリュームの glance へのアップロードはサポートされていません。" @@ -8419,10 +9271,7 @@ msgid "User ID" msgstr "ユーザー ID" msgid "User does not have admin privileges" -msgstr "ユーザーに管理者特権がありません" - -msgid "User is not authorized to use key manager." -msgstr "ユーザーは鍵マネージャーの使用を許可されていません。" +msgstr "ユーザーに管理者特権がありません。" msgid "User not authorized to perform WebDAV operations." msgstr "ユーザーは WebDAV 操作の実行が許可されていません。" @@ -8449,6 +9298,15 @@ msgstr "V3 のロールバック" msgid "VF is not enabled." msgstr "VF は有効になっていません。" +msgid "VNX Cinder driver does not support multiple replication targets." +msgstr "" +"VNX Cinder ドライバーは複数のレプリケーションターゲットをサポートしていませ" +"ん。" + +#, python-format +msgid "VPool %(name)s ( %(vpooltype)s ) : not found" +msgstr "Pool %(name)s ( %(vpooltype)s ) : 見つかりませんでした。" + #, python-format msgid "VV Set %s does not exist." msgstr "VV セット %s は存在しません。" @@ -8475,7 +9333,7 @@ msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "%(param_string)s の値 %(param)s がブール値ではありません。" msgid "Value required for 'scality_sofs_config'" -msgstr "'scality_sofs_config' の値が必要です" +msgstr "'scality_sofs_config' の値が必要です。" #, python-format msgid "ValueError: %s" @@ -8513,6 +9371,16 @@ msgstr "宛先 %s のボリュームコピージョブが失敗しました。" msgid "Volume %(deviceID)s not found." msgstr "ボリューム %(deviceID)s が見つかりません。" +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" +"ボリューム %(name)s が見つかりませんでした。すでに削除されている可能性があり" +"ます。" + +#, python-format +msgid "Volume %(name)s not found" +msgstr "ボリューム %(name)s が見つかりません。" + #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " @@ -8525,6 +9393,34 @@ msgstr "" msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "ボリューム %(name)s は VNX で作成されましたが、%(state)s 状態です。" +#, python-format +msgid "Volume %(name)s was not deactivated in time." 
+msgstr "ボリューム %(name)s は時間内に非アクティブになりませんでした。" + +#, python-format +msgid "" +"Volume %(name)s: clone failed\n" +"%(err)s" +msgstr "" +"ボリューム %(name)s: クローンが失敗しました。\n" +"%(err)s" + +#, python-format +msgid "" +"Volume %(name)s: create failed\n" +"%(err)s" +msgstr "" +"ボリューム %(name)s: 作成に失敗しました。\n" +"%(err)s" + +#, python-format +msgid "" +"Volume %(name)s: delete failed\n" +"%(err)s" +msgstr "" +"ボリューム %(name)s: 削除が失敗しました。\n" +"%(err)s" + #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "ボリューム %(vol)s をプール %(pool)s に作成できませんでした。" @@ -8533,20 +9429,30 @@ msgstr "ボリューム %(vol)s をプール %(pool)s に作成できません msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "ボリューム%(vol1)s が snapshot.volume_id %(vol2)s と一致しません。" +#, python-format +msgid "Volume %(vol_id)s status must be %(statuses)s" +msgstr "ボリューム %(vol_id)s の状態は %(statuses)s でなければいけません。" + +#, python-format +msgid "Volume %(vol_id)s status must be available to extend." +msgstr "" +"ボリューム %(vol_id)s を拡張するには、状態が「利用可能」でなければいけませ" +"ん。" + #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" -"読み取り専用フラグを更新するには、ボリューム %(vol_id)s の状況が「使用可能」" -"でなければなりませんが、現在の状況は %(vol_status)s です。" +"読み取り専用フラグを更新するには、ボリューム %(vol_id)s の状態が「使用可能」" +"でなければなりませんが、現在の状態は %(vol_status)s です。" #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" -"ボリューム %(vol_id)s の状況は「使用可能」でなければなりませんが、現在の状況" +"ボリューム %(vol_id)s の状態は「使用可能」でなければなりませんが、現在の状態" "は %(vol_status)s です。" #, python-format @@ -8593,6 +9499,26 @@ msgstr "ボリューム %(volume_id)s 複製エラー: %(reason)s" msgid "Volume %(volume_name)s is busy." msgstr "ボリューム %(volume_name)s は使用中です。" +#, python-format +msgid "" +"Volume %(volume_name)s: expand failed\n" +"%(err)s" +msgstr "" +"ボリューム %(volume_name)s: 拡張が失敗しました。\n" +"%(err)s" + +#, python-format +msgid "" +"Volume %(volume_name)s: update failed\n" +"%(err)s" +msgstr "" +"ボリューム %(volume_name)s: 更新が失敗しました。\n" +"%(err)s" + +#, python-format +msgid "Volume %s : not found" +msgstr "ボリューム %s: 見つかりませんでした。" + #, python-format msgid "Volume %s could not be created from source volume." msgstr "ボリューム %s をソースボリュームから作成できませんでした。" @@ -8651,18 +9577,14 @@ msgstr "" "ボリューム %s がオンラインです。OpenStack を使用して管理するために、ボリュー" "ムをオフラインに設定してください。" -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "" -"ボリューム %s の移行と追加を行うことはできず、整合性グループに含まれることは" -"できず、スナップショットを持つこともできません。" - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "ボリューム %s は整合性グループの一部であってはなりません。" +#, python-format +msgid "Volume %s not found" +msgstr "ボリューム %s が見つかりません。" + #, python-format msgid "Volume %s not found." msgstr "ボリューム %s が見つかりません。" @@ -8681,18 +9603,12 @@ msgstr "ボリューム (%s) は既にアレイ上にあります。" #, python-format msgid "Volume Group %s does not exist" -msgstr "ボリュームグループ %s は存在しません" +msgstr "ボリュームグループ %s は存在しません。" #, python-format msgid "Volume Type %(id)s already exists." msgstr "ボリューム種別 %(id)s は既に存在します。" -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"ボリュームタイプ %(type_id)s には、キー %(id)s に関する追加の仕様がありませ" -"ん。" - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -8743,12 +9659,16 @@ msgstr "ボリューム参照の抽出中にボリュームの作成に失敗し #, python-format msgid "Volume device file path %s does not exist." 
-msgstr "ボリュームデバイスのファイルパス %s が存在しません" +msgstr "ボリュームデバイスのファイルパス %s が存在しません。" #, python-format msgid "Volume device not found at %(device)s." msgstr "%(device)s でボリュームデバイスが見つかりません。" +#, python-format +msgid "Volume does not exists %s." +msgstr "ボリュームが存在しません: %s" + #, python-format msgid "Volume driver %s not initialized." msgstr "ボリュームドライバー %s が初期化されていません。" @@ -8767,6 +9687,13 @@ msgstr "" msgid "Volume has children and cannot be deleted!" msgstr "ボリュームには子が含まれており、削除できません。" +#, python-format +msgid "Volume in group %s is attached. Need to detach first." +msgstr "グループ %s のボリュームが接続されています。まず切り離してください。" + +msgid "Volume in group still has dependent snapshots." +msgstr "グループ内のボリュームには、まだ従属スナップショットがあります。" + #, python-format msgid "Volume is attached to a server. (%s)" msgstr "ボリュームがサーバーに追加されています (%s)。" @@ -8778,11 +9705,15 @@ msgid "Volume is not available." msgstr "ボリュームが利用できません。" msgid "Volume is not local to this node" -msgstr "ボリュームは、このノードに対してローカルではありません" +msgstr "ボリュームは、このノードに対してローカルではありません。" msgid "Volume is not local to this node." msgstr "ボリュームがこのノードに対してローカルではありません。" +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "バックエンド '%s' のボリューム・マネージャーは存在しません。" + msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." @@ -8833,7 +9764,7 @@ msgid "Volume not found." msgstr "ボリュームが見つかりません。" msgid "Volume not unique." -msgstr "ボリュームが一意でありません" +msgstr "ボリュームが一意ではありません。" msgid "Volume not yet assigned to host." msgstr "ボリュームがまだホストに割り当てられていません。" @@ -8847,7 +9778,7 @@ msgstr "%(volume_id)s のボリューム複製が見つかりませんでした #, python-format msgid "Volume service %s failed to start." -msgstr "ボリュームサービス %s が起動できませんでした" +msgstr "ボリュームサービス %s が起動できませんでした。" msgid "Volume should have agent-type set as None." msgstr "ボリュームには agent-type として None を設定する必要があります。" @@ -8892,36 +9823,38 @@ msgstr "" msgid "Volume size must be a multiple of 1 GB." msgstr "ボリュームサイズは 1 GB の倍数である必要があります。" -msgid "Volume size must be multiple of 1 GB." -msgstr "ボリュームサイズは 1 GB の倍数である必要があります。" - msgid "Volume size must multiple of 1 GB." msgstr "ボリュームサイズは 1 GB の倍数である必要があります" #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" -"ボリュームのボリューム状況は「使用可能」でなければなりませんが、現在の状況は " -"%s です" +"ボリュームの状態は「使用可能」でなければなりませんが、現在の状態は %s です。" #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -"スナップショットに関しては、ボリューム状況が「使用可能」または「使用中」でな" -"ければなりません (現在は %s です)" +"スナップショットに関しては、ボリュームの状態が「使用可能」または「使用中」で" +"なければなりません (現在は %s です)" msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "ボリューム状況は「使用可能」または「使用中」でなければなりません。" +msgstr "ボリュームの状態は「使用可能」または「使用中」でなければなりません。" #, python-format msgid "Volume status must be %s to reserve." msgstr "ボリュームを予約するにはボリューム状態が %s である必要があります。" msgid "Volume status must be 'available'." -msgstr "ボリューム状況は「使用可能」でなければなりません。" +msgstr "ボリュームの状態は「使用可能」でなければなりません。" + +#, python-format +msgid "Volume status must be available for snapshot %(id)s. (is %(status)s)" +msgstr "" +"スナップショット %(id)s に関しては、ボリュームの状態が「利用可能」でなければ" +"いけません。 (現在は %(status)s です)" msgid "Volume to Initiator Group mapping already exists" -msgstr "ボリュームからイニシエーターグループへのマッピングは既に存在します" +msgstr "ボリュームからイニシエーターグループへのマッピングは既に存在します。" #, python-format msgid "" @@ -8929,10 +9862,10 @@ msgid "" "is \"%s\"." 
msgstr "" "バックアップ対象のボリュームが利用可能か使用中である必要がありますが、現在の" -"状況は \"%s\" です。" +"状態は \"%s\" です。" msgid "Volume to be restored to must be available" -msgstr "復元するボリュームは「使用可能」でなければなりません" +msgstr "復元するボリュームは「使用可能」でなければなりません。" #, python-format msgid "Volume type %(volume_type_id)s could not be found." @@ -8974,8 +9907,8 @@ msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "名前 %(volume_type_name)s を持つボリューム種別が見つかりませんでした。" #, python-format -msgid "Volume with volume id %s does not exist." -msgstr "ボリューム ID %s のボリュームが存在しません。" +msgid "Volume%s: not found" +msgstr "ボリューム %s: 見つかりませんでした。" #, python-format msgid "" @@ -8991,18 +9924,10 @@ msgstr "" "ボリューム %(volumeName)s がストレージグループ %(sgGroupName)s に追加されませ" "んでした。" -#, python-format -msgid "Volume: %s could not be found." -msgstr "ボリューム %s が見つかりませんでした。" - #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "ボリューム: %s はすでに Cinder によって管理されています。" -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "" -"ボリュームは、このサイズ (メガバイト) のオブジェクトにチャンク化されます。" - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" @@ -9036,6 +9961,14 @@ msgstr "" msgid "We should not do switch over on primary array." msgstr "プライマリーアレイで切り替えを行ってはなりません。" +#, python-format +msgid "Worker for %(type)s %(id)s already exists." +msgstr "%(type)s のワーカー %(id)s はすでに存在します。" + +#, python-format +msgid "Worker with %s could not be found." +msgstr "%s の ID を持つワーカーを見つけることができませんでした。" + msgid "Wrong resource call syntax" msgstr "正しくないリソース呼び出し構文" @@ -9081,6 +10014,13 @@ msgstr "ゾーン" msgid "Zoning Policy: %s, not recognized" msgstr "ゾーニングポリシー %s は認識されていません" +#, python-format +msgid "" +"[%(group)s] Invalid %(protocol)s ports %(port)s specified for io_port_list." +msgstr "" +"[%(group)s] 無効な %(protocol)s ポート %(port)s が io_port_list に指定されま" +"した。" + #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "_create_and_copy_vdisk_data: vdisk %s の属性を取得できませんでした。" @@ -9239,6 +10179,9 @@ msgstr "" "_find_pool、eternus_pool:%(eternus_pool)s、EnumerateInstances、ETERNUS に接続" "できません。" +msgid "_get_async_url: Invalid URL." +msgstr "_get_async_url: 正しくないURLです。" + #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " @@ -9361,7 +10304,7 @@ msgstr "_get_unmanaged_replay: %s という名前のスナップショットが #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay: ボリューム ID %s が見つかりません" +msgstr "_get_unmanaged_replay: ボリューム ID %s が見つかりません。" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: ソース名を指定する必要があります。" @@ -9526,7 +10469,7 @@ msgstr "" "%(path)s を取得できません。" msgid "being attached by different mode" -msgstr "別のモードで接続しています" +msgstr "別のモードで接続しています。" #, python-format msgid "call failed: %r" @@ -9562,6 +10505,12 @@ msgstr "JSON を解釈できません" msgid "cg-%s" msgstr "cg: %s" +msgid "" +"cg_creating_from_src must be called with cg_id or cgsnapshot_id parameter." +msgstr "" +"cg_creating_from_src は cg_id または cgsnapshot_id パラメーターと共に呼び出す" +"必要があります。" + msgid "cgsnapshot assigned" msgstr "割り当てられた cgsnapshot" @@ -9590,10 +10539,23 @@ msgstr "check_for_setup_error: システム名を判別できません。" msgid "check_hypermetro_exist error." msgstr "check_hypermetro_exist エラー。" +msgid "cinder-all is deprecated in Newton and will be removed in Ocata." 
+msgstr "cinder-all は Newton で非推奨となり、 Ocata で削除される予定です。" + #, python-format msgid "clone depth exceeds limit of %s" msgstr "複製の深さが限度 %s を超えています" +msgid "cluster assigned" +msgstr "割り当てられたクラスター" + +msgid "cluster changed" +msgstr "変更されたクラスター" + +#, python-format +msgid "consistency group with name: %s already exists" +msgstr "名前 %s を持つ整合性グループはすでに存在します。" + msgid "consistencygroup assigned" msgstr "割り当てられた整合性グループ" @@ -9603,6 +10565,27 @@ msgstr "変更された整合性グループ" msgid "control_location must be defined" msgstr "control_location を定義する必要があります" +msgid "coprhd_hostname is not set in cinder configuration" +msgstr "cinder 設定で coprhd_hostname が設定されていません。" + +msgid "coprhd_password is not set in cinder configuration" +msgstr "cinder 設定で coprhd_password が設定されていません。" + +msgid "coprhd_port is not set in cinder configuration" +msgstr "cinder 設定で coprhd_port が設定されていません。" + +msgid "coprhd_project is not set in cinder configuration" +msgstr "cinder 設定で coprhd_project が設定されていません。" + +msgid "coprhd_tenant is not set in cinder configuration" +msgstr "cinder 設定で coprhd_tenant が設定されていません。" + +msgid "coprhd_username is not set in cinder configuration" +msgstr "cinder 設定で coprhd_username が設定されていません。" + +msgid "coprhd_varray is not set in cinder configuration" +msgstr "cinder 設定で coprhd_varray が設定されていません。" + msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume、ETERNUS にソースボリュームが存在しません。" @@ -9640,13 +10623,6 @@ msgstr "" "create_consistencygroup_from_src は 1 つの cgsnapshot ソースまたは整合性グ" "ループソースのみをサポートします。複数ソースは使用できません。" -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src は 1 つの cgsnapshot source または整合性グ" -"ループソースのみをサポートします。複数ソースは使用できません。" - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: ソース vdisk %(src)s (%(src_id)s) は存在しません。" @@ -9700,8 +10676,8 @@ msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" -"create_snapshot: スナップショットのボリューム状況は「使用可能」または「使用" -"中」でなければなりません。無効な状況は %s です。" +"create_snapshot: スナップショットのボリュームの状態は「使用可能」または「使用" +"中」でなければなりません。無効な状態は %s です。" msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: ソースボリュームの取得に失敗しました。" @@ -9752,11 +10728,8 @@ msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" -"create_volume_from_snapshot: ボリュームを作成するには、スナップショット状況が" -"「使用可能」でなければなりません。無効な状況は %s です。" - -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "create_volume_from_snapshot: ソースと宛先のサイズが異なっています。" +"create_volume_from_snapshot: ボリュームを作成するには、スナップショットの状態" +"が「使用可能」でなければなりません。無効な状態は %s です。" msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " @@ -9765,9 +10738,6 @@ msgstr "" "create_volume_from_snapshot: ボリュームサイズが、スナップショットベースボ" "リュームと異なります。" -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "重複排除と自動階層化の両方を有効にすることはできません。" - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -9805,14 +10775,11 @@ msgid "deleting volume %(volume_name)s that has snapshot" msgstr "スナップショットを含むボリューム %(volume_name)s の削除中" msgid "detach snapshot from remote node" -msgstr "リモートノードからスナップショットを切断します" +msgstr "リモートノードからスナップショットを切断します。" msgid "do_setup: No configured nodes." 
msgstr "do_setup: 構成されたノードがありません。" -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries は 0 以上でなければなりません" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -9822,6 +10789,18 @@ msgstr "" "クトの MD5 %(etag)s が Swift に送信されたオブジェクトの MD5 %(md5)s と同じで" "はありません" +#, python-format +msgid "" +"error: Incorrect value of new size: %(new_size_in_gb)s GB\n" +"New size must be greater than current size: %(current_size)s GB" +msgstr "" +"エラー: 新しいサイズの値が正しくありません: %(new_size_in_gb)s GB\n" +"新しいサイズは現在のサイズより大きくなければいけません。現在のサイズ: " +"%(current_size)s GB" + +msgid "error: task list is empty, no task response found" +msgstr "エラー: タスクリストが空です。タスクの応答が見つかりませんでした。" + #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" @@ -9856,13 +10835,13 @@ msgstr "偽" #, python-format msgid "file already exists at %s" -msgstr "ファイルは %s に既に存在します" +msgstr "ファイルは %s に既に存在します。" msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "SheepdogIOWrapper は fileno をサポートしません" +msgstr "SheepdogIOWrapper は fileno をサポートしません。" msgid "fileno() not supported by RBD()" -msgstr "fileno() は RBD() でサポートされていません" +msgstr "fileno() は RBD() でサポートされていません。" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" @@ -9909,6 +10888,9 @@ msgstr "" msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: ボリューム %s の属性の取得に失敗しました。" +msgid "gid is null. FSS failed to delete cgsnapshot." +msgstr "gid が null です。 FSS は cgsnapshot の削除に失敗しました。" + msgid "glance_metadata changed" msgstr "変更された glance_metadata" @@ -9928,33 +10910,41 @@ msgstr "" "gpfs_images_share_mode は copy_on_write に設定されていますが、%(vol)s と " "%(img)s は異なるファイルセットに属しています。" +#, python-format +msgid "group_type must be provided to create group %(name)s." +msgstr "" +"グループ %(name)s を作成するには、group_type を指定する必要があります。" + +msgid "group_type_id cannot be None" +msgstr "group_type_id を None に設定することはできません。" + #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "cinder.conf で hgst_group %(grp)s と hgst_user %(usr)s が適切なユーザーとグ" -"ループに合致する必要があります" +"ループに合致する必要があります。" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "cinder.conf で指定した hgst_net %(net)s がクラスターで見つかりません" +msgstr "cinder.conf で指定した hgst_net %(net)s がクラスターで見つかりません。" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." 
msgstr "" "cinder.conf で hgst_redundancy を 0 (HA でない) または 1 (HA) に設定する必要" -"があります" +"があります。" msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr " cinder.conf で hgst_space_mode は octal/int である必要があります" +msgstr " cinder.conf で hgst_space_mode は octal/int である必要があります。" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "" -"hgst_storage サーバー %(svr)s で : の形式が設定されていません" +"hgst_storage サーバー %(svr)s で : の形式が設定されていません。" msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "cinder.conf で hgst_storage_servers を定義する必要があります" +msgstr "cinder.conf で hgst_storage_servers を定義する必要があります。" msgid "" "http service may have been abruptly disabled or put to maintenance state in " @@ -9964,7 +10954,7 @@ msgstr "" "す。" msgid "id cannot be None" -msgstr "ID を None にすることはできません" +msgstr "ID を None にすることはできません。" #, python-format msgid "image %s not found" @@ -10016,21 +11006,18 @@ msgstr "iscsiadm の実行が失敗しました。" msgid "key manager error: %(reason)s" msgstr "鍵マネージャーのエラー: %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key が定義されていません" - msgid "limit param must be an integer" -msgstr "limit パラメーターは整数でなければなりません" +msgstr "limit パラメーターは整数でなければなりません。" msgid "limit param must be positive" -msgstr "limit パラメーターは正でなければなりません" +msgstr "limit パラメーターは正でなければなりません。" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" +"this volume from existing hosts before importing." msgstr "" "manage_existing はホストに接続されたボリュームを管理できません。インポートを" -"行う前にこのボリュームを既存のホストから切断してください" +"行う前にこのボリュームを既存のホストから切断してください。" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" @@ -10042,11 +11029,15 @@ msgid "" "%(vol)s" msgstr "" "manage_existing_snapshot: ボリューム %(vol)s 上で既存のリプレー %(ss)s の管理" -"でエラーが発生しました" +"でエラーが発生しました。" #, python-format msgid "marker [%s] not found" -msgstr "マーカー [%s] が見つかりません" +msgstr "マーカー [%s] が見つかりません。" + +#, python-format +msgid "marker not found: %s" +msgstr "マーカーが見つかりません: %s" #, python-format msgid "mdiskgrp missing quotes %s" @@ -10067,22 +11058,24 @@ msgid "mock" msgstr "モック" msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs がインストールされていません" +msgstr "mount.glusterfs がインストールされていません。" #, python-format msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage が名前 %s を持つ複数のリソースを発見しました" +msgstr "drbdmanage が名前 %s を持つ複数のリソースを発見しました。" #, python-format msgid "multiple resources with snapshot ID %s found" -msgstr "スナップショット ID %s を持つ複数のリソースが見つかりました" +msgstr "スナップショット ID %s を持つ複数のリソースが見つかりました。" msgid "name cannot be None" -msgstr "名前を None に設定することはできません" +msgstr "名前を None に設定することはできません。" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." 
-msgstr "naviseccli_path: NAVISECCLI ツール %(path)s が見つかりませんでした。" +msgid "no \"access-key\" field" +msgstr " \"access-key\" フィールドがありません。" + +msgid "no \"user\" field" +msgstr " \"user\" フィールドがありません。" #, python-format msgid "no REPLY but %r" @@ -10090,15 +11083,15 @@ msgstr "REPLY がないものの %r があります" #, python-format msgid "no snapshot with id %s found in drbdmanage" -msgstr "drbdmanage で ID %s を持つスナップショットが見つかりません" +msgstr "drbdmanage で ID %s を持つスナップショットが見つかりません。" #, python-format msgid "not exactly one snapshot with id %s" -msgstr "ID %s を持つスナップショットは 1つだけではありません" +msgstr "ID %s を持つスナップショットは 1つだけではありません。" #, python-format msgid "not exactly one volume with id %s" -msgstr "ID %s を持つボリュームは 1 つだけではありません" +msgstr "ID %s を持つボリュームは 1 つだけではありません。" #, python-format msgid "obj missing quotes %s" @@ -10108,7 +11101,7 @@ msgid "open_access_enabled is not off." msgstr "open_access_enabled がオフになっていません。" msgid "progress must be an integer percentage" -msgstr "進行状況は整数のパーセンテージでなければなりません" +msgstr "進行状況は整数のパーセンテージでなければなりません。" msgid "promote_replica not implemented." msgstr "promote_replica が実装されていません。" @@ -10141,7 +11134,12 @@ msgstr "" msgid "rados and rbd python libraries not found" msgstr "" -"rados python ライブラリーおよび rbd python ライブラリーが見つかりません" +"rados python ライブラリーおよび rbd python ライブラリーが見つかりません。" + +msgid "rawtimestamp is null. FSS failed to create_volume_from_snapshot." +msgstr "" +"rawtimestamp が null です。 FSS は create_volume_from_snapshot に失敗しまし" +"た。" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" @@ -10149,14 +11147,6 @@ msgstr "" "read_deleted には 'no', 'yes', 'only' のいずれかのみを指定できます。%r は指定" "できません" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "replication_device は backend: %s で設定する必要があります。" - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "backend_id [%s] の replication_device が欠落しています。" - #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover が失敗しました。%s が見つかりません。" @@ -10217,9 +11207,6 @@ msgstr "san_ip が設定されていません。" msgid "san_ip must be set" msgstr "san_ip を設定する必要があります" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip: 必須フィールド構成。san_ip が設定されていません。" - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." 
@@ -10228,18 +11215,19 @@ msgstr "" "バーに設定されていません。この情報を設定して、cinder-volume サービスを再開し" "てください。" +msgid "" +"scaleio_verify_server_certificate is True but " +"scaleio_server_certificate_path is not provided in cinder configuration" +msgstr "" +"scaleio_verify_server_certificate が True ですが、Cinder設定で " +"scaleio_server_certificate_path が指定されていません。" + msgid "serve() can only be called once" -msgstr "serve() は一度しか呼び出せません" - -msgid "service not found" -msgstr "サービスが見つかりません" - -msgid "snapshot does not exist" -msgstr "スナップショットが存在しません" +msgstr "serve() は一度しか呼び出せません。" #, python-format -msgid "snapshot id:%s not found" -msgstr "スナップショット ID %s が見つかりません" +msgid "snapshot with the name: %s Not Found" +msgstr "名前 %s を持つスナップショット: 見つかりませんでした。" #, python-format msgid "snapshot-%s" @@ -10251,13 +11239,9 @@ msgstr "割り当てられたスナップショット" msgid "snapshots changed" msgstr "変更されたスナップショット" -#, python-format -msgid "source vol id:%s not found" -msgstr "ソースボリューム ID %s が見つかりません" - #, python-format msgid "source volume id:%s is not replicated" -msgstr "ソースボリューム ID %s が複製されていません" +msgstr "ソースボリューム ID %s が複製されていません。" msgid "source-name cannot be empty." msgstr "source-name は空にできません。" @@ -10266,12 +11250,15 @@ msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "" "source-name 形式は 'vmdk_path@vm_inventory_path' でなければなりません。" +msgid "specs must be a dictionary." +msgstr "スペックはディクショナリーである必要があります。" + #, python-format msgid "status must be %s and" msgstr "状態は %s である必要があります" msgid "status must be available" -msgstr "状況は「使用可能」でなければなりません" +msgstr "状態は「使用可能」でなければなりません。" msgid "stop_hypermetro error." msgstr "stop_hypermetro エラー。" @@ -10285,6 +11272,10 @@ msgstr "sync_hypermetro エラー。" msgid "sync_replica not implemented." msgstr "sync_replica が実装されていません。" +#, python-format +msgid "target=%(target)s, lun=%(lun)s" +msgstr "target=%(target)s, lun=%(lun)s" + #, python-format msgid "" "targetcli not installed and could not create default directory " @@ -10297,7 +11288,8 @@ msgid "terminate_connection: Failed to get host name from connector." msgstr "terminate_connection: コネクターからホスト名を取得できませんでした。" msgid "timeout creating new_volume on destination host" -msgstr "宛先ホスト上に new_volume を作成しているときにタイムアウトになりました" +msgstr "" +"宛先ホスト上に new_volume を作成しているときにタイムアウトになりました。" msgid "too many body keys" msgstr "本体キーが多すぎます" @@ -10318,11 +11310,11 @@ msgstr "アンマウント: : ターゲットが使用中です" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot: %s という名前のスナップショットが見つかりません" +msgstr "unmanage_snapshot: %s という名前のスナップショットが見つかりません。" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot: ボリューム ID %s が見つかりません" +msgstr "unmanage_snapshot: ボリューム ID %s が見つかりません。" #, python-format msgid "unrecognized argument %s" @@ -10335,16 +11327,32 @@ msgstr "サポートされない圧縮アルゴリズム: %s" msgid "valid iqn needed for show_target" msgstr "show_target に必要とされる有効な iqn" +#, python-format +msgid "varray %s: not found" +msgstr "varray %s: 見つかりませんでした。" + #, python-format msgid "vdisk %s is not defined." msgstr "vdisk %s が定義されていません。" +msgid "vid is null. FSS failed to create snapshot." +msgstr "vid が null です。 FSS はスナップショットの作成に失敗しました。" + +msgid "vid is null. FSS failed to create_volume_from_snapshot." +msgstr "vid が null です。 FSS は create_volume_from_snapshot に失敗しました。" + +msgid "vid is null. FSS failed to delete snapshot" +msgstr "vid が null です。 FSS はスナップショットの削除に失敗しました。" + +msgid "vid is null. FSS failed to delete volume." 
+msgstr "vid が null です。 FSS はボリュームの削除に失敗しました。" + msgid "vmemclient python library not found" msgstr "vmemclient python ライブラリーが見つかりません" #, python-format msgid "volume %s not found in drbdmanage" -msgstr "drbdmanage でボリューム %s が見つかりません" +msgstr "drbdmanage でボリューム %s が見つかりません。" msgid "volume assigned" msgstr "割り当てられたボリューム" @@ -10352,29 +11360,23 @@ msgstr "割り当てられたボリューム" msgid "volume changed" msgstr "変更されたボリューム" -msgid "volume does not exist" -msgstr "ボリュームが存在しません" - msgid "volume is already attached" -msgstr "ボリュームは既に接続されています" +msgstr "ボリュームは既に接続されています。" msgid "volume is not local to this node" -msgstr "ボリュームは、このノードに対してローカルではありません" +msgstr "ボリュームは、このノードに対してローカルではありません。" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "ボリュームサイズ %(volume_size)d は、サイズ %(size)d のバックアップを復元する" -"には小さすぎます" +"には小さすぎます。" #, python-format msgid "volume size %d is invalid." msgstr "ボリュームサイズ %d は無効です。" -msgid "volume_type cannot be None" -msgstr "volume_type に None を設定することはできません" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" @@ -10382,7 +11384,7 @@ msgstr "" "ます。" msgid "volume_type_id cannot be None" -msgstr "volume_type_id を None に設定することはできません" +msgstr "volume_type_id を None に設定することはできません。" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." @@ -10395,6 +11397,11 @@ msgid "volume_types must be provided to create consistency group %s." msgstr "" "整合性グループ %s を作成するには、volume_types を指定する必要があります。" +#, python-format +msgid "volume_types must be provided to create group %(name)s." +msgstr "" +"グループ %(name)s を作成するには、volume_types を指定する必要があります。" + msgid "volumes assigned" msgstr "割り当てられたボリューム" @@ -10412,6 +11419,3 @@ msgid "" msgstr "" "zfssa_manage_policy プロパティーは 'strict' または 'loose' に設定する必要があ" "ります。現在の値は %s です。" - -msgid "{} is not a valid option." -msgstr "{} は有効なオプションではありません。" diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po index 60c51a0dc..fe7e2b6b8 100644 --- a/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po +++ b/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev487\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-30 03:17+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -100,10 +100,6 @@ msgstr "" msgid "Array Serial Number must be in the file %(fileName)s." msgstr "배열 일련 번호가 %(fileName)s 파일에 있어야 합니다." -#, python-format -msgid "Array mismatch %(myid)s vs %(arid)s" -msgstr "%(myid)s과(와) %(arid)s 배열 불일치" - #, python-format msgid "Array query failed - No response (%d)!" msgstr "배열 쿼리 실패 - 응답이 없음(%d)!" @@ -175,10 +171,6 @@ msgstr "Nova를 호출하여 스냅샷을 작성하는 데 실패" msgid "Call to json.loads() raised an exception: %s." msgstr "json.loads() 호출에서 예외 발생: %s." -#, python-format -msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s." -msgstr "lun %(lun)s을(를) 일관성 그룹 %(cg_name)s에 추가할 수 없습니다." - #, python-format msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." msgstr "%(target_iqn)s(으)로 %(target_ip)s에서 검색할 수 없습니다." @@ -186,14 +178,6 @@ msgstr "%(target_iqn)s(으)로 %(target_ip)s에서 검색할 수 없습니다." msgid "Can not open the recent url, login again." msgstr "최신 url을 열 수 없습니다. 
다시 로그인하십시오." -#, python-format -msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s." -msgstr "일관성 그룹 %(cg_name)s에 새 LUNs %(luns)s을(를) 둘 수 없습니다." - -#, python-format -msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." -msgstr "일관성 그룹 %(cg_name)s의 LUNs %(luns)s을(를) 제거할 수 없습니다." - #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "%(key)s을(를) 맵핑할 볼륨을 찾을 수 없음, %(msg)s" @@ -369,10 +353,6 @@ msgstr "하트비트를 조정 백엔드에 보내는 중에 연결 오류가 msgid "Connection to %s failed and no secondary!" msgstr "%s에 연결에 실패했으며 보조가 없습니다." -#, python-format -msgid "Consistency group %s: create failed" -msgstr "일관성 그룹 %s: 작성 실패" - #, python-format msgid "Controller GET failed (%d)" msgstr "제어기 GET 실패(%d)" @@ -412,22 +392,6 @@ msgstr "이미지 볼륨 %(id)s을(를) 삭제할 수 없습니다." msgid "Could not delete the image volume %(id)s." msgstr "이미지 볼륨 %(id)s을(를) 삭제할 수 없습니다." -#, python-format -msgid "" -"Could not do delete of snapshot %s on filer, falling back to exec of \"rm\" " -"command." -msgstr "" -"파일 프로그램에서 스냅샷 %s을(를) 삭제할 수 없어, \"rm\" 명령의 실행으로 장" -"애 복구할 수 없습니다." - -#, python-format -msgid "" -"Could not do delete of volume %s on filer, falling back to exec of \"rm\" " -"command." -msgstr "" -"파일 프로그램에서 볼륨 %s을(를) 삭제할 수 없어, \"rm\" 명령의 실행으로 장애 " -"복구할 수 없습니다." - #, python-format msgid "Could not find a host for consistency group %(group_id)s." msgstr "일관성 그룹 %(group_id)s의 호스트를 찾을 수 없습니다." @@ -475,10 +439,6 @@ msgstr "스케줄러 옵션 파일 %(filename)s의 통계를 낼 수 없습니 msgid "Could not validate device %s" msgstr "%s 장치를 검증할 수 없음" -#, python-format -msgid "Create cg snapshot %s failed." -msgstr "cg 스냅샷 %s 작성에 실패했습니다." - #, python-format msgid "" "Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " @@ -487,10 +447,6 @@ msgstr "" "이미지 %(image_id)s의 clone_image_volume: %(volume_id)s 작성 실패(예외: " "%(except)s)" -#, python-format -msgid "Create consistency group %s failed." -msgstr "일관성 그룹 %s을(를) 생성하는 데 실패했습니다." - #, python-format msgid "" "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." @@ -566,14 +522,6 @@ msgid "" msgstr "" "기본 볼륨 유형을 찾을 수 없습니다. default_volume_type 구성을 확인하십시오." -#, python-format -msgid "Delete cgsnapshot %s failed." -msgstr "cgsnapshot %s 삭제에 실패했습니다." - -#, python-format -msgid "Delete consistency group %s failed." -msgstr "일관성 그룹 %s 삭제에 실패했습니다. " - msgid "Delete consistency group failed to update usages." msgstr "일관성 그룹을 삭제하는 중 사용법을 업데이트하지 못했습니다." @@ -692,9 +640,6 @@ msgstr "TypeError 오류. %s" msgid "Error activating LV" msgstr "LV 활성화 오류" -msgid "Error adding HBA to server" -msgstr "HBA를 서버에 추가하는 중 오류 발생" - #, python-format msgid "Error changing Storage Profile for volume %(original)s to %(name)s" msgstr "" @@ -717,12 +662,6 @@ msgstr "" "'%(method)s', %(extra)s의 Glance 서버 '%(netloc)s'에 접속하는 중 오류가 발생" "했습니다." -msgid "Error copying key." -msgstr "키 복사 오류." - -msgid "Error creating Barbican client." -msgstr "Barbican 클라이언트 생성 오류." - #, python-format msgid "Error creating QOS rule %s" msgstr "QOS 규칙 %s 작성 오류" @@ -739,9 +678,6 @@ msgstr "chap 레코드를 작성하는 중에 오류가 발생했습니다." msgid "Error creating cloned volume" msgstr "복제된 볼륨 작성 오류" -msgid "Error creating key." -msgstr "키 작성 오류." - msgid "Error creating snapshot" msgstr "스냅샷 작성 오류" @@ -755,9 +691,6 @@ msgstr "볼륨을 작성하는 중에 오류가 발생했습니다. 메시지 - msgid "Error deactivating LV" msgstr "LV 비활성화 오류" -msgid "Error deleting key." -msgstr "키 삭제 오류." - msgid "Error deleting snapshot" msgstr "스냅샷 삭제 에러" @@ -849,32 +782,15 @@ msgstr "배열, 풀, SLO 및 워크로드를 가져오는 중 오류가 발생 msgid "Error getting chap record." 
msgstr "chap 레코드를 가져오는 중에 오류가 발생했습니다." -#, python-format -msgid "Error getting iSCSI target info from EVS %(evs)s." -msgstr "EVS %(evs)s에서 iSCSI 대상을 가져오는 중에 오류가 발생했습니다." - -msgid "Error getting key." -msgstr "키 가져오기 오류." - msgid "Error getting name server info." msgstr "이름 서버 정보 가져오기 오류." -msgid "Error getting secret data." -msgstr "시크릿 데이터 가져오기 오류." - -msgid "Error getting secret metadata." -msgstr "시크릿 메타데이터 가져오기 오류." - msgid "Error getting show fcns database info." msgstr "표시 fcns 데이터베이스 정보 가져오기 오류." msgid "Error getting target pool name and array." msgstr "대상 풀 이름과 배열을 가져오는 중 오류가 발생했습니다." -#, python-format -msgid "Error happened during storage pool querying, %s." -msgstr "스토리지 풀 쿼리 중에 오류 발생, %s." - #, python-format msgid "Error has occurred: %s" msgstr "오류 발생: %s" @@ -1025,14 +941,6 @@ msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." msgstr "" "이미지 %(image_id)s에서 볼륨 %(id)s을(를) 작성하는 중에 오류가 발생했습니다." -#, python-format -msgid "Error on adding lun to consistency group. %s" -msgstr "일관성 그룹에 lun을 추가하는 중에 오류가 발생했습니다. %s" - -#, python-format -msgid "Error on enable compression on lun %s." -msgstr "lun %s에서 압축을 사용하는 중에 오류가 발생했습니다." - #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" @@ -1107,9 +1015,6 @@ msgstr "플래시 캐시 정책을 %s(으)로 설정하는 중 오류 발생 - msgid "Error starting coordination backend." msgstr "조정 백엔드를 시작하는 중에 오류가 발생했습니다." -msgid "Error storing key." -msgstr "키 저장 오류." - #, python-format msgid "Error trying to change %(opt)s from %(old)s to %(new)s" msgstr "%(opt)s을(를) %(old)s에서 %(new)s(으)로 변경하려는 중 오류 발생" @@ -1662,10 +1567,6 @@ msgstr "" msgid "Failed to find %(s)s. Result %(r)s" msgstr "%(s)s을(를) 찾지 못했습니다. 결과 %(r)s" -#, python-format -msgid "Failed to find available iSCSI targets for %s." -msgstr "%s의 사용 가능한 iSCSI 대상을 찾지 못했습니다." - msgid "Failed to get IQN!" msgstr "IQN을 가져오는 데 실패" @@ -1794,9 +1695,6 @@ msgstr "복제 장애 복구 수행 실패" msgid "Failed to present volume %(name)s (%(status)d)!" msgstr "볼륨 %(name)s (%(status)d)을(를) 표시하는 데 실패했습니다." -msgid "Failed to query migration status of LUN." -msgstr "LUN의 마이그레이션 상태를 쿼리하지 못했습니다." - msgid "Failed to re-export volume, setting to ERROR." msgstr "볼륨을 다시 내보내는 데 실패했습니다. ERROR로 설정합니다." @@ -1986,14 +1884,6 @@ msgstr "" "제공된 스냅샷 %(snapshot_id)s 메타데이터를 사용하여 %(volume_id)s 메타데이터" "를 업데이트하는 데 실패했습니다." -#, python-format -msgid "" -"Failed to update initiator data for initiator %(initiator)s and backend " -"%(backend)s" -msgstr "" -"개시자 %(initiator)s 및 백엔드 %(backend)s의 드라이버 개시자 데이터를 업데이" -"트하는 데 실패" - #, python-format msgid "Failed to update quota donating volume transfer id %s" msgstr "볼륨 전송 id %s를 제공하는 할당량을 업데이트하는 데 실패" @@ -2047,10 +1937,6 @@ msgstr "/etc/scst.conf에 쓰지 못했습니다." msgid "Failed to write persistence file: %(path)s." msgstr "영구 파일을 쓰는 데 실패: %(path)s." -#, python-format -msgid "Failed updating %(object_type)s %(object_id)s with %(update)s" -msgstr "%(object_type)s %(object_id)s을(를) %(update)s(으)로 업데이트 실패" - #, python-format msgid "" "Failed updating %(snapshot_id)s metadata using the provided volumes " @@ -2171,10 +2057,6 @@ msgstr "메소드 가져오기 오류." msgid "Get replication status for volume failed." msgstr "볼륨의 복제본 상태를 가져오는 데 실패했습니다." -#, python-format -msgid "HDP not found: %s" -msgstr "HDP를 찾을 수 없음: %s" - #, python-format msgid "Host PUT failed (%s)." msgstr "호스트 PUT 실패(%s)." 
@@ -2215,10 +2097,6 @@ msgstr "올바르지 않은 호스트 이름 %(host)s" msgid "Invalid replication target specified for failover" msgstr "장애 복구를 위해 올바르지 않은 복제 대상이 지정됨" -#, python-format -msgid "Invalid value for %(key)s, value is %(value)s." -msgstr "%(key)s의 올바르지 않은 값입니다, 값은 %(value)s입니다." - msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "복제가 적절하게 구성되지 않았으므로 장애 복구 실행에 실패했습니다." @@ -2288,10 +2166,6 @@ msgstr "볼륨 %(vol)s 스냅샷 %(snap)s의 Lun 작성 스냅샷 실패" msgid "Lun delete for %s failed!" msgstr "%s의 Lun 삭제 실패" -#, python-format -msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!" -msgstr "볼륨 %(vol)s 스냅샷 %(snap)s의 Lun 삭제 스냅샷 실패" - msgid "Lun mapping returned null!" msgstr "Lun 맵핑에서 널(null)을 리턴했습니다!" @@ -2329,10 +2203,6 @@ msgstr "" msgid "Message: %s" msgstr "메시지: %s" -#, python-format -msgid "Migration of LUN %s failed to complete." -msgstr "LUN %s의 마이그레이션을 완료하지 못했습니다." - msgid "Model update failed." msgstr "모델 업데이트에 실패했습니다." @@ -2352,10 +2222,6 @@ msgstr "%(share)s의 마운트 실패." msgid "Multiple replay profiles under name %s" msgstr "%s(이)라는 이름의 여러 재생 프로파일" -#, python-format -msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s" -msgstr "NFS 공유 %(share)s에 서비스 항목이 없음: %(svc)s -> %(hdp)s" - msgid "No CLI output for firmware version check" msgstr "펌웨어 버전 확인을 위한 CLI 출력이 없음" @@ -2378,14 +2244,6 @@ msgstr "" "작업이 필요하지 않습니다. 볼륨: %(volumeName)s이(가) 이미 slo/워크로드 조합: " "%(targetCombination)s의 일부입니다." -#, python-format -msgid "No configuration found for service: %s" -msgstr "서비스의 구성을 찾을 수 없음: %s" - -#, python-format -msgid "No configuration found for service: %s." -msgstr "서비스의 구성을 찾을 수 없음: %s." - #, python-format msgid "" "No snapshots found in database, but %(path)s has backing file " @@ -2708,9 +2566,6 @@ msgid "" "The connector does not contain the required information: wwpns is missing" msgstr "커넥터에 필수 정보가 없음: wwpns가 누락되어 있음" -msgid "The given extra_spec or valid_values is None." -msgstr "지정된 extra_spec 또는 valid_values가 None입니다." - msgid "The list of iscsi_ip_addresses is empty" msgstr "iscsi_ip_addresses 목록이 비어 있음" @@ -2934,12 +2789,6 @@ msgid "Unable to manage existing volume. Volume driver %s not initialized." msgstr "" "기존 볼륨을 관리할 수 없습니다. 볼륨 드라이버 %s이(가) 초기화되지 않았습니다." -msgid "Unable to manage_existing snapshot on a disabled service." -msgstr "사용되지 않는 서비스에서 기존 스냅샷을 관리할 수 없습니다." - -msgid "Unable to manage_existing volume on a disabled service." -msgstr "사용되지 않는 서비스에서 기존 볼륨을 관리할 수 없습니다." - #, python-format msgid "Unable to map %(vol)s to %(srv)s" msgstr "%(vol)s을(를) %(srv)s에 맵핑할 수 없음" @@ -3338,9 +3187,6 @@ msgstr "" "삭제: %(vol_id)s이(가) stdout에 실패함. stdout: %(out)s\n" " stderr: %(err)s" -msgid "delete_vol: provider location empty." -msgstr "delete_vol: 제공자 위치가 비어 있습니다." - #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "ensure_export: 스토리지에 볼륨 %s을(를) 찾을 수 없습니다." @@ -3355,10 +3201,6 @@ msgstr "볼륨 상태를 새로 고치는 중 오류 발생" msgid "horcm command timeout." msgstr "horcm 명령의 제한시간이 초과되었습니다." -#, python-format -msgid "iSCSI portal not found for service: %s" -msgstr "서비스의 iSCSI 포털을 찾을 수 없음: %s" - msgid "import pywbem failed!! pywbem is necessary for this volume driver." msgstr "" "pywbem 가져오기에 실패했습니다. 이 볼륨 드라이버에 pywbem이 필요합니다." diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po index ee873aa60..68ca63dea 100644 --- a/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po +++ b/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev522\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-31 10:23+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -65,10 +65,6 @@ msgstr "" "응답 데이터:%(res)s\n" "\n" -#, python-format -msgid "%(element)s: %(val)s" -msgstr "%(element)s: %(val)s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" @@ -106,10 +102,6 @@ msgstr "" "볼륨 '%(name)s' 의 3PAR vlun이 삭제되었지만, 다음 이유로 인해 호스트 " "'%(host)s'이(가) 삭제됨: %(reason)s" -#, python-format -msgid "AUTH properties: %(authProps)s" -msgstr "AUTH 특성: %(authProps)s" - #, python-format msgid "AUTH properties: %s." msgstr "AUTH 특성: %s." @@ -184,30 +176,14 @@ msgstr "" msgid "Availability Zones retrieved successfully." msgstr "가용 구역이 성공적으로 검색되었습니다." -#, python-format -msgid "Available services: %s" -msgstr "사용 가능한 서비스: %s" - -#, python-format -msgid "Available services: %s." -msgstr "사용 가능한 서비스: %s." - #, python-format msgid "Backend name is %s." msgstr "백엔드 이름이 %s입니다." -#, python-format -msgid "Backend type: %s" -msgstr "백엔드 유형: %s" - #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "백업 VM: %(backing)s의 이름이 %(new_name)s(으)로 변경되었습니다." -#, python-format -msgid "Backing consistency group snapshot %s available for deletion" -msgstr "삭제할 수 있는 일관성 그룹 스냅샷 %s 백업" - msgid "Backing not available, no operation to be performed." msgstr "백업을 사용할 수 없습니다. 작업이 수행되지 않습니다." @@ -235,10 +211,6 @@ msgstr "" msgid "Backup service: %s." msgstr "백업 서비스: %s." -#, python-format -msgid "Bandwidth limit is: %s." -msgstr "대역폭 한계: %s." - #, python-format msgid "Begin backup of volume %s." msgstr "볼륨 %s의 백업을 시작하십시오." @@ -268,10 +240,6 @@ msgstr "CONCERTO 버전: %s" msgid "Calling os-brick to detach ScaleIO volume." msgstr "ScaleIO 볼륨의 연결을 해제하기 위해 os-brick 호출." -#, python-format -msgid "Cancelling Migration from LUN %s." -msgstr "LUN %s에서 마이그레이션 취소." - #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because cluster " @@ -312,15 +280,6 @@ msgstr "" "볼륨이 다른 백엔드에 있으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공" "할 수 없습니다." -#, python-format -msgid "" -"Capacity stats for SRP pool %(poolName)s on array %(arrayName)s " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu" -msgstr "" -"배열 %(arrayName)s의 SRP 풀 %(poolName)s에 대한 용량 통계 total_capacity_gb=" -"%(total_capacity_gb)lu, free_capacity_gb=%(free_capacity_gb)lu" - #, python-format msgid "Cgsnapshot %s: creating." msgstr "Cgsnapshot %s: 작성 중." @@ -337,23 +296,11 @@ msgstr "Glance 공유의 이미지 복제 %s 확인." msgid "Checking origin %(origin)s of volume %(volume)s." msgstr "볼륨 %(volume)s의 원본 %(origin)s 확인." -#, python-format -msgid "" -"Cinder ISCSI volume with current path %(path)s is no longer being managed. " -"The new name is %(unm)s." -msgstr "" -"현재 경로가 %(path)s인 Cinder ISCSI 볼륨이 더 이상 관리되지 않습니다. 새 이름" -"은 %(unm)s입니다." - #, python-format msgid "" "Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." msgstr "현재 경로가 \"%(cr)s\"인 Cinder NFS 볼륨이 더 이상 관리되지 않습니다." -#, python-format -msgid "Cinder NFS volume with current path %(cr)s is no longer being managed." -msgstr "현재 경로 %(cr)s의 Cinder NFS 볼륨이 더 이상 관리되지 않습니다." - msgid "Cinder secure environment indicator file exists." msgstr "Cinder 보안 환경 표시기 파일이 있습니다." 
@@ -407,12 +354,6 @@ msgstr "" "%(reserve)sagent-type=%(agent-type)s perfpol-name=%(perfpol-name)s 암호화=" "%(encryption)s 암호=%(cipher)s 다중 개시자r=%(multi-initiator)s에서 볼륨 복제" -#, python-format -msgid "" -"Cloning with volume_name %(vname)s clone_name %(cname)s export_path %(epath)s" -msgstr "" -"volume_name %(vname)s clone_name %(cname)s export_path %(epath)s(으)로 복제" - #, python-format msgid "CloudByte API executed successfully for command [%s]." msgstr "명령 [%s]에 대해 CloudByte API가 성공적으로 실행되었습니다." @@ -430,10 +371,6 @@ msgstr "볼륨 전체 마이그레이션이 성공적으로 완료되었습니 msgid "Completed: convert_to_base_volume: id=%s." msgstr "완료됨: convert_to_base_volume: id=%s." -#, python-format -msgid "Configured pools: %s" -msgstr "구성된 풀: %s" - #, python-format msgid "" "Connect initialization info: {driver_volume_type: fibre_channel, data: " @@ -453,22 +390,6 @@ msgstr "백엔드 사용 마이그레이션의 대상 호스트 %s에 연결." msgid "Connector returning fcnsinfo-%s" msgstr "커넥터에서 fcnsinfo-%s 리턴" -#, python-format -msgid "Consistency group %(cg)s is created successfully." -msgstr "일관성 그룹 %(cg)s이(가) 성공적으로 작성되었습니다." - -#, python-format -msgid "Consistency group %s was deleted successfully." -msgstr "일관성 그룹 %s이(가) 성공적으로 삭제되었습니다." - -#, python-format -msgid "Consistency group %s: created successfully" -msgstr "일관성 그룹 %s: 성공적으로 작성됨" - -#, python-format -msgid "Consistency group %s: creating" -msgstr "일관성 그룹 %s: 작성 중" - #, python-format msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" msgstr "%(mbps).2f MB/s에서 %(sz).2f MB 이미지 전환 " @@ -571,14 +492,6 @@ msgstr "볼륨 %(volume_id)s 작성이 완료되었습니다." msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." msgstr "스냅샷 %(snapshot_id)s에서 볼륨 %(volume_id)s 작성이 완료되었습니다." -#, python-format -msgid "" -"Create Volume: %(volume)s Size: %(size)s pool: %(pool)s provisioning: " -"%(provisioning)s tiering: %(tiering)s " -msgstr "" -"볼륨: %(volume)s 크기: %(size)s 풀: %(pool)s 프로비저닝: %(provisioning)s 계" -"층: %(tiering)s 작성" - #, python-format msgid "" "Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " @@ -594,9 +507,6 @@ msgstr "백업 작성이 완료됨. 백업: %s." msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "백업 작성이 시작됨, 백업: %(backup_id)s 볼륨: %(volume_id)s." -msgid "Create consistency group completed successfully." -msgstr "일관성 그룹 생성이 성공적으로 완료되었습니다." - #, python-format msgid "Create consistency group from source-%(source)s completed successfully." msgstr "source-%(source)s에서 일관성 그룹 생성이 성공적으로 완료되었습니다." @@ -617,10 +527,6 @@ msgstr "일관성 그룹 %(cgId)s의 스냅샷 cgsnapshotID: %(cgsnapshot)s 작 msgid "Create snapshot from volume %s" msgstr "%s 볼륨에서 스냅샷 작성" -#, python-format -msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" -msgstr "스냅샷 작성: %(snapshot)s: 볼륨: %(volume)s" - #, python-format msgid "" "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " @@ -841,10 +747,6 @@ msgstr "일관성 그룹 %(group)s 삭제." msgid "Delete Snapshot %(snapshot_id)s completed." msgstr "스냅샷 %(snapshot_id)s 삭제가 완료되었습니다." -#, python-format -msgid "Delete Snapshot: %(snapshot)s" -msgstr "스냅샷 삭제: %(snapshot)s" - #, python-format msgid "Delete Snapshot: %(snapshot)s." msgstr "스냅샷 %(snapshot)s 삭제" @@ -976,10 +878,6 @@ msgstr "스냅샷 삭제: %s" msgid "Deleting stale snapshot: %s" msgstr "시간이 경과된 스냅샷 삭제: %s" -#, python-format -msgid "Deleting unneeded host %(host_name)r." -msgstr "불필요한 호스트 %(host_name)r 삭제." - #, python-format msgid "Deleting volume %s " msgstr "볼륨 %s 삭제" @@ -1055,10 +953,6 @@ msgstr "드라이버 초기화가 성공적으로 완료되었습니다." msgid "Driver post RPC initialization completed successfully." 
msgstr "드라이버 post RPC 초기화가 성공적으로 완료되었습니다." -#, python-format -msgid "Driver stats: %s" -msgstr "드라이버 통계: %s" - #, python-format msgid "" "E-series proxy API version %(version)s does not support full set of SSC " @@ -1083,10 +977,6 @@ msgstr "EQL-드라이버: \"%s\" 실행." msgid "Editing Volume %(vol)s with mask %(mask)s" msgstr "마스크 %(mask)s(으)로 볼륨 %(vol)s 편집" -#, python-format -msgid "Elapsed time for clear volume: %.2f sec" -msgstr "볼륨 지우기를 위해 경과된 시간: %.2f sec" - msgid "Embedded mode detected." msgstr "임베드된 모드가 발견되었습니다." @@ -1161,10 +1051,6 @@ msgstr "" "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." -#, python-format -msgid "FC Initiators %(in)s of %(ins)s need registration" -msgstr "%(ins)s의 FC 개시자 %(in)s을(를) 등록해야 합니다." - msgid "Failed over to replication target successfully." msgstr "복제 대상으로 장애 복구되었습니다." @@ -1193,10 +1079,6 @@ msgstr "처리된 결함: %s" msgid "Fetched vCenter server version: %s" msgstr "가져온 vCenter 서버 버전: %s" -#, python-format -msgid "Filter %(cls_name)s returned %(obj_len)d host(s)" -msgstr "필터 %(cls_name)s에서 %(obj_len)d 호스트 리턴" - #, python-format msgid "Filtered targets for SAN is: %(targets)s" msgstr "SAN의 필터링된 대상: %(targets)s" @@ -1292,18 +1174,6 @@ msgstr "구성 후의 선호 구역 이름: %(zonename)s" msgid "Generating transfer record for volume %s" msgstr "볼륨 %s의 전송 레코드 생성" -#, python-format -msgid "Get FC targets %(tg)s to register initiator %(in)s." -msgstr "개시자 %(in)s을(를) 등록하기 위해 FC 대상 %(tg)s을(를) 가져옵니다." - -#, python-format -msgid "Get ISCSI targets %(tg)s to register initiator %(in)s." -msgstr "개시자 %(in)s을(를) 등록하기 위해 ISCSI 대상 %(tg)s을(를) 가져옵니다." - -#, python-format -msgid "Get Volume response: %s" -msgstr "볼륨 가져오기 응답: %s" - msgid "Get all snapshots completed successfully." msgstr "모든 스냅샷 가져오기가 성공적으로 완료되었습니다." @@ -1314,10 +1184,6 @@ msgstr "모든 볼륨 가져오기가 성공적으로 완료되었습니다." msgid "Get domain by name response: %s" msgstr "이름별 도메인 가져오기 응답: %s" -#, python-format -msgid "Get service: %(lbl)s->%(svc)s" -msgstr "서비스 가져오기: %(lbl)s->%(svc)s" - msgid "Get snapshot metadata completed successfully." msgstr "스냅샷 메타데이터 가져오기가 성공적으로 완료되었습니다." @@ -1348,10 +1214,6 @@ msgstr "vol_name=%s의 볼륨 정보 가져오기" msgid "Going to perform request again %s with valid token." msgstr "올바른 토큰으로 %s 요청을 다시 수행합니다." -#, python-format -msgid "HDP list: %s" -msgstr "HDP 목록: %s" - #, python-format msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" msgstr "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" @@ -1368,17 +1230,9 @@ msgstr "HTTP 예외 처리: %s" msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." msgstr "Hypermetro id: %(metro_id)s. 원격 lun id: %(remote_lun_id)s." -#, python-format -msgid "ISCSI properties: %(properties)s" -msgstr "ISCSI 특성: %(properties)s" - msgid "ISCSI provider_location not stored, using discovery." msgstr "ISCSI provider_location이 저장되지 않음, 검색 사용." -#, python-format -msgid "ISCSI volume is: %(volume)s" -msgstr "ISCSI 볼륨: %(volume)s" - #, python-format msgid "Ignored LU creation error \"%s\" while ensuring export." msgstr "내보내기를 확인하는 중에 LU 작성 오류 \"%s\"이(가) 무시되었습니다." @@ -1481,14 +1335,6 @@ msgstr "개시자 %(iname)s의 개시자 그룹 이름이 %(grp)s입니다." msgid "LUN %(id)s extended to %(size)s GB." msgstr "LUN %(id)s이(가) %(size)sGB로 확장되었습니다." -#, python-format -msgid "LUN %(lun)s extended to %(size)s GB." -msgstr "LUN %(lun)s이(가) %(size)sGB로 확장되었습니다." - -#, python-format -msgid "LUN %(lun)s of size %(sz)s MB is created." -msgstr "크기가 %(sz)sMB인 LUN %(lun)s이(가) 작성되었습니다." - #, python-format msgid "LUN with given ref %s need not be renamed during manage operation." 
msgstr "" @@ -1615,13 +1461,6 @@ msgstr "FC Zone을 제거해야 하며, 개시자 대상 맵 빌드" msgid "Need to remove FC Zone, building initiator target map." msgstr "FC Zone을 제거해야 하며, 개시자 대상 맵 빌드." -msgid "" -"Neither security file nor plain text credentials are specified. Security " -"file under home directory will be used for authentication if present." -msgstr "" -"보안 파일 및 일반 텍스트 자격 증명이 모두 지정되지 않았습니다. 홈 디렉토리에 " -"보안 파일이 있는 경우 이 파일을 인증에 사용합니다." - #, python-format msgid "" "NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " @@ -1714,10 +1553,6 @@ msgstr "%(volume_id)s 볼륨을 %(backup_id)s 백업의 복원으로 겹쳐씀" msgid "Params for add volume request: %s." msgstr "볼륨 요청 추가 매개 변수: %s." -#, python-format -msgid "Parse_loc: %s" -msgstr "Parse_loc: %s" - #, python-format msgid "Performing post clone for %s" msgstr "%s의 사후 복제 수행" @@ -1726,9 +1561,6 @@ msgstr "%s의 사후 복제 수행" msgid "Performing secure delete on volume: %s" msgstr "%s 볼륨에서 보안 삭제 수행" -msgid "Plain text credentials are being used for authentication" -msgstr "일반 텍스트 자격 증명이 인증에 사용 중임" - #, python-format msgid "Pool id is %s." msgstr "풀 id는 %s입니다." @@ -1970,14 +1802,6 @@ msgstr "볼륨 삭제 재개가 성공적으로 완료되었습니다." msgid "Resuming delete on backup: %s." msgstr "백업에서 삭제 재개: %s." -#, python-format -msgid "Retrieving secret for service: %s." -msgstr "서비스의 시크릿 검색: %s." - -#, python-format -msgid "Retrieving target for service: %s." -msgstr "서비스의 대상 검색: %s." - #, python-format msgid "Return FC info is: %s." msgstr "리턴 FC 정보: %s." @@ -2053,21 +1877,9 @@ msgstr "볼륨 다시 입력 요청이 성공적으로 실행되었습니다." msgid "Retype was to same Storage Profile." msgstr "동일한 스토리지 프로파일에 다시 입력되었습니다." -#, python-format -msgid "Review shares: %s" -msgstr "공유 검토: %s" - msgid "Roll detaching of volume completed successfully." msgstr "볼륨 연결 해제가 성공적으로 롤링되었습니다." -#, python-format -msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" -msgstr "%(server)s 및 vserver %(vs)s의 클러스터 최신 ssc 작업 실행" - -#, python-format -msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" -msgstr "%(server)s 및 vserver %(vs)s의 시간이 경과된 ssc 새로 고치기 작업 실행" - #, python-format msgid "Running with vmemclient version: %s" msgstr "vmemclient 버전으로 실행: %s" @@ -2076,10 +1888,6 @@ msgstr "vmemclient 버전으로 실행: %s" msgid "SC server created %s" msgstr "SC 서버 작성 %s" -#, python-format -msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s" -msgstr "%(svc)s의 서비스 저장 -> %(hdp)s, %(path)s" - #, python-format msgid "" "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " @@ -2165,10 +1973,6 @@ msgstr "백엔드 상태가 동결로 설정되었습니다." msgid "Set newly managed Cinder volume name to %(name)s." msgstr "새로 관리된 Cinder 볼륨 이름을 %(name)s(으)로 설정합니다." -#, python-format -msgid "Set tgt CHAP secret for service: %s." -msgstr "서비스의 tgt CHAP 시크릿 설정: %s." - #, python-format msgid "Setting host %(host)s to %(state)s." msgstr "%(host)s 호스트를 %(state)s(으)로 설정 중입니다. " @@ -2204,12 +2008,6 @@ msgstr "볼륨 %s이(가) 없으므로 삭제를 건너뜁니다." msgid "Skipping ensure_export. Found existing iSCSI target." msgstr "ensure_export를 건너뜁니다. 기존 iSCSI 대상이 발견되었습니다." -#, python-format -msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" -msgstr "" -"ensure_export를 건너뜁니다. %s 볼륨에 대한 iscsi_target을 프로비저닝하지 않습" -"니다. " - #, python-format msgid "" "Skipping image volume %(id)s because it is not accessible by current Tenant." @@ -2281,10 +2079,6 @@ msgstr "스냅샷 %s에 종속 복제본이 있으므로 나중에 삭제됩니 msgid "Snapshot %s not found" msgstr "스냅샷 %s을(를) 찾을 수 없음" -#, python-format -msgid "Snapshot %s was deleted successfully." -msgstr "스냅샷 %s이(가) 성공적으로 삭제되었습니다." 
- #, python-format msgid "Snapshot '%(ref)s' renamed to '%(new)s'." msgstr "스냅샷 '%(ref)s'의 이름이 '%(new)s'(으)로 변경됩니다." @@ -2358,10 +2152,6 @@ msgstr "볼륨 드라이버 %(driver_name)s (%(version)s) 시작" msgid "Storage Group %(storageGroupName)s successfully deleted." msgstr "스토리지 그룹 %(storageGroupName)s이(가) 성공적으로 삭제되었습니다." -#, python-format -msgid "Storage Group %s was empty." -msgstr "스토리지 그룹 %s이(가) 비어 있습니다." - #, python-format msgid "Storage group not associated with the policy. Exception is %s." msgstr "스토리지 그룹이 정책과 연관되지 않았습니다. 예외는 %s입니다." @@ -2381,17 +2171,6 @@ msgstr "%s 사용자로 로그인 완료" msgid "Successfully added %(volumeName)s to %(sgGroupName)s." msgstr "%(volumeName)s이(가) %(sgGroupName)s에 성공적으로 추가되었습니다." -#, python-format -msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" -msgstr "%(server)s 및 vserver %(vs)s의 완료된 ssc 작업이 성공적으로 완료됨" - -#, python-format -msgid "" -"Successfully completed stale refresh job for %(server)s and vserver %(vs)s" -msgstr "" -"%(server)s 및 vserver %(vs)s의 시간이 경과된 새로 고치기 작업이 성공적으로 완" -"료됨" - #, python-format msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr " %(src)s의 디스크를 %(dest)s(으)로 복사했습니다." @@ -2544,10 +2323,6 @@ msgid "" "System with controller addresses [%s] is not registered with web service." msgstr "제어기 주소가 [%s]인 시스템이 웹 서비스에 등록되지 않았습니다." -#, python-format -msgid "Target is %(map)s! Targetlist = %(tgtl)s." -msgstr "대상은 %(map)s입니다. Targetlist = %(tgtl)s." - #, python-format msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." msgstr "마스킹 보기 %(maskingView)s의 wwns: %(targetWwns)s." @@ -2654,10 +2429,6 @@ msgstr "" "스냅샷이 작성된 볼륨 %(snap)s의 스냅샷 지점이 없습니다. 볼륨 %(vol)s의 백업" "을 작성하지 않습니다." -#, python-format -msgid "Toggle san_ip from %(current)s to %(new)s." -msgstr "san_ip를 %(current)s에서 %(new)s(으)로 전환합니다." - msgid "Token is invalid, going to re-login and get a new one." msgstr "토큰이 올바르지 않으므로, 다시 로그인하여 새 토큰을 얻으십시오." @@ -2853,18 +2624,6 @@ msgstr "구성에서 겹쳐쓴 vmware_host_version 사용: %s" msgid "Using pool %(pool)s instead of %(cpg)s" msgstr "%(cpg)s 대신 풀 %(pool)s 사용" -#, python-format -msgid "Using security file in %s for authentication" -msgstr "인증을 위해 %s에서 보안 파일 사용" - -#, python-format -msgid "Using service label: %s" -msgstr "서비스 레이블 사용: %s" - -#, python-format -msgid "Using target label: %s." -msgstr "대상 레이블 사용: %s." - msgid "VF context is changed in the session." msgstr "VF 컨텍스트가 세션에서 변경되었습니다." @@ -3045,10 +2804,6 @@ msgid "Volume with given ref %s need not be renamed during manage operation." msgstr "" "지정된 ref %s이(가) 있는 볼륨은 관리 조작 중에 이름을 바꾸지 않아도 됩니다." -#, python-format -msgid "Volume with the name %s wasn't found, can't unmanage" -msgstr "이름이 %s인 볼륨을 찾을 수 없음. 관리 취소할 수 없음" - #, python-format msgid "" "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " @@ -3095,10 +2850,6 @@ msgstr "I-T 모드에 구역이 있습니다. %s의 구역 작성을 건너뜁 msgid "Zone exists in I-T mode. Skipping zone creation for %(zonename)s" msgstr "I-T 모드에 구역이 있습니다. %(zonename)s의 구역 작성을 건너뜁니다." -#, python-format -msgid "Zone map to add: %(zonemap)s" -msgstr "추가할 구역 맵: %(zonemap)s" - #, python-format msgid "Zone map to add: %s" msgstr "추가할 구역 맵: %s" @@ -3155,10 +2906,6 @@ msgstr "" "_delete_volume_setting, volumename:%(volumename)s, ETERNUS에 볼륨이 없습니" "다. " -#, python-format -msgid "_get_service_target hdp: %s." -msgstr "_get_service_target hdp: %s." - #, python-format msgid "_get_tgt_ip_from_portgroup: Get ip: %s." msgstr "_get_tgt_ip_from_portgroup: ip 가져오기: %s." 
@@ -3203,10 +2950,6 @@ msgstr "cgsnapshot %s: 성공적으로 삭제됨" msgid "cgsnapshot %s: deleting" msgstr "cgsnapshot %s: 삭제" -#, python-format -msgid "config[services]: %s." -msgstr "config[services]: %s." - #, python-format msgid "create_cloned_volume, info: %s, Exit method." msgstr "create_cloned_volume, 정보: %s, 메소드를 종료합니다." @@ -3250,10 +2993,6 @@ msgstr "create_volume, 정보: %s, 메소드를 종료합니다." msgid "create_volume, volume id: %s, Enter method." msgstr "create_volume, 볼륨 id: %s, 메소드를 입력합니다." -#, python-format -msgid "create_volume: create_lu returns %s" -msgstr "create_volume: create_lu에서%s을(를) 리턴" - #, python-format msgid "create_volume_from_snapshot, info: %s, Exit method." msgstr "create_volume_from_snapshot, 정보: %s, 메소드를 종료합니다." @@ -3274,14 +3013,6 @@ msgstr "" "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " "%(tgt_lun_id)s, copy_name: %(copy_name)s." -#, python-format -msgid "del_iscsi_conn: hlun not found %s." -msgstr "del_iscsi_conn: hlun을 찾을 수 없음 %s." - -#, python-format -msgid "delete lun loc %s" -msgstr "lun loc %s 삭제" - #, python-format msgid "delete_snapshot, delete: %s, Exit method." msgstr "delete_snapshot, 삭제: %s, 메소드를 종료합니다." @@ -3307,10 +3038,6 @@ msgstr "" "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " "%(lun_id)s." -#, python-format -msgid "do_setup: %s" -msgstr "do_setup: %s" - #, python-format msgid "extend_volume, used pool name: %s, Exit method." msgstr "extend_volume, 사용된 풀 이름: %s, 메소드를 종료합니다." @@ -3323,26 +3050,10 @@ msgstr "extend_volume, 볼륨 id: %s, 메소드를 입력합니다." msgid "free capacity of pool %(pool)s is: %(free)s, total capacity: %(total)s." msgstr "풀 %(pool)s의 사용 가능한 용량: %(free)s, 총 용량: %(total)s." -#, python-format -msgid "iSCSI Initiators %(in)s of %(ins)s need registration." -msgstr "%(ins)s의 iSCSI 개시자 %(in)s을(를) 등록해야 합니다." - -#, python-format -msgid "iSCSI portal found for service: %s" -msgstr "서비스의 iSCSI 포털 발견: %s" - #, python-format msgid "igroup %(grp)s found for initiator %(iname)s" msgstr "개시자 %(iname)s의 igroup %(grp)s을(를) 찾을 수 없음" -#, python-format -msgid "initialize volume %(vol)s connector %(conn)s" -msgstr "볼륨 %(vol)s 커넥터 %(conn)s 초기화" - -#, python-format -msgid "initialize_ connection: %(vol)s:%(initiator)s" -msgstr "initialize_ connection: %(vol)s:%(initiator)s" - #, python-format msgid "initialize_connection success. Return data: %s." msgstr "initialize_connection 성공: 데이터 리턴: %s." @@ -3396,10 +3107,6 @@ msgid "" "initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s." msgstr "initialize_connection_fc, 개시자: %(wwpns)s, 볼륨 이름: %(volume)s." -#, python-format -msgid "initiate: connection %s" -msgstr "시작: 연결 %s" - msgid "initiator has no password while using chap,adding it" msgstr "chap을 사용하는 동안 개시자의 비밀번호가 없음, 추가" @@ -3407,25 +3114,6 @@ msgstr "chap을 사용하는 동안 개시자의 비밀번호가 없음, 추가" msgid "initiator name: %(initiator_name)s, LUN ID: %(lun_id)s." msgstr "개시자 이름: %(initiator_name)s, LUN ID: %(lun_id)s." -msgid "" -"initiator_auto_registration: False. Initiator auto registration is not " -"enabled. Please register initiator manually." -msgstr "" -"initiator_auto_registration: False. 개시자 자동 등록이 사용되지 않습니다. 개" -"시자를 수동으로 등록하십시오." - -#, python-format -msgid "iops limit is: %s." -msgstr "iops 한계: %s." 
- -#, python-format -msgid "iscsi_initiators: %s" -msgstr "iscsi_initiators: %s" - -#, python-format -msgid "location is: %(location)s" -msgstr "위치: %(location)s" - #, python-format msgid "" "manage_existing_snapshot: snapshot %(exist)s on volume %(volume)s has been " @@ -3445,10 +3133,6 @@ msgstr "" msgid "new cloned volume: %s" msgstr "새로 복제된 볼륨: %s" -#, python-format -msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" -msgstr "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" - #, python-format msgid "open_connection to %(ssn)s at %(ip)s" msgstr "%(ip)s에서 %(ssn)s에 대한 open_connection" @@ -3465,14 +3149,6 @@ msgstr "복제 장애 복구 보조가 %(ssn)s임" msgid "setting volume %s to error_restoring (was restoring-backup)." msgstr "볼륨 %s을(를) error_restoring(으)로 설정(복원-백업)." -#, python-format -msgid "share: %(share)s -> %(info)s" -msgstr "공유: %(share)s -> %(info)s" - -#, python-format -msgid "share: %s incorrect entry" -msgstr "공유: %s 올바르지 않은 항목" - #, python-format msgid "smis_do_iscsi_discovery is: %(out)s." msgstr "smis_do_iscsi_discovery: %(out)s." @@ -3485,22 +3161,10 @@ msgstr "%s 스냅샷이 없습니다." msgid "source volume for cloning: %s" msgstr "복제할 소스 볼륨: %s" -#, python-format -msgid "stats: stats: %s." -msgstr "통계: 통계: %s." - #, python-format msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." msgstr "stop_snapshot: 스냅샷 이름: %(snapshot)s, 볼륨 이름: %(volume)s." -#, python-format -msgid "targetlist: %s" -msgstr "targetlist: %s" - -#, python-format -msgid "terminate: connection %s" -msgstr "종료: 연결 %s" - #, python-format msgid "terminate_connection volume: %(volume)s, connector: %(con)s" msgstr "terminate_connection 볼륨: %(volume)s, 커넥터: %(con)s" diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po index 082ad8951..a6931ced2 100644 --- a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po +++ b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po @@ -10,9 +10,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -264,9 +264,6 @@ msgstr "'상태'가 지정되어야 합니다." msgid "'volume_id' must be specified" msgstr "'volume_id'를 지정해야 함" -msgid "'{}' object has no attribute '{}'" -msgstr "'{}' 오브젝트에 '{}' 속성이 없음" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -424,21 +421,9 @@ msgstr "" msgid "An API version request must be compared to a VersionedMethod object." msgstr "API 버전 요청은 VersionedMethod 오브젝트와 비교해야 합니다." -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "SheepdogDriver에서 오류가 발생했습니다(이유: %(reason)s). " - msgid "An error has occurred during backup operation" msgstr "백업 조작 중에 오류가 발생함 " -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "스냅샷 '%s'을(를) 수정하려는 중에 오류가 발생했습니다." - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "볼륨 \"%s\"을(를) 검색하는 중에 오류가 발생했습니다. " - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -541,16 +526,10 @@ msgstr "CloudByte 스토리지에서 인증 그룹 [%s] 세부사항을 찾을 msgid "Auth user details not found in CloudByte storage." msgstr "CloudByte 스토리지에서 인증 사용자 세부사항을 찾을 수 없습니다. 
" -msgid "Authentication error" -msgstr "인증 오류" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "인증 실패, 스위치 자격 증명 확인, 오류 코드 %s." -msgid "Authorization error" -msgstr "권한 부여 오류" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "가용성 구역 '%(s_az)s'이(가) 올바르지 않습니다. " @@ -568,9 +547,6 @@ msgstr "" msgid "Backend doesn't exist (%(backend)s)" msgstr "백엔드가 존재하지 않음(%(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." -msgstr "백엔드가 이미 장애 복구되었으므로, 장애 복구할 수 없습니다." - #, python-format msgid "Backend reports: %(message)s" msgstr "백엔드 보고서: %(message)s" @@ -581,9 +557,6 @@ msgstr "백엔드 보고서: 항목이 이미 존재함" msgid "Backend reports: item not found" msgstr "백엔드 보고서: 항목을 찾을 수 없음" -msgid "Backend server not NaServer." -msgstr "백엔드 서버가 NaServer가 아님." - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "백엔드 서비스 재시도 제한시간 도달: %(timeout)s초" @@ -676,12 +649,6 @@ msgstr "스토리지 볼륨 백엔드 API로부터 잘못되었거나 예상치 msgid "Bad project format: project is not in proper format (%s)" msgstr "잘못된 프로젝트 형식: 프로젝트 형식이 올바르지 않음(%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Datera 클러스터에 잘못된 요청이 전송됨: 올바르지 않은 인수: %(args)s | " -"%(message)s" - msgid "Bad response from Datera API" msgstr "Datera API의 잘못된 응답" @@ -698,18 +665,6 @@ msgstr "2진" msgid "Blank components" msgstr "비어 있는 구성요소" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Blockbridge API 인증 스킴(토큰 또는 비밀번호)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Blockbridge API 비밀번호(인증 스킴 '비밀번호'의 경우)" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Blockbridge API 토큰(인증 스킴 '토큰'의 경우)" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Blockbridge API 사용자(인증 스킴 '비밀번호'의 경우)" - msgid "Blockbridge api host not configured" msgstr "Blockbridge api 호스트가 구성되지 않음" @@ -821,9 +776,6 @@ msgstr "%s을(를) 정수로 변환할 수 없습니다. " msgid "Can't access 'scality_sofs_config': %s" msgstr "'scality_sofs_config'에 액세스할 수 없음: %s" -msgid "Can't attach snapshot." -msgstr "스냅샷을 연결할 수 없습니다." - msgid "Can't decode backup record." msgstr "백업 레코드를 디코드할 수 없습니다. " @@ -932,10 +884,6 @@ msgstr "" "스냅샷 %s을(를) Cinder에 가져올 수 없습니다. 스냅샷 상태가 정상이 아니거나 실" "행 상태가 온라인이 아닙니다." -#, python-format -msgid "Can't open config file: %s" -msgstr "구성 파일을 열 수 없음: %s" - msgid "Can't parse backup record." msgstr "백업 레코드를 구문 분석할 수 없습니다. " @@ -1010,13 +958,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "ECOM 서버를 연결할 수 없습니다. " -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"크기가 %(src_vol_size)s인 볼륨에서 크기가 %(vol_size)s인 복제를 작성할 수 없" -"음" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1062,12 +1003,6 @@ msgstr "" msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "이름이 %(sgGroupName)s인 스토리지 그룹을 작성하거나 찾을 수 없습니다. " -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"크기가 %(snap_size)s인 스냅샷에서 크기가 %(vol_size)s인 볼륨을 작성할 수 없음" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "크기가 %s인 볼륨을 작성할 수 없습니다. 8GB의 배수가 아닙니다. 
" @@ -1372,10 +1307,6 @@ msgstr "Coho rpc 포트가 구성되지 않음" msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "명령 %(cmd)s이(가) CLI에서 차단되어 취소됨" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition: %s 제한시간" - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s 제한시간" @@ -1532,18 +1463,10 @@ msgstr "GPFS 클러스터 ID를 찾을 수 없음: %s." msgid "Could not find GPFS file system device: %s." msgstr "GPFS 파일 시스템 디바이스를 찾을 수 없음: %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "유형이 %(type_id)s인 볼륨 %(volume_id)s의 호스트를 찾을 수 없습니다." - #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s에서 구성을 찾을 수 없음" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "%(volumeName)s 볼륨에 대한 iSCSI 내보내기를 찾을 수 없습니다. " - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "%s 볼륨에 대한 iSCSI 내보내기를 찾을 수 없음" @@ -1625,16 +1548,6 @@ msgstr "" "백업 작성 중단. 예상 볼륨 상태는 %(expected_status)s이지만 %(actual_status)s" "인 동안에는 인스턴스 연관을 변경할 수 없습니다." -msgid "Create consistency group failed." -msgstr "일관성 그룹을 생성하는 데 실패했습니다." - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"유형이 %(type)s인 암호화된 볼륨을 이미지 %(image)s에서 작성할 수 없습니다. " - msgid "Create export for volume failed." msgstr "볼륨에 대한 내보내기 작성에 실패했습니다. " @@ -1719,12 +1632,6 @@ msgstr "" "볼륨 %(vol)s에 대해 현재 맵핑된 호스트가 %(group)s이(가) 포함된 지원되지 않" "는 호스트 그룹에 있습니다. " -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "더 이상 사용되지 않음: Cinder API의 v1 배치" - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "더 이상 사용되지 않음: Cinder API의 v2 배치" - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1805,15 +1712,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "Dedup lun을 확장할 수 없음" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"중복 제거 인에이블러가 설치되어 있지 않습니다. 중복 제거된 볼륨을 작성할 수 " -"없습니다." - -msgid "Default pool name if unspecified." -msgstr "기본 풀 이름입니다(지정되지 않은 경우). " - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1827,10 +1725,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "기본 볼륨 유형을 찾을 수 없습니다." -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "노출된 풀 및 해당 연관된 백엔드 조회 문자열의 세트를 정의함" - msgid "Delete LUNcopy error." msgstr "LUNcopy 삭제 오류입니다. " @@ -1916,9 +1810,6 @@ msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Dell Cinder 드라이버 구성 오류 replication_device %s을(를) 찾을 수 없음" -msgid "Deploy v3 of the Cinder API." -msgstr "Cinder API의 v3 배치" - msgid "Describe-resource is admin only functionality" msgstr "Describe-resource 기능은 관리자만 사용가능" @@ -1969,13 +1860,6 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "드라이버 연결 초기화에 실패했습니다(오류: %(err)s). " -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"볼륨(LUN {})에 마이그레이션이 금지된 스냅샷이 있으므로 드라이버에서 다시 입력" -"을 수행할 수 없습니다." 
- msgid "Driver must implement initialize_connection" msgstr "드라이버가 initialize_connection을 구현해야 함" @@ -2453,12 +2337,6 @@ msgstr "" "%(storageGroupName)s 스토리지 그룹을 fast 정책 %(fastPolicyName)s과(와) 연관" "시키는 중 오류 발생. 오류 설명: %(errordesc)s." -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "" -"볼륨 %s에 연결하는 중에 오류가 발생했습니다. 목표 한계에 도달했을 수 있습니" -"다! " - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2626,10 +2504,6 @@ msgstr "추가로 %(size)dGB를 가진 볼륨 %(space)s에 대한 공간 확장 msgid "Error managing volume: %s." msgstr "볼륨 관리 중 오류 발생: %s." -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "%(vol)s 볼륨 맵핑 중 오류: %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2673,17 +2547,9 @@ msgstr "cg 스냅샷 %s을(를) 삭제하는 중에 오류가 발생했습니다 msgid "Error occurred when updating consistency group %s." msgstr "일관성 그룹 %s을(를) 업데이트하는 중에 오류가 발생했습니다. " -#, python-format -msgid "Error parsing config file: %s" -msgstr "구성 파일 구문 분석 중 오류: %s" - msgid "Error promoting secondary volume to primary" msgstr "2차 볼륨을 1차로 승격시키는 중 오류 발생" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "%(vol)s 볼륨 제거 중 오류: %(error)s." - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "볼륨 %(vol)s 이름 변경 중 오류 발생: %(err)s." @@ -2966,12 +2832,6 @@ msgstr "스냅샷이 없는 경우에만 이 드라이버에 대해 확장 볼 msgid "Extend volume not implemented" msgstr "볼륨 확장이 구현되지 않음" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"FAST VP 인에이블러가 설치되어 있지 않습니다. 볼륨에 대해 계층화 정책을 설정" -"할 수 없습니다." - msgid "FAST is not supported on this array." msgstr "이 배열에서는 FAST가 지원되지 않습니다." @@ -3036,10 +2896,6 @@ msgstr "" "자원 잠금을 획득하는 데 실패했습니다.(serial: %(serial)s, inst: %(inst)s, " "ret: %(ret)s, stderr: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "%(retries)s번 시도 후 %(sg)s에 %(vol)s을(를) 추가하지 못했습니다." - msgid "Failed to add the logical device." msgstr "논리 디바이스를 추가하는 데 실패했습니다." @@ -3122,9 +2978,6 @@ msgstr "스냅샷 %(cgSnapshot)s에서 CG %(cgName)s을(를) 작성하지 못했 msgid "Failed to create IG, %s" msgstr "IG를 작성하지 못함, %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "SolidFire 이미지 볼륨 작성 실패" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "볼륨 그룹을 작성할 수 없음: %(vg_name)s" @@ -3233,9 +3086,6 @@ msgstr "스케줄러 관리자 볼륨 플로우 작성 실패" msgid "Failed to create snapshot %s" msgstr "스냅샷 %s 작성 실패" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "LUN ID를 지정하지 않아서 스냅샷을 작성하지 못함" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "cg의 스냅샷 작성 실패: %(cgName)s. " @@ -3374,9 +3224,6 @@ msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "스냅샷 자원 영역 보장 실패, id %s에 대한 볼륨을 찾을 수 없음" -msgid "Failed to establish SSC connection." -msgstr "SSC 연결 설정에 실패했습니다. " - msgid "Failed to establish connection with Coho cluster" msgstr "Coho 클러스터와 연결하는 데 실패" @@ -3426,10 +3273,6 @@ msgstr "호스트 %s을(를) 찾지 못했습니다. " msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "%(initiator)s을(를) 포함하는 iSCSI 개시자 그룹을 찾지 못했습니다." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "소스 볼륨 %s의 스토리지 풀을 찾는 데 실패했습니다. " - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "계정 [%s]의 CloudByte 계정 세부사항을 가져오지 못했습니다." 
@@ -3646,27 +3489,6 @@ msgstr "" "보고된 크기 %(size)s이(가) 부동 소수점 숫자가 아니므로 기존 볼륨 %(name)s을" "(를) 관리하지 못했습니다." -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"선택한 볼륨 유형의 풀이 볼륨 참조에서 전달된 NFS 공유와 일치하지 않기 때문에 " -"기존 볼륨을 관리하지 못했습니다. " - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"선택한 볼륨 유형의 풀이 볼륨 참조에서 전달된 파일 시스템과 일치하지 않기 때문" -"에 기존 볼륨을 관리하지 못했습니다. " - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"선택한 볼륨 유형의 풀이 호스트의 풀과 일치하지 않기 때문에 기존 볼륨을 관리하" -"지 못했습니다. " - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -4002,9 +3824,6 @@ msgstr "호스트 lun id 찾기 오류입니다. " msgid "Find lun group from mapping view error." msgstr "맵핑 보기에서 lun 그룹 찾기 오류입니다. " -msgid "Find lun number error." -msgstr "Lun 번호 찾기 오류입니다. " - msgid "Find mapping view error." msgstr "맵핑 보기 찾기 오류입니다. " @@ -4358,9 +4177,6 @@ msgstr "DRBDmanage에서 잘못된 경로 정보를 가져왔습니다(%s)! " msgid "HBSD error occurs." msgstr "HBSD 오류가 발생했습니다." -msgid "HNAS has disconnected SSC" -msgstr "HNAS에서 SSC의 연결을 끊음" - msgid "HPELeftHand url not found" msgstr "HPELeftHand url을 찾을 수 없음" @@ -4399,14 +4215,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "%s 호스트에 FC 개시자가 없음" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "%s 호스트에 iSCSI 개시자가 없음" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "'%s' 호스트를 찾을 수 없습니다. " - #, python-format msgid "Host group with name %s not found" msgstr "이름이 %s인 호스트 그룹을 찾을 수 없음" @@ -4421,9 +4229,6 @@ msgstr "호스트가 동결되지 않았습니다." msgid "Host is already Frozen." msgstr "호스트가 이미 동결되었습니다." -msgid "Host not found" -msgstr "호스트를 찾을 수 없음" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" @@ -4454,9 +4259,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "Blockbridge API의 IP 주소/호스트 이름입니다. " - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "압축이 True로 설정되면 rsize도 설정해야 합니다(-1이 아님). " @@ -4548,12 +4350,6 @@ msgstr "" "Infortrend CLI 예외: %(err)s 매개변수: %(param)s(리턴 코드: %(rc)s) (출력: " "%(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "초기 계층: {}, 정책: {}가 올바르지 않습니다." - -msgid "Input type {} is not supported." -msgstr "입력 유형 {}는 지원되지 않습니다." - msgid "Input volumes or snapshots are invalid." msgstr "입력 볼륨 또는 스냅샷이 올바르지 않습니다. " @@ -4570,15 +4366,6 @@ msgstr "볼륨을 확장하는 데 충분한 여유 공간이 없습니다." msgid "Insufficient privileges" msgstr "권한이 충분하지 않음" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "ceph 클러스터에 대한 연결 재시도 사이의 간격 값(초)입니다. " - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "" -"io_port_list에 대해 올바르지 않은 %(protocol)s 포트 %(port)s이(가) 지정되었습" -"니다. " - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "3PAR 도메인이 잘못되었습니다: %(err)s" @@ -4622,10 +4409,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "올바르지 않은 복제 대상: %(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "올바르지 않은 VNX 인증 유형: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. 
Must be: [MDS1[," @@ -4668,14 +4451,6 @@ msgstr "잘못된 인증 키: %(reason)s" msgid "Invalid backup: %(reason)s" msgstr "올바르지 않은 백업: %(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"올바르지 않은 barbican api url: 버전이 필요합니다(예: 'http[s]://|" -"[:port]/') 지정된 url은 %s입니다." - msgid "Invalid chap user details found in CloudByte storage." msgstr "" "CloudByte 스토리지에서 올바르지 않은 chap 사용자 세부사항이 발견되었습니다. " @@ -4848,10 +4623,6 @@ msgstr "올바르지 않은 스토리지 풀 %s이(가) 지정되었습니다." msgid "Invalid storage pool is configured." msgstr "올바르지 않은 스토리지 풀이 구성되었습니다." -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "올바르지 않은 동기화 모드가 지정되었습니다. 허용된 모드는 %s입니다." - msgid "Invalid transport type." msgstr "올바르지 않은 전송 유형입니다." @@ -4859,14 +4630,6 @@ msgstr "올바르지 않은 전송 유형입니다." msgid "Invalid update setting: '%s'" msgstr "올바르지 않은 업데이트 설정: '%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"올바르지 않은 url: 'http[s]://|[:port]/' 형식이어야 " -"함, 지정된 url은 %s입니다." - #, python-format msgid "Invalid value '%s' for force." msgstr "강제 실행에 대한 올바르지 않은 값 '%s'입니다. " @@ -5009,9 +4772,6 @@ msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "복제가 적절하게 구성되지 않았으므로 장애 복구 실행에 실패했습니다." -msgid "Item not found" -msgstr "항목을 찾을 수 없음" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "CloudByte의 볼륨 작성[%s] 응답에서 작업 ID를 찾을 수 없습니다. " @@ -5040,9 +4800,6 @@ msgstr "볼륨의 LU가 없음: %s" msgid "LUN export failed!" msgstr "LUN 내보내기 실패! " -msgid "LUN id({}) is not valid." -msgstr "LUN id({})가 올바르지 않습니다." - msgid "LUN map overflow on every channel." msgstr "모든 채널의 LUN 맵 오버플로우입니다. " @@ -5050,9 +4807,6 @@ msgstr "모든 채널의 LUN 맵 오버플로우입니다. " msgid "LUN not found with given ref %s." msgstr "주어진 ref %s을(를) 사용하여 LUN을 찾을 수 없습니다. " -msgid "LUN number ({}) is not an integer." -msgstr "LUN 번호({})가 정수가 아닙니다." - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 번호가 채널 id에 대한 경계를 벗어남: %(ch_id)s." @@ -5235,55 +4989,15 @@ msgstr "이 볼륨에 대한 메타데이터 백업이 이미 존재함" msgid "Metadata backup object '%s' already exists" msgstr "메타데이터 백업 오브젝트 '%s'이(가) 이미 존재함" -msgid "Metadata item was not found" -msgstr "메타데이터 항목이 없음" - -msgid "Metadata item was not found." -msgstr "메타데이터 항목을 찾을 수 없습니다. " - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "메타데이터 특성 키 %s이(가) 255자보다 큼 " - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "메타데이터 특성 키 %s 값이 255자보다 큼 " - -msgid "Metadata property key blank" -msgstr "메타데이터 특성 키 공백" - msgid "Metadata property key blank." msgstr "메타데이터 특성 키가 공백입니다. " -msgid "Metadata property key greater than 255 characters." -msgstr "메타데이터 특성 키가 255자보다 깁니다. " - -msgid "Metadata property value greater than 255 characters." -msgstr "메타데이터 특성 값이 255자보다 깁니다. " - msgid "Metadata restore failed due to incompatible version" msgstr "호환 불가능한 버전으로 인해 메타데이터 복원에 실패" msgid "Metadata restore failed due to incompatible version." msgstr "호환 불가능한 버전으로 인해 메타데이터 복원 실패" -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "볼륨 %(src)s 마이그레이션에 실패했습니다. " - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"소스 볼륨 %(src)s과(와) 대상 볼륨 %(dst)s 사이의 볼륨 마이그레이션에 실패했습" -"니다. " - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." 
-msgstr "LUN %s의 마이그레이션이 중지되었거나 결함이 발생했습니다." - -msgid "MirrorView/S enabler is not installed." -msgstr "MirrorView/S 인에이블러가 설치되지 않았습니다." - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5311,9 +5025,6 @@ msgstr "요청 본문에서 필수 요소 '%s'이(가) 누락되었습니다. " msgid "Missing required element 'consistencygroup' in request body." msgstr "요청 본문에 필수 요소 'consistencygroup'이 누락되었습니다. " -msgid "Missing required element 'host' in request body." -msgstr "요청 본문에 필수 요소 '호스트'가 누락되었습니다. " - msgid "Missing required element quota_class_set in request body." msgstr "요청 본문에서 필수 요소 quota_class_set가 누락되었습니다." @@ -5436,9 +5147,6 @@ msgstr "스토리지 풀 이름 또는 ID를 지정해야 합니다. " msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "스토리지 풀을 지정해야 합니다. 옵션: sio_storage_pools." -msgid "Must supply a positive value for age" -msgstr "연령에 대해 양수 값을 제공해야 함" - msgid "Must supply a positive, non-zero value for age" msgstr "기간에 0이 아닌 양수 값을 제공해야 함" @@ -5822,9 +5530,6 @@ msgstr "" "CloudByte 스토리지에서 [%(operation)s] 기반 작업 [%(job)s]에 대해 조회하는 중" "에 널 응답이 수신되었습니다. " -msgid "Number of retries if connection to ceph cluster failed." -msgstr "ceph 클러스터에 대한 연결에 실패한 경우 재시도 횟수입니다. " - msgid "Object Count" msgstr "오브젝트 카운트" @@ -5883,16 +5588,10 @@ msgstr "gpfs_images_share_mode 옵션이 올바르게 설정되지 않았습니 msgid "Option gpfs_mount_point_base is not set correctly." msgstr "gpfs_mount_point_base 옵션이 올바르게 설정되지 않았습니다." -msgid "Option map (cls._map) is not defined." -msgstr "옵션 맵(cls._map)이 정의되지 않았습니다." - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "시작 %(res)s %(prop)s은(는) '%(vals)s' 값 중 하나여야 함" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "Blockbridge API 서버에 연결할 HTTPS 포트를 대체하십시오. " - #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" @@ -6140,14 +5839,6 @@ msgstr "RPC 서버 응답이 완료되지 않음" msgid "Raid did not have MCS Channel." msgstr "Raid에 MCS 채널이 없습니다. " -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"구성 옵션 max_luns_per_storage_group으로 설정된 제한사항에 도달합니다.스토리" -"지 그룹 %(sg)s에 %(vol)s을(를) 추가하는 조작이 거부됩니다." - #, python-format msgid "Received error string: %s" msgstr "오류 문자열이 수신됨: %s" @@ -6326,9 +6017,6 @@ msgstr "필수 구성을 찾을 수 없음" msgid "Required flag %s is not set" msgstr "필수 플래그 %s이(가) 설정되지 않음" -msgid "Requires an NaServer instance." -msgstr "NaServer 인스턴스가 필요합니다." - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -6521,10 +6209,6 @@ msgstr "호스트 %(host)s에서 서비스 %(service_id)s을(를) 찾을 수 없 msgid "Service %(service_id)s could not be found." msgstr "%(service_id)s 서비스를 찾을 수 없습니다. " -#, python-format -msgid "Service %s not found." -msgstr "%s 서비스를 찾을 수 없음" - msgid "Service is too old to fulfil this request." msgstr "서비스가 너무 오래되어 이 요청을 이행할 수 없습니다." @@ -6618,10 +6302,6 @@ msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "%(snapshot_id)s 스냅샷에 %(metadata_key)s 키를 갖는 메타데이터가 없습니다. " -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "스냅샷 %s이(가) 일관성 그룹의 일부가 아니어야 합니다. " - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "배열에 스냅샷 '%s'이(가) 없습니다." @@ -6648,9 +6328,6 @@ msgstr "볼륨의 스냅샷이 다음 상태에서 지원되지 않음: %s" msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "배치되지 않은 스냅샷 res \"%s\"이(가) 있습니까? " -msgid "Snapshot size must be multiple of 1 GB." 
-msgstr "스냅샷 크기는 1GB의 배수여야 합니다. " - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" @@ -6800,9 +6477,6 @@ msgstr "스토리지 시스템 ID가 설정되지 않았습니다." msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "%(poolNameInStr)s 풀에 대한 스토리지 시스템을 찾을 수 없습니다. " -msgid "Storage-assisted migration failed during manage volume." -msgstr "볼륨 관리 중에 스토리지 지원 마이그레이션에 실패했습니다." - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "스토리지 시스템 %(array)s을(를) 찾을 수 없습니다." @@ -6849,10 +6523,6 @@ msgstr "" msgid "Target volume type is still in use." msgstr "대상 볼륨 유형이 아직 사용 중입니다." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "테넌트 ID: %s이(가) 존재하지 않습니다. " - msgid "Terminate connection failed" msgstr "연결 종료 실패" @@ -6942,10 +6612,6 @@ msgstr "경로 %(path)s에 있는 디바이스를 사용할 수 없음: %(reason msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "종료 시간(%(end)s)은 시작 시간(%(start)s) 이후여야 합니다." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "extra_spec: %s이(가) 올바르지 않습니다." - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "추가 스펙: %(extraspec)s이(가) 올바르지 않습니다. " @@ -6997,14 +6663,6 @@ msgstr "" msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP 사용자 %(user)s이(가) 존재하지 않습니다." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"가져온 lun %(lun_id)s이(가) 풀 %(lun_pool)s에 있는데 이 풀은 %(host)s 호스트" -"에서 관리되지 않습니다. " - msgid "The key cannot be None." msgstr "키는 None이 되어서는 안 됩니다. " @@ -7083,11 +6741,6 @@ msgstr "보유 수는 %s 이하여야 합니다." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "볼륨이 유지보수 모드에 있으면 스냅샷을 작성할 수 없습니다. " -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "소스 볼륨 %s이(가) 현재 호스트에서 관리하는 풀에 없습니다. " - msgid "The source volume for this WebDAV operation not found." msgstr "이 WebDAV 조작의 소스 볼륨을 찾을 수 없습니다." @@ -7254,10 +6907,6 @@ msgstr "사용 가능한 자원이 없습니다.(자원: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "올바른 ESX 호스트가 없습니다. " -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "%s에 접속된 올바른 데이터 저장소가 없습니다." - msgid "There are no valid datastores." msgstr "올바른 데이터 저장소가 없습니다. " @@ -7346,11 +6995,6 @@ msgstr "" msgid "Thin provisioning not supported on this version of LVM." msgstr "이 버전의 LVM에서는 씬 프로비저닝이 지원되지 않습니다." -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "" -"씬 프로비저닝 인에이블러가 설치되어 있지 않습니다. 씬 볼륨을 작성할 수 없습니" -"다." - msgid "This driver does not support deleting in-use snapshots." msgstr "이 드라이버는 스냅샵 삭제 중에 사용 중인 스냅샷을 지원하지 않습니다." @@ -7382,13 +7026,6 @@ msgid "" msgstr "" "스냅샷 %(id)s 삭제를 위해 Nova 업데이트를 대기하는 동안 제한시간이 초과됨." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"ceph 클러스터에 연결할 때 사용되는 제한시간 값(초)입니다. 값이 < 0인 경우, 제" -"한시간이 설정되지 않고 기본 librados 값이 사용됩니다." - #, python-format msgid "Timeout while calling %s " msgstr "%s을(를) 호출하는 동안 제한시간 초과" @@ -7471,9 +7108,6 @@ msgstr "%s의 장애 복구를 완료할 수 없습니다." msgid "Unable to connect or find connection to host" msgstr "호스트에 대한 연결을 설정하거나 찾을 수 없음" -msgid "Unable to create Barbican Client without project_id." -msgstr "project_id 없이 Barbican Client를 작성할 수 없습니다." 
- #, python-format msgid "Unable to create consistency group %s" msgstr "일관성 그룹 %s을(를) 작성할 수 없음" @@ -7567,9 +7201,6 @@ msgstr "" "Purity REST API 버전 %(api_version)s(으)로 복제를 수행할 수 없습니다. " "%(required_versions)s 중 하나가 필요합니다." -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "복제 및 snapcopy를 동시에 사용할 수 없습니다." - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Storwize 클러스터 %s과(와) 파트너십을 설정할 수 없습니다." @@ -7961,9 +7592,6 @@ msgstr "알 수 없는 프로토콜: %(protocol)s." msgid "Unknown quota resources %(unknown)s." msgstr "알 수 없는 할당량 자원 %(unknown)s." -msgid "Unknown service" -msgstr "알 수 없는 서비스" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 함" @@ -8077,9 +7705,6 @@ msgstr "사용자 ID" msgid "User does not have admin privileges" msgstr "사용자에게 관리자 권한이 없음" -msgid "User is not authorized to use key manager." -msgstr "사용자에게 키 관리자를 사용할 권한이 없습니다." - msgid "User not authorized to perform WebDAV operations." msgstr "사용자에게 WebDAV 조작을 수행할 권한이 없습니다." @@ -8298,14 +7923,6 @@ msgstr "" "볼륨 %s이(가) 온라인입니다. OpenStack을 사용하여 관리할 볼륨을 오프라인으로 " "설정합니다." -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "" -"볼륨 %s을(를) 마이그레이션하거나, 연결하거나, 일관성 그룹에 속하거나, 스냅샷" -"이 없어야 합니다." - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "볼륨 %s이(가) 일관성 그룹의 일부가 아니어야 합니다. " @@ -8334,10 +7951,6 @@ msgstr "볼륨 그룹 %s이(가) 없음" msgid "Volume Type %(id)s already exists." msgstr "%(id)s 볼륨 유형이 이미 존재합니다. " -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "볼륨 유형 %(type_id)s에 키 %(id)s을(를) 가진 추가 스펙이 없습니다. " - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -8530,9 +8143,6 @@ msgstr "" msgid "Volume size must be a multiple of 1 GB." msgstr "볼륨 크기는 1GB의 배수여야 합니다. " -msgid "Volume size must be multiple of 1 GB." -msgstr "볼륨 크기는 1GB의 배수여야 합니다. " - msgid "Volume size must multiple of 1 GB." msgstr "볼륨 크기는 1GB의 배수여야 합니다. " @@ -8607,10 +8217,6 @@ msgstr "볼륨 유형 이름은 빈 상태로 둘 수 없습니다." msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "이름이 %(volume_type_name)s인 볼륨 유형을 찾을 수 없습니다. " -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "볼륨 ID가 %s인 볼륨이 존재하지 않습니다. " - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -8625,17 +8231,10 @@ msgstr "" "볼륨: %(volumeName)s이(가) 스토리지 그룹 %(sgGroupName)s에 추가되지 않았습니" "다." -#, python-format -msgid "Volume: %s could not be found." -msgstr "볼륨: %s을(를) 찾을 수 없습니다. " - #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Cinder에서 이미 볼륨: %s을(를) 관리 중입니다." -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "볼륨은 이 크기(메가바이트)의 오브젝트로 청크됩니다. " - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "기본 및 보조 SolidFire 계정 모두에서 볼륨/계정이 초과되었습니다." @@ -9255,13 +8854,6 @@ msgstr "" "create_consistencygroup_from_src에서는 cgsnapshot 소스나 일관성 그룹 소스만 " "지원합니다. 여러 소스를 사용할 수 없습니다." -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src에서 cgsnapshot 소스나 일관성 그룹 소스를 지" -"원합니다. 여러 소스를 사용할 수 없습니다." - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." 
msgstr "create_copy: 소스 vdisk %(src)s(%(src_id)s)이(가) 없습니다. " @@ -9365,17 +8957,11 @@ msgstr "" "create_volume_from_snapshot: 볼륨 작성에 스냅샷 상태가 \"사용 가능\"해야 합니" "다. 올바르지 않은 상태는 %s입니다." -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "create_volume_from_snapshot: 소스 및 대상 크기가 다릅니다. " - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "create_volume_from_snapshot: 볼륨 크기가 스냅샷 기반 볼륨과 다릅니다." -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "중복 제거 및 자동 계층화를 둘 다 사용할 수는 없습니다." - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -9418,9 +9004,6 @@ msgstr "원격 노드에서 스냅샷 분리" msgid "do_setup: No configured nodes." msgstr "do_setup: 구성된 노드가 없습니다." -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries는 0 이상이어야 함" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -9617,22 +9200,12 @@ msgstr "iscsiadm 실행에 실패했습니다." msgid "key manager error: %(reason)s" msgstr "주요 관리자 오류: %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key가 정의되지 않음" - msgid "limit param must be an integer" msgstr "limit 매개변수는 정수여야 함" msgid "limit param must be positive" msgstr "limit 매개변수가 양수여야 함" -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -"manage_existing은 호스트에 연결된 볼륨을 관리할 수 없습니다. 가져오기 전에 " -"이 볼륨과 기존 호스트의 연결을 끊으십시오. " - msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "manage_existing에는 기존 볼륨을 식별하기 위한 'name' 키가 필요합니다. " @@ -9677,10 +9250,6 @@ msgstr "스냅샷 ID가 %s인 다중 자원이 발견됨" msgid "name cannot be None" msgstr "이름은 None일 수 없음" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path: NAVISECCLI 도구 %(path)s을(를) 찾을 수 없습니다." - #, python-format msgid "no REPLY but %r" msgstr "REPLY가 아니라 %r" @@ -9744,14 +9313,6 @@ msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted는 'no', 'yes', 'only' 중에서 선택 가능하며, %r은(는) 불가능함" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "백엔드에서 replication_device를 구성해야 함: %s." - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "backend_id [%s]인 replication_device가 누락되었습니다." - #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover에 실패했습니다. %s을(를) 찾을 수 없습니다." @@ -9810,9 +9371,6 @@ msgstr "san_ip가 설정되지 않았습니다. " msgid "san_ip must be set" msgstr "san_ip가 설정되어야 함" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip: 필수 필드 구성입니다. san_ip가 설정되지 않았습니다." - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." 
@@ -9823,16 +9381,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve()는 한 번만 호출할 수 있음" -msgid "service not found" -msgstr "서비스를 찾을 수 없음" - -msgid "snapshot does not exist" -msgstr "스냅샷이 없음" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "스냅샷 id:%s을(를) 찾을 수 없음" - #, python-format msgid "snapshot-%s" msgstr "스냅샷-%s" @@ -9843,10 +9391,6 @@ msgstr "스냅샷이 지정됨" msgid "snapshots changed" msgstr "스냅샷이 변경됨" -#, python-format -msgid "source vol id:%s not found" -msgstr "소스 볼륨 id:%s을(를) 찾을 수 없음" - #, python-format msgid "source volume id:%s is not replicated" msgstr "소스 볼륨 id:%s을(를) 복제할 수 없음" @@ -9944,9 +9488,6 @@ msgstr "볼륨 지정됨" msgid "volume changed" msgstr "볼륨 변경됨" -msgid "volume does not exist" -msgstr "볼륨이 없음" - msgid "volume is already attached" msgstr "볼륨이 이미 접속됨" @@ -9964,9 +9505,6 @@ msgstr "" msgid "volume size %d is invalid." msgstr "볼륨 크기 %d이(가) 올바르지 않음" -msgid "volume_type cannot be None" -msgstr "volume_type은 None일 수 없음" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "일관성 그룹에 볼륨을 작성할 때 volume_type이 제공되어야 합니다." @@ -9999,6 +9537,3 @@ msgid "" msgstr "" "zfssa_manage_policy 특성을 'strict' 또는 'loose'로 설정해야 합니다. 현재 값: " "%s." - -msgid "{} is not a valid option." -msgstr "{}는 올바른 옵션이 아닙니다." diff --git a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po index b2cdaf1de..3eaa4cc17 100644 --- a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po +++ b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po @@ -10,9 +10,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -264,9 +264,6 @@ msgstr "'status' deve ser especificado." msgid "'volume_id' must be specified" msgstr "'volume_id' deve ser especificado" -msgid "'{}' object has no attribute '{}'" -msgstr "O objeto '{}' não possui nenhum atributo '{}'" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -428,21 +425,9 @@ msgstr "" "Uma solicitação da versão da API deve ser comparada com um objeto " "VersionedMethod object." -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "Um erro ocorreu em SheepdogDriver. (Motivo: %(reason)s)" - msgid "An error has occurred during backup operation" msgstr "Um erro ocorreu durante a operação de backup" -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "Ocorreu um erro ao tentar modificar a Captura Instantânea '%s'." - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "Ocorreu um erro ao buscar o volume \"%s\"." - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -551,18 +536,12 @@ msgstr "" "Detalhes do usuário de autenticação não localizados no armazenamento do " "CloudByte." -msgid "Authentication error" -msgstr "Erro de Autenticação" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "A autenticação falhou, verifique as credenciais do comutador, código de erro " "%s." 
-msgid "Authorization error" -msgstr "Erro de autorização" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "A zona de disponibilidade '%(s_az)s' é inválida." @@ -581,10 +560,6 @@ msgstr "" msgid "Backend doesn't exist (%(backend)s)" msgstr "Backend não existe (%(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." -msgstr "" -"O backend já foi submetido a failover. Não é possível executar failback." - #, python-format msgid "Backend reports: %(message)s" msgstr "Relatórios de backend: %(message)s" @@ -595,9 +570,6 @@ msgstr "Relatórios de backend: o item já existe" msgid "Backend reports: item not found" msgstr "Relatórios de backend: item não localizado" -msgid "Backend server not NaServer." -msgstr "O servidor Backend não é NaServer." - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" @@ -697,12 +669,6 @@ msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Formato de projeto inválido: o projeto não está no formato adequado (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Solicitação inválida enviada para cluster Datera: Argumentos inválidos: " -"%(args)s | %(message)s" - msgid "Bad response from Datera API" msgstr "Resposta inválida da API Datera" @@ -719,18 +685,6 @@ msgstr "binário" msgid "Blank components" msgstr "Componentes em branco" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Esquema de autenticação da API Blockbridge (token ou senha)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Senha da API Blockbridge (para esquema de autenticação 'password')" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Token da API Blockbridge (para esquema de autenticação 'token')" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Usuário da API Blockbridge (para esquema de autorização 'password')" - msgid "Blockbridge api host not configured" msgstr "Host da API Blockbridge não configurado" @@ -851,9 +805,6 @@ msgstr "Não foi possível converter %s para inteiro." msgid "Can't access 'scality_sofs_config': %s" msgstr "Não é possível acessar 'scality_sofs_config': %s" -msgid "Can't attach snapshot." -msgstr "Não é possível anexar a captura instantânea" - msgid "Can't decode backup record." msgstr "Não é possível decodificar registro de backup." @@ -976,10 +927,6 @@ msgstr "" "Não é possível importar a captura instantânea %s no Cinder. O status captura " "instantânea não é normal ou o status de execução não é on-line." -#, python-format -msgid "Can't open config file: %s" -msgstr "Não é possível abrir o arquivo de configuração: %s" - msgid "Can't parse backup record." msgstr "Não é possível analisar o registro de backup." @@ -1055,13 +1002,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "Não é possível conectar-se ao servidor ECOM." -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"Não é possível criar clone de tamanho %(vol_size)s a partir do volume de " -"tamanho %(src_vol_size)s" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1114,13 +1054,6 @@ msgstr "" "Não é possível criar ou localizar um grupo de armazenamentos com o nome " "%(sgGroupName)s." 
-#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"Não é possível criar o volume de tamanho %(vol_size)s a partir da captura " -"instantânea de tamanho %(snap_size)s" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "Não é possível criar volume de tamanho %s: não é múltiplo de 8GB." @@ -1453,10 +1386,6 @@ msgstr "A porta RPC Coho não está configurada" msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Comando %(cmd)s bloqueado na CLI e foi cancelado" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition: tempo limite de %s" - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: tempo limite de %s." @@ -1619,21 +1548,10 @@ msgid "Could not find GPFS file system device: %s." msgstr "" "Não foi possível localizar o dispositivo do sistema de arquivos GPFS: %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "" -"Não foi possível localizar um host para o volume %(volume_id)s com o tipo " -"%(type_id)s." - #, python-format msgid "Could not find config at %(path)s" msgstr "Não foi possível localizar a configuração em %(path)s" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "" -"Não foi possível localizar exportação iSCSI para o volume %(volumeName)s." - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Não foi possível localizar iSCSI export para o volume %s" @@ -1726,17 +1644,6 @@ msgstr "" "Criação de backup interrompida, esperava-se o status de volume " "%(expected_status)s, mas foi obtido %(actual_status)s." -msgid "Create consistency group failed." -msgstr "Falha ao criar grupo de consistências." - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"A criação de volumes criptografados com o tipo %(type)s da imagem %(image)s " -"não é suportada." - msgid "Create export for volume failed." msgstr "Falha ao criar exportação para o volume." @@ -1826,12 +1733,6 @@ msgstr "" "O host mapeado atualmente para o volume %(vol)s está no grupo de hosts não " "suportado com %(group)s." -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "DESCONTINUADO: Implemente v1 da API Cinder." - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "DESCONTINUADO: Implemente v2 da API Cinder." - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1913,15 +1814,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "LUNs dedup não podem ser estendidos" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"Ativador de Deduplicação não está instalado. Não é possível criar volume " -"deduplicado" - -msgid "Default pool name if unspecified." -msgstr "Nome do conjunto padrão, se não especificado." - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1935,12 +1827,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "O tipo de volume padrão não pode ser localizado." 
-msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"Define o conjunto de conjuntos expostos e suas cadeias de consulta de " -"backend associadas" - msgid "Delete LUNcopy error." msgstr "Erro ao excluir LUNcopy." @@ -2030,9 +1916,6 @@ msgstr "" "replication_device %s de erro de configuração de driver Dell Cinder não " "localizada" -msgid "Deploy v3 of the Cinder API." -msgstr "Implemente v3 da API Cinder." - msgid "Describe-resource is admin only functionality" msgstr "O Descrever-recurso é uma funcionalidade apenas administrativa" @@ -2086,13 +1969,6 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Falha ao inicializar conexão do driver (erro: %(err)s)." -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"O driver não pode ser redefinido porque o volume (LUN {}) possui captura " -"instantânea que é proibida pra migração. " - msgid "Driver must implement initialize_connection" msgstr "O driver deve implementar initialize_connection" @@ -2601,10 +2477,6 @@ msgstr "" "Erro de associação de grupo de armazenamento : %(storageGroupName)s. Para " "Política FAST: %(fastPolicyName)s com descrição do erro: %(errordesc)s." -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "Erro ao conectar o volume %s. O limite de destino pode ser atingido!" - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2773,10 +2645,6 @@ msgstr "Erro em space-extend para o volume %(space)s com %(size)d GB adicional" msgid "Error managing volume: %s." msgstr "Erro ao gerenciar o volume: %s." -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "Erro ao mapear o volume %(vol)s. %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2819,17 +2687,9 @@ msgstr "Ocorreu um erro ao excluir cgsnapshot %s." msgid "Error occurred when updating consistency group %s." msgstr "Ocorreu um erro ao atualizar o grupo de consistências %s." -#, python-format -msgid "Error parsing config file: %s" -msgstr "Erro ao analisar o arquivo de configuração: %s" - msgid "Error promoting secondary volume to primary" msgstr "Erro ao promover o volume secundário para primário" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "Erro ao remover o volume %(vol)s. %(error)s." - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Erro ao renomear o volume %(vol)s: %(err)s." @@ -3101,12 +2961,6 @@ msgstr "" msgid "Extend volume not implemented" msgstr "Estender volume não implementado" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"Ativador VP de FAST não é instalado. Não é possível configurar a política de " -"camadas para o volume" - msgid "FAST is not supported on this array." msgstr "O FAST não é suportado nesta matriz." @@ -3172,10 +3026,6 @@ msgstr "" "Falha ao adquirir um bloqueio de recurso. (serial: %(serial)s, inst: " "%(inst)s, ret: %(ret)s, erro padrão: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "Falha ao incluir %(vol)s ao %(sg)s após %(retries)s tentativas." - msgid "Failed to add the logical device." msgstr "Falha ao incluir o dispositivo lógico." @@ -3259,9 +3109,6 @@ msgstr "Falha ao criar CG %(cgName)s da captura instantânea %(cgSnapshot)s." 
msgid "Failed to create IG, %s" msgstr "Falha ao criar IG, %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "Falha ao criar imagem SolidFire-Volume" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Falha ao criar Grupo de Volumes: %(vg_name)s" @@ -3370,9 +3217,6 @@ msgstr "Falha ao criar fluxo de volume de gerenciador de planejador" msgid "Failed to create snapshot %s" msgstr "Falha ao criar a captura instantânea %s" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "Falha ao criar captura instantânea, nenhum ID de LUN é especificado" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "Falha ao criar a captura instantânea para cg: %(cgName)s." @@ -3517,9 +3361,6 @@ msgstr "" "Falha ao assegurar a área de recursos de captura instantânea; não foi " "possível localizar o volume para o ID %s" -msgid "Failed to establish SSC connection." -msgstr "Falha ao estabelecer conexão SSC." - msgid "Failed to establish connection with Coho cluster" msgstr "Falha ao estabelecer a conexão com o cluster Coho" @@ -3573,11 +3414,6 @@ msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" "Falha ao localizar o grupo de iniciadores iSCSI contendo %(initiator)s." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "" -"Falha ao localizar o conjunto de armazenamentos para o volume de origem %s." - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Falha ao obter detalhes da conta do CloudByte para a conta [%s]." @@ -3805,29 +3641,6 @@ msgstr "" "Falha ao gerenciar volume existente %(name)s, porque o tamanho relatado " "%(size)s não era um número de vírgula flutuante." -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"Falha ao gerenciar o volume existente porque o conjunto do tipo de volume " -"escolhido não corresponde ao compartilhamento NFS passado na referência de " -"volume." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"Falha ao gerenciar o volume existente porque o conjunto do tipo de volume " -"escolhido não corresponde ao sistema de arquivos passado na referência de " -"volume." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"Falha ao gerenciar o volume existente porque o conjunto do tipo de volume " -"escolhido não corresponde ao conjunto do host." - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -4169,9 +3982,6 @@ msgstr "Erro ao localizar ID do LUN do host." msgid "Find lun group from mapping view error." msgstr "Erro ao localizar grupo de LUNs da visualização de mapeamento." -msgid "Find lun number error." -msgstr "Erro ao localizar número de LUN." - msgid "Find mapping view error." msgstr "Erro ao localizar a visualização de mapeamento." @@ -4546,9 +4356,6 @@ msgstr "Informações de caminho inválido obtido do DRBDmanage! (%s)" msgid "HBSD error occurs." msgstr "Erro HBSD ocorreu." 
-msgid "HNAS has disconnected SSC" -msgstr "O HNAS possui SSC desconectado" - msgid "HPELeftHand url not found" msgstr "URL HPELeftHand não localizada" @@ -4588,14 +4395,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "Host %s não possui inicializadores do FC" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "Host %s não possui inicializador do iSCSI" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "O host '%s' não pôde ser localizado." - #, python-format msgid "Host group with name %s not found" msgstr "Grupo de hosts com o nome %s não localizado" @@ -4610,9 +4409,6 @@ msgstr "O Host NÃO está Paralisado" msgid "Host is already Frozen." msgstr "O Host já está Paralisado" -msgid "Host not found" -msgstr "Host não localizado" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Host não localizado. Falha ao remover %(service)s no %(host)s." @@ -4641,9 +4437,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "Endereço IP/nome do host da API Blockbridge." - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" @@ -4741,12 +4534,6 @@ msgstr "" "Exceção da CLI Infortrend: %(err)s Parâmetro: %(param)s (Código de retorno: " "%(rc)s) (Saída: %(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "Camada inicial: {}, a política: {} não é válida." - -msgid "Input type {} is not supported." -msgstr "O tipo de entrada {} não é suportado." - msgid "Input volumes or snapshots are invalid." msgstr "Os volumes ou capturas instantâneas de entrada são inválidos." @@ -4763,16 +4550,6 @@ msgstr "Espaço livre insuficiente disponível para o volume de extensão." msgid "Insufficient privileges" msgstr "Privilégios insuficientes" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "" -"O valor do intervalo (em segundos) entre novas tentativas de conexão com o " -"cluster ceph." - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "" -"Portas %(port)s de %(protocol)s inválidas especificadas para io_port_list." - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Inválido Domínio 3PAR: %(err)s" @@ -4818,10 +4595,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Destino de Replicação Inválido: %(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "Tipo de autenticação VNX inválido: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -4866,14 +4639,6 @@ msgstr "Chave de autenticação inválida: %(reason)s" msgid "Invalid backup: %(reason)s" msgstr "Backup inválido: %(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"URL da API barbican inválida: a versão é necessária, por exemplo, " -"'http[s]://|[:port]/'. A URL especificada é: %s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "" "Detalhes do usuário chap inválidos localizados no armazenamento do CloudByte." @@ -5048,10 +4813,6 @@ msgstr "Conjunto de armazenamento inválido %s especificado." msgid "Invalid storage pool is configured." msgstr "Um conjunto de armazenamento inválido foi configurado." -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "Modo de sincronização inválido especificado, o modo permitido é %s." 
- msgid "Invalid transport type." msgstr "Tipo de transporte inválido." @@ -5059,14 +4820,6 @@ msgstr "Tipo de transporte inválido." msgid "Invalid update setting: '%s'" msgstr "Configuração de atualização inválida: '%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"URL inválida: deve estar no formato 'http[s]://|[:port]/" -"', A URL especificada é: %s" - #, python-format msgid "Invalid value '%s' for force." msgstr "Valor inválido '%s' para força." @@ -5215,9 +4968,6 @@ msgstr "" "Falha ao emitir um failover porque a replicação não está configurada " "corretamente." -msgid "Item not found" -msgstr "Item não localizado" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" @@ -5253,9 +5003,6 @@ msgstr "O LU não existe para o volume: %s" msgid "LUN export failed!" msgstr "Falha ao exportar LUN!" -msgid "LUN id({}) is not valid." -msgstr "O ID de LUN ({}) não é válido." - msgid "LUN map overflow on every channel." msgstr "Estouro do mapa de LUN em todos os canais." @@ -5263,9 +5010,6 @@ msgstr "Estouro do mapa de LUN em todos os canais." msgid "LUN not found with given ref %s." msgstr "LUN não localizado com ref %s dada." -msgid "LUN number ({}) is not an integer." -msgstr "O número de LUN ({}) não é um número inteiro." - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "O número do LUN está fora do limite no ID de canal: %(ch_id)s." @@ -5460,55 +5204,15 @@ msgstr "Backup de metadados já existe para esse volume" msgid "Metadata backup object '%s' already exists" msgstr "Objeto de backup de metadados '%s' já existe" -msgid "Metadata item was not found" -msgstr "O item de metadados não foi localizado" - -msgid "Metadata item was not found." -msgstr "Item de metadados não foi localizado." - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "Chave da propriedade de metadados %s maior que 255 caracteres" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "Valor da chave da propriedade de metadados %s maior que 255 caracteres" - -msgid "Metadata property key blank" -msgstr "Chave da propriedade de metadados em branco" - msgid "Metadata property key blank." msgstr "A chave da propriedade de metadados está em branco." -msgid "Metadata property key greater than 255 characters." -msgstr "A chave da propriedade de metadados tem mais de 255 caracteres." - -msgid "Metadata property value greater than 255 characters." -msgstr "O valor da propriedade de metadados tem mais de 255 caracteres." - msgid "Metadata restore failed due to incompatible version" msgstr "Restauração de metadados falhou devido à versão incompatível" msgid "Metadata restore failed due to incompatible version." msgstr "A restauração de metadados falhou devido à versão incompatível." -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "Falha ao migrar o volume %(src)s." - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"Falha ao migrar volume entre o volume de origem %(src)s e o volume de " -"destino %(dst)s." - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "A migração do LUN %s foi interrompida ou falhou." - -msgid "MirrorView/S enabler is not installed." -msgstr "O ativador MirrorView/S não está instalado." 
- msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5537,9 +5241,6 @@ msgstr "Elemento obrigatório '%s' ausente no corpo da solicitação." msgid "Missing required element 'consistencygroup' in request body." msgstr "Elemento requerido ausente 'consistencygroup' no corpo da solicitação." -msgid "Missing required element 'host' in request body." -msgstr "Elemento necessário ausente 'host' no corpo da solicitação." - msgid "Missing required element quota_class_set in request body." msgstr "Faltando elemento obrigatório quota_class_set no corpo da requisição." @@ -5666,9 +5367,6 @@ msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "Deve-se especificar conjuntos de armazenamentos. Opções: sio_storage_pools." -msgid "Must supply a positive value for age" -msgstr "Deve fornecer um valor positivo para a idade" - msgid "Must supply a positive, non-zero value for age" msgstr "Deve fornecer um número positivo, diferente de zero para a idade" @@ -6074,9 +5772,6 @@ msgstr "" "Resposta nula recebida ao consultar a tarefa baseada em [%(operation)s] " "[%(job)s] no armazenamento CloudByte." -msgid "Number of retries if connection to ceph cluster failed." -msgstr "Número de novas tentativas se a conexão com o cluster ceph falhou." - msgid "Object Count" msgstr "Contagem de Objetos" @@ -6136,17 +5831,10 @@ msgstr "A opção gpfs_images_share_mode não está configurada corretamente." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "A opção gpfs_mount_point_base não está configurada corretamente." -msgid "Option map (cls._map) is not defined." -msgstr "O mapa de opções (cls._map) não está definido." - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "A origem de %(res)s %(prop)s deve ser um dos valores '%(vals)s'" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "" -"Porta HTTPS de substituição para conectar-se ao servidor da API Blockbridge." - #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" @@ -6403,15 +6091,6 @@ msgstr "A resposta do servidor RPC está incompleta" msgid "Raid did not have MCS Channel." msgstr "O RAID não tinha o Canal MCS." -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"Atinja a limitação definida pela opção de configuração " -"max_luns_per_storage_group. Operação para incluir %(vol)s no Grupo de " -"Armazenamento %(sg)s será rejeitada." - #, python-format msgid "Received error string: %s" msgstr "Sequência de erros recebida: %s" @@ -6596,9 +6275,6 @@ msgstr "Configuração necessária não localizada" msgid "Required flag %s is not set" msgstr "A sinalização %s necessária não está configurada" -msgid "Requires an NaServer instance." -msgstr "Requer uma instância NaServer." - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -6793,10 +6469,6 @@ msgstr "O serviço %(service_id)s não pôde ser localizado no host %(host)s." msgid "Service %(service_id)s could not be found." msgstr "O serviço %(service_id)s não pôde ser localizado." -#, python-format -msgid "Service %s not found." -msgstr "Serviço %s não localizado." - msgid "Service is too old to fulfil this request." msgstr "O serviço é muito antigo para preencher essa solicitação." 
@@ -6892,11 +6564,6 @@ msgstr "" "A captura instantânea %(snapshot_id)s não tem metadados com a chave " "%(metadata_key)s." -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "" -"A captura instantânea %s não deve fazer parte de um grupo de consistências." - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "A captura instantânea '%s' não existe na matriz." @@ -6925,9 +6592,6 @@ msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "" "Captura instantânea res \"%s\" que não é implementada em qualquer lugar." -msgid "Snapshot size must be multiple of 1 GB." -msgstr "O tamanho da captura instantânea deve ser múltiplo de 1 GB." - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" @@ -7091,10 +6755,6 @@ msgstr "ID do sistema de armazenamento não configurado." msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Sistema de armazenamento não encontrado para pool %(poolNameInStr)s." -msgid "Storage-assisted migration failed during manage volume." -msgstr "" -"A migração de armazenamento assistida falhou durante gerenciamento de volume." - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "StorageSystem %(array)s não foi encontrado." @@ -7143,10 +6803,6 @@ msgstr "" msgid "Target volume type is still in use." msgstr "Tipo de volume de destino ainda está em uso." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "ID do locatário: %s não existe." - msgid "Terminate connection failed" msgstr "Finalização da conexão com falha" @@ -7243,10 +6899,6 @@ msgstr "" "O horário de encerramento (%(end)s) deve ser posterior ao horário de início " "(%(start)s)." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "O extra_spec: %s é inválido." - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "O extraspec: %(extraspec)s não é válido." @@ -7298,14 +6950,6 @@ msgstr "" msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "O usuário de CHAP iSCSI %(user)s não existe." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"O lun importado %(lun_id)s está no conjunto %(lun_pool)s que não é " -"gerenciado por host %(host)s." - msgid "The key cannot be None." msgstr "A chave não pode ser Nenhum." @@ -7392,12 +7036,6 @@ msgstr "" "A captura instantânea não pode ser criada quando o volume está no modo de " "manutenção." -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "" -"O volume de origem %s não está no conjunto que é gerenciado pelo host atual." - msgid "The source volume for this WebDAV operation not found." msgstr "O volume de origem para esta operação WebDAV não foi localizado." @@ -7571,10 +7209,6 @@ msgstr "Não há recursos disponíveis para uso. (recurso: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "Não há hosts ESX válidos." -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "Não há armazenamentos de dados válidos conectados ao %s." - msgid "There are no valid datastores." msgstr "Não há nenhum datastore válido." @@ -7669,11 +7303,6 @@ msgstr "" msgid "Thin provisioning not supported on this version of LVM." msgstr "Thin provisioning não suportado nesta versão do LVM." -msgid "ThinProvisioning Enabler is not installed. 
Can not create thin volume" -msgstr "" -"Ativador Thin Provisioning não está instalado. Não é possível criar um " -"volume thin" - msgid "This driver does not support deleting in-use snapshots." msgstr "" "Este driver não oferece suporte à exclusão de capturas instantâneas em uso." @@ -7712,14 +7341,6 @@ msgstr "" "Tempo limite atingido ao aguardar atualização de Nova para exclusão de " "captura instantânea %(id)s." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"Valor de tempo limite (em segundos) usado ao conectar-se ao cluster ceph. Se " -"o valor < 0, nenhum tempo limite foi configurado e o valor librados padrão " -"foi usado." - #, python-format msgid "Timeout while calling %s " msgstr "Tempo limite ao chamar %s." @@ -7807,9 +7428,6 @@ msgstr "Não é possível concluir o failover de %s." msgid "Unable to connect or find connection to host" msgstr "Não é possível conectar-se ou localizar a conexão com o host" -msgid "Unable to create Barbican Client without project_id." -msgstr "Não é possível criar o Barbican Client sem project_id." - #, python-format msgid "Unable to create consistency group %s" msgstr "Não foi possível criar o grupo de consistências %s" @@ -7911,10 +7529,6 @@ msgstr "" "Não é possível executar replicação com a versão da API REST Purity " "%(api_version)s, requer uma das %(required_versions)s." -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "" -"Não é possível ativar a replicação e a cópia instantânea ao mesmo tempo." - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Não é possível estabelecer a parceria com o cluster Storwize: %s" @@ -8320,9 +7934,6 @@ msgstr "Protocolo desconhecido: %(protocol)s." msgid "Unknown quota resources %(unknown)s." msgstr "Recursos da cota desconhecidos %(unknown)s." -msgid "Unknown service" -msgstr "Serviço desconhecido" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'" @@ -8441,9 +8052,6 @@ msgstr "ID de Usuário" msgid "User does not have admin privileges" msgstr "O usuário não tem privilégios de administrador" -msgid "User is not authorized to use key manager." -msgstr "O usuário não está autorizado a usar o gerenciador de chaves." - msgid "User not authorized to perform WebDAV operations." msgstr "O usuário não está autorizado a executar operações do WebDAV." @@ -8668,14 +8276,6 @@ msgstr "" "O volume %s está on-line. Configure o volume para off-line gerenciar o uso " "do OpenStack." -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "" -"O Volume %s não deve ser de migração, estar conectado, pertencer a um grupo " -"de consistências ou possuir capturas instantâneas." - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "O volume %s não deve ser parte de um grupo de consistências." @@ -8704,11 +8304,6 @@ msgstr "O Grupo de Volume %s não existe" msgid "Volume Type %(id)s already exists." msgstr "O Tipo de Volume %(id)s já existe." -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"Tipo de volume %(type_id)s não possui especificação extra com a chave %(id)s." 
- #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -8913,9 +8508,6 @@ msgstr "" msgid "Volume size must be a multiple of 1 GB." msgstr "O tamanho do volume deve ser múltiplo de 1 GB." -msgid "Volume size must be multiple of 1 GB." -msgstr "O tamanho do volume deve ser múltiplo de 1 GB." - msgid "Volume size must multiple of 1 GB." msgstr "O tamanho do volume deve ser múltiplo de 1 GB." @@ -8995,10 +8587,6 @@ msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" "O tipo de volume com o nome %(volume_type_name)s não pôde ser localizado." -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "O volume com o ID do volume %s não existe." - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -9013,17 +8601,10 @@ msgstr "" "O volume: %(volumeName)s não foi incluído no grupo de armazenamentos " "%(sgGroupName)s. " -#, python-format -msgid "Volume: %s could not be found." -msgstr "Volume: %s não pôde ser localizado." - #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "O volume %s já está sendo gerenciado pelo Cinder." -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "Volumes serão divididos em objetos desse tamanho (em megabytes)." - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" @@ -9658,13 +9239,6 @@ msgstr "" "create_consistencygroup_from_src suporta somente uma origem cgsnapshot ou " "uma origem de grupo de consistências. Diversas origens não podem ser usadas." -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src suporta uma origem cgsnapshot ou uma origem " -"de grupo de consistências. Diversas origens não podem ser usadas." - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: O vdisk de origem %(src)s (%(src_id)s) não existe." @@ -9772,9 +9346,6 @@ msgstr "" "create_volume_from_snapshot: O status da captura instantânea deve ser " "\"disponível\" para volume de criação. O status inválido é: %s." -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "create_volume_from_snapshot: Os tamanhos de origem e destino diferem." - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." @@ -9782,9 +9353,6 @@ msgstr "" "create_volume_from_snapshot: O tamanho do volume é diferente do volume " "baseado em captura instantânea." -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "não é possível ativar ambas as camadas, a deduplicada e a automática." - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -9829,9 +9397,6 @@ msgstr "remover captura instantânea do nó remoto" msgid "do_setup: No configured nodes." msgstr "do_setup: Nenhum nó configurado." -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries deve ser maior ou igual a 0" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -10030,22 +9595,12 @@ msgstr "A execução do iscsiadm falhou. 
" msgid "key manager error: %(reason)s" msgstr "Erro do gerenciador de chaves: %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key não definido" - msgid "limit param must be an integer" msgstr "o parâmetro limit deve ser um número inteiro" msgid "limit param must be positive" msgstr "o parâmetro limit deve ser positivo" -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -"O manage_existing não pode gerenciar um volume conectado aos hosts. " -"Desconecte esse volume dos hosts existentes antes de importar" - msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing requer uma chave de 'nome' para identificar um volume " @@ -10092,11 +9647,6 @@ msgstr "vários recursos com ID de captura instantânea %s localizado" msgid "name cannot be None" msgstr "o nome não pode ser Nenhum" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "" -"naviseccli_path: Não foi possível localizar a ferramenta NAVISECCLI %(path)s." - #, python-format msgid "no REPLY but %r" msgstr "Nenhuma REPLY, mas %r" @@ -10159,14 +9709,6 @@ msgstr "bibliotecas Python rados e rbd não localizadas" msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted pode ser apenas um de 'no', 'yes' ou 'only', não %r" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "replication_device deve ser configurado no backend: %s." - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "replication_device com backend_id [%s] está ausente." - #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover falhou. %s não localizado." @@ -10225,9 +9767,6 @@ msgstr "san_ip não está configurado." msgid "san_ip must be set" msgstr "san_ip deve ser configurado" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip: Configuração de campo obrigatória. san_ip não foi configurado." - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." @@ -10239,16 +9778,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve() pode ser chamado apenas uma vez" -msgid "service not found" -msgstr "serviço não encontrado" - -msgid "snapshot does not exist" -msgstr "a captura instantânea não existe" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "ID da captura instantânea:%s não localizado" - #, python-format msgid "snapshot-%s" msgstr "captura instantânea-%s" @@ -10259,10 +9788,6 @@ msgstr "capturas instantâneas designadas" msgid "snapshots changed" msgstr "capturas instantâneas alteradas" -#, python-format -msgid "source vol id:%s not found" -msgstr "ID do vol. de origem:%s não localizado" - #, python-format msgid "source volume id:%s is not replicated" msgstr "ID do volume de origem:%s não é replicado" @@ -10361,9 +9886,6 @@ msgstr "volume designado" msgid "volume changed" msgstr "volume alterado" -msgid "volume does not exist" -msgstr "o volume não existe" - msgid "volume is already attached" msgstr "o volume já está conectado" @@ -10381,9 +9903,6 @@ msgstr "" msgid "volume size %d is invalid." msgstr "O tamanho do volume %d é inválido." 
-msgid "volume_type cannot be None" -msgstr "O volume_type não pode ser Nenhum" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" @@ -10420,6 +9939,3 @@ msgid "" msgstr "" "A propriedade zfssa_manage_policy precisa ser configurada para 'strict' ou " "'loose'. O valor atual é: %s." - -msgid "{} is not a valid option." -msgstr "{} não é uma opção válida." diff --git a/cinder/locale/ru/LC_MESSAGES/cinder.po b/cinder/locale/ru/LC_MESSAGES/cinder.po index b49ce08c9..0acc87a64 100644 --- a/cinder/locale/ru/LC_MESSAGES/cinder.po +++ b/cinder/locale/ru/LC_MESSAGES/cinder.po @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -265,9 +265,6 @@ msgstr "Должно быть указано значение status." msgid "'volume_id' must be specified" msgstr "Должен быть указан параметр volume_id" -msgid "'{}' object has no attribute '{}'" -msgstr "Объект '{}' не имеет атрибута '{}'" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -426,21 +423,9 @@ msgstr "" msgid "An API version request must be compared to a VersionedMethod object." msgstr "Запрос версии API должен сравниваться с объектом VersionedMethod." -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "Обнаружена ошибка в SheepdogDriver. (Причина: %(reason)s)" - msgid "An error has occurred during backup operation" msgstr "Ошибка операции резервного копирования" -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "Ошибка при попытке изменения моментальной копии '%s'." - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "Возникла ошибка при поиске тома \"%s\"." - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -545,17 +530,11 @@ msgid "Auth user details not found in CloudByte storage." msgstr "" "В хранилище CloudByte не найдена информация о пользователе для идентификации." -msgid "Authentication error" -msgstr "Ошибка аутентификации" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "Ошибка идентификации. Проверьте идентификационные данные. Код ошибки %s." -msgid "Authorization error" -msgstr "Ошибка авторизации" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "Зона доступности %(s_az)s недопустима." @@ -574,11 +553,6 @@ msgstr "" msgid "Backend doesn't exist (%(backend)s)" msgstr "Базовый сервер не существует (%(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." -msgstr "" -"Переключение базовой системы после сбоя уже выполнено. Обратное переключение " -"невозможно." - #, python-format msgid "Backend reports: %(message)s" msgstr "Отчеты базовой программы: %(message)s" @@ -589,9 +563,6 @@ msgstr "Отчеты базовой программы: элемент уже с msgid "Backend reports: item not found" msgstr "Отчеты базовой программы: элемент не найден" -msgid "Backend server not NaServer." -msgstr "Базовая программа не NaServer." 
- #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "Тайм-аут повторов службы базовой программы: %(timeout)s с" @@ -697,12 +668,6 @@ msgstr "" msgid "Bad project format: project is not in proper format (%s)" msgstr "Неправильный формат проекта: проект имеет неправильный формат (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "" -"Недопустимый запрос отправлен в кластер Datera:Недопустимые аргументы: " -"%(args)s | %(message)s" - msgid "Bad response from Datera API" msgstr "Неправильный ответ API Datera" @@ -719,18 +684,6 @@ msgstr "Двоичный" msgid "Blank components" msgstr "Пустые компоненты" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Схема идентификации API Blockbridge (token или password)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Пароль API Blockbridge (для схемы идентификации 'password')" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Маркер API Blockbridge (для схемы идентификации 'token')" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Пользователь API Blockbridge (для схемы идентификации 'password')" - msgid "Blockbridge api host not configured" msgstr "Не настроен хост API Blockbridge" @@ -847,9 +800,6 @@ msgstr "Невозможно преобразовать %s в целое чис msgid "Can't access 'scality_sofs_config': %s" msgstr "Нет доступа к scality_sofs_config: %s" -msgid "Can't attach snapshot." -msgstr "Не удается присоединить моментальную копию." - msgid "Can't decode backup record." msgstr "Не удалось декодировать запись резервной копии." @@ -960,10 +910,6 @@ msgstr "" "Не удается импортировать моментальную копию %s в Cinder. Состояние " "моментальной копии указывает на ошибку или на то, что она недоступна." -#, python-format -msgid "Can't open config file: %s" -msgstr "Не удалось открыть файл конфигурации %s" - msgid "Can't parse backup record." msgstr "Не удалось проанализировать запись резервной копии." @@ -1038,13 +984,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "Не удалось подключиться к серверу ECOM." -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"Не удается создать копию с размером %(vol_size)s для тома с размером " -"%(src_vol_size)s" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1093,13 +1032,6 @@ msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "Не удалось создать или найти группу носителей с именем %(sgGroupName)s." -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"Не удается создать том размером %(vol_size)s из моментальной копии размером " -"%(snap_size)s" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "Не удалось создать том размера %s: не кратен 8 ГБ." @@ -1415,10 +1347,6 @@ msgstr "Не настроен порт RPC Coho" msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Команда %(cmd)s заблокирована в CLI и была отменена" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition: тайм-аут %s " - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: тайм-аут %s." @@ -1581,18 +1509,10 @@ msgstr "Не найден ИД кластера GPFS: %s." 
msgid "Could not find GPFS file system device: %s." msgstr "Не найдено устройство файловой системы GPFS: %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "Не найден хост для тома %(volume_id)s с типом %(type_id)s." - #, python-format msgid "Could not find config at %(path)s" msgstr "Невозможно найти конфигурацию по адресу %(path)s" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "Не найден экспорт iSCSI для тома %(volumeName)s." - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Не удалось найти экспорт iSCSI для тома %s" @@ -1676,17 +1596,6 @@ msgstr "" "Создание резервной копии прервано: ожидалось состояние тома " "%(expected_status)s, получено %(actual_status)s." -msgid "Create consistency group failed." -msgstr "Не удалось создать группу согласования." - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "" -"Создание зашифрованных томов с типом %(type)s из образа %(image)s не " -"поддерживается." - msgid "Create export for volume failed." msgstr "Не удалось создать экспорт для тома." @@ -1774,12 +1683,6 @@ msgstr "" "Связанный с томом %(vol)s хост находится в неподдерживаемой группе хостов " "%(group)s." -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "УСТАРЕЛО: Развернуть версию 1 API Cinder." - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "УСТАРЕЛО: Развернуть версию 2 API Cinder." - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1860,15 +1763,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "LUN с дедупликацией нельзя расширять" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"Программа включения запрета дубликатов не установлена. Создать том без " -"дубликатов невозможно" - -msgid "Default pool name if unspecified." -msgstr "Имя пула по умолчанию, если не указано." - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1882,12 +1776,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "Не удается найти тип тома по умолчанию." -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"Определяет набор экспортированных пулов и связанных с ними строк запроса " -"базовой системы" - msgid "Delete LUNcopy error." msgstr "Ошибка удаления LUNcopy." @@ -1976,9 +1864,6 @@ msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Ошибка конфигурации драйвера Dell Cinder. replication_device %s не найден" -msgid "Deploy v3 of the Cinder API." -msgstr "Развернуть версию 3 API Cinder." - msgid "Describe-resource is admin only functionality" msgstr "Функция Describe-resource доступна только администраторам" @@ -2030,13 +1915,6 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Драйверу не удалось инициализировать соединение (ошибка: %(err)s)." -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"Драйвер не может изменить тип, так как том (LUN {}) содержит моментальную " -"копию, перенос которой запрещен." 
- msgid "Driver must implement initialize_connection" msgstr "Драйвер должен реализовать initialize_connection" @@ -2539,10 +2417,6 @@ msgstr "" "Ошибка при связывании группы носителей %(storageGroupName)s со стратегией " "FAST %(fastPolicyName)s, описание ошибки: %(errordesc)s." -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "Ошибка подключения тома %s. Количество целевых объектов предельное!" - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2713,10 +2587,6 @@ msgstr "" msgid "Error managing volume: %s." msgstr "Ошибка управления томом: %s." -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "Ошибка при преобразовании тома %(vol)s: %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2759,17 +2629,9 @@ msgstr "Ошибка удаления моментальной копии гру msgid "Error occurred when updating consistency group %s." msgstr "Ошибка изменения группы согласования %s." -#, python-format -msgid "Error parsing config file: %s" -msgstr "Ошибка анализа файла конфигурации: %s" - msgid "Error promoting secondary volume to primary" msgstr "Ошибка при попытке продвинуть вспомогательный том до уровня основного" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "Ошибка при удалении тома %(vol)s: %(error)s." - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Ошибка переименования тома %(vol)s: %(err)s." @@ -3052,12 +2914,6 @@ msgstr "" msgid "Extend volume not implemented" msgstr "Том расширения не реализован" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"Программа включения VP FAST не установлена. Задать стратегию слоев для тома " -"невозможно" - msgid "FAST is not supported on this array." msgstr "FAST не поддерживается в этом массиве." @@ -3127,10 +2983,6 @@ msgstr "" "Не удалось получить блокировку ресурса. (порядковый номер: %(serial)s, " "экземпляр: %(inst)s, код возврата: %(ret)s, stderr: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "Добавить %(vol)s в %(sg)s после %(retries)s попыток не удалось." - msgid "Failed to add the logical device." msgstr "Не удалось добавить логическое устройство." @@ -3216,9 +3068,6 @@ msgstr "" msgid "Failed to create IG, %s" msgstr "Не удалось создать группу инициаторов, %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "Не удалось создать образ-том SolidFire" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Не удалось создать группу тома %(vg_name)s" @@ -3329,9 +3178,6 @@ msgstr "Не удалось создать поток тома админист msgid "Failed to create snapshot %s" msgstr "Не удалось создать моментальную копию %s" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "Не удалось создать моментальную копию, так как не указан ИД LUN" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "" @@ -3480,9 +3326,6 @@ msgstr "" "Не удалось обеспечить область ресурсов моментальной копии. Не найден том с " "ИД %s" -msgid "Failed to establish SSC connection." -msgstr "Не удалось установить соединение SSC." - msgid "Failed to establish connection with Coho cluster" msgstr "Не удалось установить соединение с кластером Coho" @@ -3533,10 +3376,6 @@ msgstr "Не найден хост %s." msgid "Failed to find iSCSI initiator group containing %(initiator)s." 
msgstr "Не найдена группа инициаторов iSCSI, содержащая %(initiator)s." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "Найти пул носителей для исходного тома %s не удалось." - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Не удалось получить сведения об учетной записи CloudByte [%s]." @@ -3765,27 +3604,6 @@ msgstr "" "Сбой управления существующего тома %(name)s: размер %(size)s не число с " "плавающей точкой." -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"Сбой управления существующим томом, поскольку пул выбранного типа тома не " -"соответствует общему ресурсу NFS, переданному в ссылке тома." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"Сбой управления существующим томом, поскольку пул выбранного типа тома не " -"соответствует файловой системе, переданной в ссылке на том." - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "" -"Сбой управления существующим томом, поскольку пул выбранного типа тома не " -"соответствует пулу хоста." - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -4136,9 +3954,6 @@ msgstr "Ошибка поиска ИД LUN хоста." msgid "Find lun group from mapping view error." msgstr "Ошибка поиска группы LUN из представления связей." -msgid "Find lun number error." -msgstr "Ошибка поиска номера LUN." - msgid "Find mapping view error." msgstr "Ошибка поиска представления связей." @@ -4505,9 +4320,6 @@ msgstr "Получена неправильная информация о пут msgid "HBSD error occurs." msgstr "Ошибка HBSD." -msgid "HNAS has disconnected SSC" -msgstr "HNAS отключил SSC" - msgid "HPELeftHand url not found" msgstr "URL HPELeftHand не найден" @@ -4547,14 +4359,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "У хоста %s нет инициаторов FC" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "У хоста %s нет инициатора iSCSI" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "Не удалось найти хост '%s'." - #, python-format msgid "Host group with name %s not found" msgstr "Не найдена группа хостов с именем %s" @@ -4569,9 +4373,6 @@ msgstr "Хост не заморожен." msgid "Host is already Frozen." msgstr "Хост уже заморожен." -msgid "Host not found" -msgstr "Узел не найден" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Хост не найден. Не удалось переместить %(service)s на %(host)s." @@ -4601,9 +4402,6 @@ msgstr "" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "IP-адрес/имя хоста API Blockbridge." - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" @@ -4701,12 +4499,6 @@ msgstr "" "Исключительная ситуация CLI Infortrend: %(err)s. Параметр: %(param)s (Код " "возврата: %(rc)s) (Вывод: %(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "Недопустимые параметры: начальный уровень: {}, стратегия: {}." - -msgid "Input type {} is not supported." -msgstr "Тип ввода {} не поддерживается." - msgid "Input volumes or snapshots are invalid." msgstr "Недопустимые входные тома или моментальные копии." 
@@ -4723,13 +4515,6 @@ msgstr "Недостаточно места для расширения тома msgid "Insufficient privileges" msgstr "Недостаточно прав доступа" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "Интервал в секундах между повторными подключениями к кластеру ceph." - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "Недопустимые порты %(protocol)s %(port)s указаны в io_port_list." - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Недопустимый домен 3PAR: %(err)s" @@ -4776,10 +4561,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Недопустимый целевой объект репликации: %(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "Недопустимый тип идентификации VNX: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -4822,14 +4603,6 @@ msgstr "Недопустимый ключ идентификации: %(reason)s msgid "Invalid backup: %(reason)s" msgstr "Недопустимая резервная копия: %(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"Недопустимый url barbican api: необходимо указать версию, например, " -"'http[s]://|<полное-имя>[:порт]/<версия>'; указан url: %s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "" "Обнаружена недопустимая информация о пользователе chap в хранилище CloudByte." @@ -5004,10 +4777,6 @@ msgstr "Указан недопустимый пул памяти %s." msgid "Invalid storage pool is configured." msgstr "Настроен недопустимый пул памяти." -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "Указан недопустимый режим синхронизации. Допустимый режим: %s." - msgid "Invalid transport type." msgstr "Недопустимый тип транспорта." @@ -5015,14 +4784,6 @@ msgstr "Недопустимый тип транспорта." msgid "Invalid update setting: '%s'" msgstr "Недопустимый параметр обновления: '%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"Недопустимый url: вместо формата 'http[s]://|<полное-имя>[:порт]/" -"<версия>' указан url: %s" - #, python-format msgid "Invalid value '%s' for force." msgstr "Неверное значение '%s' для принудительного применения." @@ -5169,9 +4930,6 @@ msgid "" msgstr "" "Переключение после сбоя невозможно, так как репликация настроена неверно." -msgid "Item not found" -msgstr "объект не найден" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "Не найден ИД задания в ответе на запрос создания тома CloudByte [%s]." @@ -5201,9 +4959,6 @@ msgstr "LU не существует для тома: %s" msgid "LUN export failed!" msgstr "Сбой экспорта LUN!" -msgid "LUN id({}) is not valid." -msgstr "Недопустимый LUN id({})." - msgid "LUN map overflow on every channel." msgstr "Переполнение карты LUN на каждом канале." @@ -5211,9 +4966,6 @@ msgstr "Переполнение карты LUN на каждом канале." msgid "LUN not found with given ref %s." msgstr "LUN не найден по данной ссылке %s." -msgid "LUN number ({}) is not an integer." -msgstr "Номер LUN ({}) не является целым числом." - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." 
msgstr "" @@ -5403,55 +5155,15 @@ msgstr "Резервная копия метаданных уже существ msgid "Metadata backup object '%s' already exists" msgstr "Объект резервной копии метаданных %s уже существует" -msgid "Metadata item was not found" -msgstr "Элемент метаданных не найден" - -msgid "Metadata item was not found." -msgstr "Элемент метаданных не найден." - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "Ключ свойства метаданных %s длиннее 255 символов" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "Значение %s ключа свойства метаданных длиннее 255 символов" - -msgid "Metadata property key blank" -msgstr "Ключ свойства метаданных пуст" - msgid "Metadata property key blank." msgstr "Пустой ключ свойства метаданных." -msgid "Metadata property key greater than 255 characters." -msgstr "Длина ключа свойства метаданных превышает 255 символов." - -msgid "Metadata property value greater than 255 characters." -msgstr "Значение свойства метаданных превышает 255 символов." - msgid "Metadata restore failed due to incompatible version" msgstr "Не удалось восстановить метаданные: несовместимая версия" msgid "Metadata restore failed due to incompatible version." msgstr "Не удалось восстановить метаданные: несовместимая версия." -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "Не удалось перенести том %(src)s." - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"Не удалось перенести том между исходным томом %(src)s и целевым томом " -"%(dst)s." - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "Миграция LUN %s остановлена или возникла неполадка." - -msgid "MirrorView/S enabler is not installed." -msgstr "Программа включения MirrorView/S не установлена." - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5479,9 +5191,6 @@ msgstr "В теле запроса отсутствует обязательны msgid "Missing required element 'consistencygroup' in request body." msgstr "В теле запроса отсутствует обязательный элемент 'consistencygroup'." -msgid "Missing required element 'host' in request body." -msgstr "В теле запроса отсутствует обязательный элемент 'host'." - msgid "Missing required element quota_class_set in request body." msgstr "Отсутствует требуемый параметр quota_class_set в теле запроса." @@ -5605,9 +5314,6 @@ msgstr "Необходимо указать имя или ИД пула памя msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "Необходимо указать пулы памяти. Опция: sio_storage_pools." -msgid "Must supply a positive value for age" -msgstr "Должно быть указано положительное значение возраста" - msgid "Must supply a positive, non-zero value for age" msgstr "" "В качестве возраста необходимо указать положительное число, не равное 0" @@ -5994,9 +5700,6 @@ msgstr "" "Получен пустой ответ на запрос выполнения операции [%(operation)s] с помощью " "задания [%(job)s] в хранилище CloudByte." -msgid "Number of retries if connection to ceph cluster failed." -msgstr "Число повторов в случае сбоя подключения к кластеру ceph." - msgid "Object Count" msgstr "Количество объектов" @@ -6057,16 +5760,10 @@ msgstr "Опция gpfs_images_share_mode указана неправильно. msgid "Option gpfs_mount_point_base is not set correctly." msgstr "Опция gpfs_mount_point_base указана неправильно." -msgid "Option map (cls._map) is not defined." -msgstr "Карта опций (cls._map) не определена." 
- #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "Исходное значение %(res)s %(prop)s должно быть одно из '%(vals)s'" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "Переопределить порт HTTPS для подключения к серверу API Blockbridge." - #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" @@ -6318,15 +6015,6 @@ msgstr "Неполный ответ от сервера RPC" msgid "Raid did not have MCS Channel." msgstr "У RAID нет канала MCS." -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"Достигнуто ограничение, заданное опцией конфигурации " -"max_luns_per_storage_group. Операция добавления %(vol)s в группу носителей " -"%(sg)s отклонена." - #, python-format msgid "Received error string: %s" msgstr "Получена ошибочная строка: %s" @@ -6510,9 +6198,6 @@ msgstr "Не найдена требуемая конфигурация" msgid "Required flag %s is not set" msgstr "Не указан требуемый флаг %s" -msgid "Requires an NaServer instance." -msgstr "Требуется экземпляр NaServer." - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -6711,10 +6396,6 @@ msgstr "Служба %(service_id)s не найдена на хосте %(host)s msgid "Service %(service_id)s could not be found." msgstr "Служба %(service_id)s не найдена." -#, python-format -msgid "Service %s not found." -msgstr "Служба %s не найдена." - msgid "Service is too old to fulfil this request." msgstr "Служба устарела и не поддерживает этот запрос." @@ -6806,10 +6487,6 @@ msgstr "Снимок %(snapshot_id)s не может быть найден." msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "У снимка %(snapshot_id)s нет метаданных с ключом %(metadata_key)s." -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "Моментальная копия %s не должна быть частью группы согласования." - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "Моментальная копия '%s' не существует в массиве." @@ -6838,9 +6515,6 @@ msgstr "" "Создать моментальную копию для ресурса \"%s\", который нигде не был " "развернут?" -msgid "Snapshot size must be multiple of 1 GB." -msgstr "Размер моментальной копии должен быть кратен 1 ГБ." - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" @@ -6996,9 +6670,6 @@ msgstr "Не указан ИД системы хранения." msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Для пула %(poolNameInStr)s не найдена система памяти." -msgid "Storage-assisted migration failed during manage volume." -msgstr "Перенос с помощью носителя не выполнен при операции управления томом." - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "Система хранения %(array)s не найдена." @@ -7047,10 +6718,6 @@ msgstr "" msgid "Target volume type is still in use." msgstr "Тип целевого тома еще используется." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "ИД арендатора %s не существует." - msgid "Terminate connection failed" msgstr "Завершение соединения не выполнено" @@ -7145,10 +6812,6 @@ msgstr "" "Конечное время (%(end)s) должно указывать на время после начального времени " "(%(start)s)." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "Значение extra_spec %s недопустимо." - #, python-format msgid "The extraspec: %(extraspec)s is not valid." 
msgstr "Недопустимая дополнительная спецификация %(extraspec)s." @@ -7200,14 +6863,6 @@ msgstr "" msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "Пользователь %(user)s CHAP iSCSI не существует." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"Импортированный LUN %(lun_id)s находится в пуле %(lun_pool)s, который не " -"управляется хостом %(host)s." - msgid "The key cannot be None." msgstr "Ключ не может быть None." @@ -7281,11 +6936,6 @@ msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "Моментальную копию нельзя создать, когда том находится в режиме обслуживания." -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "Исходный том %s отсутствует в пуле, управляемом текущим хостом." - msgid "The source volume for this WebDAV operation not found." msgstr "Не найден исходный том для этой операции WebDAV." @@ -7452,10 +7102,6 @@ msgstr "Нет доступных ресурсов. (ресурс: %(resource)s) msgid "There are no valid ESX hosts." msgstr "Нет допустимых хостов ESX." -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "Нет допустимых хранилищ данных, подключенных к %s." - msgid "There are no valid datastores." msgstr "Нет допустимых хранилищ данных." @@ -7549,11 +7195,6 @@ msgstr "" msgid "Thin provisioning not supported on this version of LVM." msgstr "Оперативное выделение ресурсов не поддерживается в этой версии LVM." -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "" -"Программа включения оперативного выделения ресурсов не установлена. Создать " -"том с оперативным выделением ресурсов невозможно" - msgid "This driver does not support deleting in-use snapshots." msgstr "Этот драйвер не поддерживает удаление используемых моментальных копий." @@ -7588,14 +7229,6 @@ msgstr "" "Истек тайм-аут ожидания обновления Nova для удаления моментальной копии " "%(id)s." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"Значение тайм-аута (в секундах), используемое при подключении к кластеру " -"ceph. Если оно отрицательно, то тайм-аут не задан и используется значение " -"librados по умолчанию." - #, python-format msgid "Timeout while calling %s " msgstr "Тайм-аут при вызове %s " @@ -7681,9 +7314,6 @@ msgstr "Не удается выполнить переключение посл msgid "Unable to connect or find connection to host" msgstr "Не удалось подключиться к хосту или найти соединение с ним" -msgid "Unable to create Barbican Client without project_id." -msgstr "Невозможно создать Barbican Client без project_id." - #, python-format msgid "Unable to create consistency group %s" msgstr "Не удалось создать группу согласования %s" @@ -7781,10 +7411,6 @@ msgstr "" "Невозможно выполнить репликацию с версией API %(api_version)s Purity REST, " "требуется одна из версий %(required_versions)s." -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "" -"Нельзя использовать репликацию и моментальное копирование одновременно." - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Не удаётся установить партнёрство с кластером Storwize %s." @@ -8189,9 +7815,6 @@ msgstr "Неизвестный протокол: %(protocol)s." msgid "Unknown quota resources %(unknown)s." msgstr "Неизвестные ресурсы квоты: %(unknown)s." 
-msgid "Unknown service" -msgstr "Неизвестная служба" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Неизвестное направление сортировки, должно быть 'desc' или 'asc'" @@ -8308,9 +7931,6 @@ msgstr "ID пользователя" msgid "User does not have admin privileges" msgstr "Пользователь не имеет административных привилегий" -msgid "User is not authorized to use key manager." -msgstr "У пользователя нет прав на использование Администратора ключей." - msgid "User not authorized to perform WebDAV operations." msgstr "У пользователя нет прав доступа для выполнения операций WebDAV." @@ -8527,14 +8147,6 @@ msgid "" msgstr "" "Том %s включен. Выключите его, чтобы передать под управление OpenStack." -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "" -"Том %s не должен быть в процессе переноса или присоединен, не должен " -"принадлежать группе согласования или иметь моментальные копии." - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Том %s не должен быть частью группы согласования." @@ -8563,11 +8175,6 @@ msgstr "Группа томов %s не существует" msgid "Volume Type %(id)s already exists." msgstr "Тип тома %(id)s уже существует." -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"Тип тома %(type_id)s не имеет дополнительных спецификаций с ключом %(id)s." - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -8766,9 +8373,6 @@ msgstr "" msgid "Volume size must be a multiple of 1 GB." msgstr "Размер тома должен быть кратным 1 ГБ." -msgid "Volume size must be multiple of 1 GB." -msgstr "Размер тома должен быть кратным 1 ГБ." - msgid "Volume size must multiple of 1 GB." msgstr "Размер тома должен быть кратен 1 ГБ." @@ -8846,10 +8450,6 @@ msgstr "Имя типа тома не должно быть пустым." msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "Тип тома под названием %(volume_type_name)s не может быть найден." -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "Том с ИД тома %s не существует." - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -8863,17 +8463,10 @@ msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" "Том %(volumeName)s не был добавлен в группу носителей %(sgGroupName)s. " -#, python-format -msgid "Volume: %s could not be found." -msgstr "Не найден том %s." - #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Том %s уже находится под управлением Cinder." -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "Тома будут разбиты на блоки этого размера (в мегабайтах)." - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" @@ -9502,13 +9095,6 @@ msgstr "" "create_consistencygroup_from_src поддерживает только источник cgsnapshot или " "источник группы согласования. Несколько источников использовать нельзя." -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src поддерживает только источник cgsnapshot или " -"источник группы согласования. Несколько источников использовать нельзя." - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." 
msgstr "create_copy: Исходный vdisk %(src)s (%(src_id)s) не существует." @@ -9614,9 +9200,6 @@ msgstr "" "create_volume_from_snapshot: Для состояние тома требуется состояние " "моментальной копии \"available\". Недопустимое состояние: %s." -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "create_volume_from_snapshot: различаются исходный и целевой размеры." - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." @@ -9624,11 +9207,6 @@ msgstr "" "create_volume_from_snapshot: размер тома отличается от размера тома на " "основе моментальной копии." -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "" -"Включить запрет дубликатов и автоматическое разбиение на слои одновременно " -"нельзя." - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -9672,9 +9250,6 @@ msgstr "отключить моментальную копию от удален msgid "do_setup: No configured nodes." msgstr "do_setup: Нет настроенных узлов." -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "Значение eqlx_cli_max_retries должно быть больше или равно 0" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -9875,22 +9450,12 @@ msgstr "Ошибка выполнения iscsiadm. " msgid "key manager error: %(reason)s" msgstr "ошибка администратора ключей: %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key не определен" - msgid "limit param must be an integer" msgstr "Параметр limit должен быть целым числом" msgid "limit param must be positive" msgstr "Параметр limit должен быть положительным" -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -"manage_existing не поддерживает управление томом, подключенным к хостам. " -"Отключите том от существующих хостов перед импортом" - msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "Для manage_existing требуется ключ 'name' для идентификации существующего " @@ -9939,10 +9504,6 @@ msgstr "Обнаружено несколько ресурсов с ИД мом msgid "name cannot be None" msgstr "Недопустимое значение для имени: None" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path: инструмент NAVISECCLI %(path)s не найден." - #, python-format msgid "no REPLY but %r" msgstr "нет REPLY за исключением %r" @@ -10007,14 +9568,6 @@ msgstr "" "read_deleted может принимать значения 'no', 'yes' или 'only', значение %r " "недопустимо" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "В базовой системе должно быть настроено устройство репликации: %s." - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "Отсутствует устройство репликации с backend_id [%s]." - #, python-format msgid "replication_failover failed. %s not found." msgstr "Ошибка replication_failover. %s не найден." @@ -10076,9 +9629,6 @@ msgstr "Не задано значение san_ip." msgid "san_ip must be set" msgstr "san_ip должен быть назначен" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip: обязательное поле. san_ip не задан." - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." 
@@ -10090,16 +9640,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve() может быть вызван только один раз" -msgid "service not found" -msgstr "служба не найдена" - -msgid "snapshot does not exist" -msgstr "моментальная копия не существует" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "Не найден ИД моментальной копии %s" - #, python-format msgid "snapshot-%s" msgstr "snapshot-%s" @@ -10110,10 +9650,6 @@ msgstr "моментальные копии присвоены" msgid "snapshots changed" msgstr "моментальные копии изменены" -#, python-format -msgid "source vol id:%s not found" -msgstr "Не найден ИД исходного тома %s" - #, python-format msgid "source volume id:%s is not replicated" msgstr "исходный том с ИД %s не скопирован" @@ -10213,9 +9749,6 @@ msgstr "том присвоен" msgid "volume changed" msgstr "том изменен" -msgid "volume does not exist" -msgstr "Том не существует" - msgid "volume is already attached" msgstr "том уже подключен" @@ -10233,9 +9766,6 @@ msgstr "" msgid "volume size %d is invalid." msgstr "Недопустимый размер тома %d." -msgid "volume_type cannot be None" -msgstr "volume_type не может быть задан как None" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" @@ -10270,6 +9800,3 @@ msgid "" msgstr "" "Свойству zfssa_manage_policy должно быть присвоено значение 'strict' или " "'loose'. Текущее значение: %s." - -msgid "{} is not a valid option." -msgstr "{} не является допустимым параметром." diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po index 0fcaf332d..1240b68cb 100644 --- a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po +++ b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev487\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-30 03:17+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -87,10 +87,6 @@ msgstr "" "unmanage işlemi denenirken SolidFire Kümesinde Mantıksal Sürücü Kimliği %s " "için hesap bulunamadı!" -#, python-format -msgid "Array mismatch %(myid)s vs %(arid)s" -msgstr "Dizi uyuşmazlığı %(myid)s ve %(arid)s" - #, python-format msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." msgstr "İstek yetkilendiriliyor: %(zfssaurl)s tekrar deneme: %(retry)d ." @@ -134,23 +130,10 @@ msgstr "Anlık sistem görüntüsü oluşturmak için Nova çağrısı başarıs msgid "Call to json.loads() raised an exception: %s." msgstr "json.loads() çağrısı bir istisna oluşturdu: %s." -#, python-format -msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s." -msgstr "%(cg_name)s tutarsızlık grubuna %(lun)s lun eklenemiyor." - #, python-format msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." msgstr "%(target_iqn)s ile %(target_ip)s keşfedilemiyor." -#, python-format -msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s." -msgstr "" -"%(cg_name)s tutarsızlık grubunda yeni %(luns)s LUN'lar yerleştirilemiyor." - -#, python-format -msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." -msgstr "%(cg_name)s tutarsızlık grubunda %(luns)s LUN'lar kaldırılamıyor." 
- #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "%(key)s eşleştirmek için mantıksal sürücü bulunamıyor, %(msg)s" @@ -258,10 +241,6 @@ msgstr "Flexvisor hatasına bağlan: %s." msgid "Connect to Flexvisor failed: %s." msgstr "Flexvisor'a bağlanılamadı: %s." -#, python-format -msgid "Consistency group %s: create failed" -msgstr "Tutarlılık grubu %s: oluşturma başarısız oldu" - #, python-format msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" msgstr "" @@ -287,14 +266,6 @@ msgstr "" msgid "Could not stat scheduler options file %(filename)s." msgstr "%(filename)s zamanlayıcı seçenek dosyalarının bilgileri gösterilemedi." -#, python-format -msgid "Create cg snapshot %s failed." -msgstr "%s cg anlık sistem görüntüsü oluşturma başarısız oldu." - -#, python-format -msgid "Create consistency group %s failed." -msgstr "%s tutarlılık grubu oluşturma başarısız oldu." - #, python-format msgid "" "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." @@ -336,14 +307,6 @@ msgstr "DB hatası:" msgid "DBError encountered: " msgstr "DBError ile karşılaşıldı: " -#, python-format -msgid "Delete cgsnapshot %s failed." -msgstr "%s cgsnapshot silme başarısız oldu." - -#, python-format -msgid "Delete consistency group %s failed." -msgstr "%s tutarlılık grubu silme başarısız oldu." - msgid "Delete consistency group failed to update usages." msgstr "Kullanımları güncellemek için tutarlılık grubu silme başarısız oldu." @@ -422,9 +385,6 @@ msgstr "" msgid "Error activating LV" msgstr "LV etkinleştirilirken hata" -msgid "Error adding HBA to server" -msgstr "Sunucuya HBA ekleme hatası" - msgid "Error closing channel." msgstr "Kanal kapatılırken hata." @@ -435,12 +395,6 @@ msgstr "" "'%(method)s' için '%(netloc)s' glance sunucusuna bağlantı kurulurken hata, " "%(extra)s. " -msgid "Error copying key." -msgstr "Anahtar kopyalama hatası." - -msgid "Error creating Barbican client." -msgstr "Barbican istemcisi oluşturulurken hata." - #, python-format msgid "Error creating QOS rule %s" msgstr "QOS kuralı %s oluşturulurken hata" @@ -454,9 +408,6 @@ msgstr "Mantıksal Sürücü Grubu oluşturulurken hata" msgid "Error creating chap record." msgstr "Chap kaydı oluşturulurken hata." -msgid "Error creating key." -msgstr "Anahtar oluşturma hatası." - msgid "Error creating snapshot" msgstr "Anlık sistem görüntüsü oluşturma hatası" @@ -464,9 +415,6 @@ msgstr "Anlık sistem görüntüsü oluşturma hatası" msgid "Error creating volume. Msg - %s." msgstr "Mantıksal sürücü oluşturulurken hata. İleti - %s." -msgid "Error deleting key." -msgstr "Anahtar silinirken hata." - #, python-format msgid "Error detaching volume %(volume)s, due to remove export failure." msgstr "" @@ -528,28 +476,15 @@ msgstr "Dizi, havuz, SLO ve iş yükü alma hatası." msgid "Error getting chap record." msgstr "Chap kaydı alınırken hata." -msgid "Error getting key." -msgstr "Anahtar alınırken hata." - msgid "Error getting name server info." msgstr "Ad sunucu bilgisi alınırken hata." -msgid "Error getting secret data." -msgstr "Gizli veri alırken hata." - -msgid "Error getting secret metadata." -msgstr "Gizli metadata alınırken hata." - msgid "Error getting show fcns database info." msgstr "fcns veritabanı bilgisini göster sonucu alınırken hata." msgid "Error getting target pool name and array." msgstr "Hedef havuz adı ve dizisi alınırken hata." -#, python-format -msgid "Error happened during storage pool querying, %s." -msgstr "Depolama havuzu sorgulama sırasında hata oluştu, %s." 
- #, python-format msgid "Error in copying volume: %s" msgstr "Mantıksal sürücü kopyalamada hata: %s" @@ -674,14 +609,6 @@ msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." msgstr "" "Mantıksal sürücü oluşturulurken hata oluştu: imajdan %(id)s: %(image_id)s." -#, python-format -msgid "Error on adding lun to consistency group. %s" -msgstr "Tutarsızlık grubuna lun eklenirken hata. %s" - -#, python-format -msgid "Error on enable compression on lun %s." -msgstr "Lun %s sıkıştırma etkinleştirmede hata." - #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" @@ -743,9 +670,6 @@ msgstr "" msgid "Error setting Flash Cache policy to %s - exception" msgstr "%s için Flash Cache ilkesi ayarlanırken hata - istisna" -msgid "Error storing key." -msgstr "Anahtar depolama hatası." - #, python-format msgid "Error unmapping volume: %s" msgstr "Mantıksal sürücü eşleştirmesi kaldırılırken hata: %s" @@ -1134,10 +1058,6 @@ msgstr "" msgid "Failed to find %(s)s. Result %(r)s" msgstr "%(s)s bulunamadı. Sonuç %(r)s" -#, python-format -msgid "Failed to find available iSCSI targets for %s." -msgstr "%s için kullanılabilir iSCSI hedefleri bulunamadı." - #, python-format msgid "Failed to get device number for throttling: %(error)s" msgstr "Ayarlama için aygıt numarası alınamadı: %(error)s" @@ -1228,9 +1148,6 @@ msgstr "osapi_volume yükleme başarısız" msgid "Failed to open iet session list for %s" msgstr "%s için iet oturum listesi açılamadı" -msgid "Failed to query migration status of LUN." -msgstr "LUN'un göç durumu sorgulanamadı." - msgid "Failed to re-export volume, setting to ERROR." msgstr "Mantıksal sürücü yeniden dışa aktarılamadı, HATA durumuna ayarlıyor." @@ -1365,14 +1282,6 @@ msgstr "" "Verilen anlık sistem görüntüsü %(snapshot_id)s metadata'sı kullanılarak " "%(volume_id)s metadata'sı güncellenemedi." -#, python-format -msgid "" -"Failed to update initiator data for initiator %(initiator)s and backend " -"%(backend)s" -msgstr "" -"%(backend)s art alanda çalışan uygulama ve %(initiator)s başlatıcısı için " -"başlatıcı veri güncellenemedi" - #, python-format msgid "Failed to update quota donating volume transfer id %s" msgstr "%s mantıksal sürücü aktarım kimliğine verilen kota güncellenemedi" @@ -1507,10 +1416,6 @@ msgstr "Get metodu hatası." msgid "Get replication status for volume failed." msgstr "Mantıksal sürücü için kopyalama durumu alma başarısız oldu." -#, python-format -msgid "HDP not found: %s" -msgstr "HDP bulunamadı: %s" - #, python-format msgid "ISCSI discovery attempt failed for:%s" msgstr "ISCSI keşif girişimi başarısız oldu:%s" @@ -1527,10 +1432,6 @@ msgstr "Geçersiz JSON: %s" msgid "Invalid ReplayList return: %s" msgstr "Geçersiz ReplayList dönüşü: %s" -#, python-format -msgid "Invalid value for %(key)s, value is %(value)s." -msgstr "%(key)s için geçersiz değer, değer %(value)s." - #, python-format msgid "JSON encode params %(param)s error: %(status)s." msgstr "JSON %(param)s parametre şifreleme hatası: %(status)s." @@ -1569,12 +1470,6 @@ msgstr "" msgid "Lun delete for %s failed!" msgstr "%s için Lun silme başarısız oldu!" -#, python-format -msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!" -msgstr "" -"Lun %(vol)s mantıksal sürücüsü %(snap)s anlık görüntüsü için anlık görüntüyü " -"silemedi!" - msgid "Lun mapping returned null!" msgstr "Lun eşleştirmesi boş değer döndü!" 
@@ -1601,10 +1496,6 @@ msgstr "" msgid "Message: %s" msgstr "İleti: %s" -#, python-format -msgid "Migration of LUN %s failed to complete." -msgstr "%s LUN göçü tamamlanamadı." - msgid "Model update failed." msgstr "Model güncellemesi başarısız oldu." @@ -1620,12 +1511,6 @@ msgstr "%(share)s için bağlama hatası." msgid "Multiple replay profiles under name %s" msgstr "%s adı altında birden fazla tekrar profilleri" -#, python-format -msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s" -msgstr "" -"NFS paylaşımı %(share)s hiçbir servis girdisine sahip değil: %(svc)s -> " -"%(hdp)s" - msgid "No CLI output for firmware version check" msgstr "Donanım yazılımı sürüm kontrolü için hiçbir CLI çıktısı yok" @@ -1644,10 +1529,6 @@ msgstr "" "Hiçbir eylem gerekmez. Mantıksal sürücü: %(volumeName)s zaten slo/workload " "birleşiminin parçasıdır: %(targetCombination)s." -#, python-format -msgid "No configuration found for service: %s" -msgstr "Servis için hiçbir yapılandırma bulunamadı: %s" - #, python-format msgid "" "No snapshots found in database, but %(path)s has backing file " @@ -1842,9 +1723,6 @@ msgid "" "The connector does not contain the required information: wwpns is missing" msgstr "Bağlayıcı gerekli bilgileri içermiyor: wwpns eksik" -msgid "The given extra_spec or valid_values is None." -msgstr "Verilen extra_spec ya da valid_values hiçbiridir." - #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " @@ -2286,9 +2164,6 @@ msgstr "" "sil: %(vol_id)s stdout ile başarısız oldu: %(out)s\n" " stderr: %(err)s" -msgid "delete_vol: provider location empty." -msgstr "delete_vol: sağlayıcı konumu boş." - #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "ensure_export: Mantıksal sürücü %s depolama üzerinde bulunamadı." @@ -2303,10 +2178,6 @@ msgstr "mantıksal sürücü durum bilgisi tazeleme hatası" msgid "horcm command timeout." msgstr "horcm komutu zaman aşımı." -#, python-format -msgid "iSCSI portal not found for service: %s" -msgstr "Servis için iSCSI kapısı bulunamadı: %s" - #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po index 6cd92ef66..1ab272c45 100644 --- a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po +++ b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 8.0.1.dev161\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev522\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-23 16:54+0000\n" +"POT-Creation-Date: 2016-08-31 10:23+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -65,10 +65,6 @@ msgstr "" "Yanıt Verisi:%(res)s\n" "\n" -#, python-format -msgid "%(element)s: %(val)s" -msgstr "%(element)s: %(val)s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" @@ -105,10 +101,6 @@ msgstr "" "'%(name)s' mantıksal sürücüsü için 3PAR vlun silindi, ama '%(host)s' " "istemcisi silinmedi çünkü: %(reason)s" -#, python-format -msgid "AUTH properties: %(authProps)s" -msgstr "AUTH özellikleri: %(authProps)s" - #, python-format msgid "AUTH properties: %s." msgstr "AUTH özellikleri: %s." @@ -157,18 +149,10 @@ msgstr "Mantıksal sürücü ekleme başarıyla tamamlandı." msgid "Availability Zones retrieved successfully." 
msgstr "Kullanılabilir Bölgeler başarıyla alındı." -#, python-format -msgid "Available services: %s" -msgstr "Kullanılabilir servisler: %s" - #, python-format msgid "Backend name is %s." msgstr "Arka uç ismi %s." -#, python-format -msgid "Backend type: %s" -msgstr "Arka uç türü: %s" - #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "Destekleyen VM: %(backing)s %(new_name)s olarak yeniden adlandırıldı." @@ -200,10 +184,6 @@ msgstr "" msgid "Backup service: %s." msgstr "Yedek servisi: %s." -#, python-format -msgid "Bandwidth limit is: %s." -msgstr "Bant genişliği sınırı: %s." - #, python-format msgid "Begin backup of volume %s." msgstr "Mantıksal sürücü %s yedeğine başla." @@ -215,10 +195,6 @@ msgstr "Mantıksal sürücünün ayrılmasına başlanması başarıyla tamamlan msgid "CONCERTO version: %s" msgstr "CONCERTO sürümü: %s" -#, python-format -msgid "Cancelling Migration from LUN %s." -msgstr "LUN %s'den Göç iptal ediliyor" - #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because cluster " @@ -316,13 +292,6 @@ msgstr "İmaj %s anlık görüntüden klonlanıyor." msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "Mantıksal sürücü %(src)s %(dst)s mantıksal sürücüsüne klonlanıyor" -#, python-format -msgid "" -"Cloning with volume_name %(vname)s clone_name %(cname)s export_path %(epath)s" -msgstr "" -"volume_name %(vname)s clone_name %(cname)s export_path %(epath)s ile " -"klonlanıyor" - #, python-format msgid "CloudByte API executed successfully for command [%s]." msgstr "CloudByte API'si [%s] komutu için başarıyla çalıştırıldı." @@ -334,10 +303,6 @@ msgstr "Mantıksal sürücü göçü-tamamlama başarıyla tamamlandı." msgid "Completed: convert_to_base_volume: id=%s." msgstr "Tamamlandı: convert_to_base_volume: id=%s." -#, python-format -msgid "Configured pools: %s" -msgstr "Yapılandırılan havuzlar: %s" - #, python-format msgid "" "Connect initialization info: {driver_volume_type: fibre_channel, data: " @@ -354,18 +319,6 @@ msgstr "İstemciye bağlanılıyor: %s." msgid "Connector returning fcnsinfo-%s" msgstr "Bağlayıcı fcnsinfo-%s döndürüyor" -#, python-format -msgid "Consistency group %s was deleted successfully." -msgstr "Tutarlılık grubu %s başarıyla silindi." - -#, python-format -msgid "Consistency group %s: created successfully" -msgstr "Tutarlılık grubu %s: başarıyla oluşturuldu" - -#, python-format -msgid "Consistency group %s: creating" -msgstr "Tutarlılık grubu %s: oluşturuluyor" - #, python-format msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" msgstr "%(sz).2f MB imaj %(mbps).2f MB/s hızda dönüştürüldü" @@ -463,9 +416,6 @@ msgstr "" "Yedek oluşturma başlatıldı, yedek: %(backup_id)s mantıksal sürücü: " "%(volume_id)s." -msgid "Create consistency group completed successfully." -msgstr "Tutarlılık grubu oluşturma başarıyla tamamlandı." - #, python-format msgid "Create export done from Volume %(volume_id)s." msgstr "%(volume_id)s Mantıksal sürücüsünden dışa aktarma oluşturma yapıldı." @@ -484,10 +434,6 @@ msgstr "" msgid "Create snapshot from volume %s" msgstr "%s biriminden sistem görüntüsü oluşturuluyor" -#, python-format -msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" -msgstr "Anlık görüntü oluştur: %(snapshot)s: mantıksal sürücü: %(volume)s" - #, python-format msgid "" "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " @@ -638,10 +584,6 @@ msgstr "Tutarlılık Grubunu sil: %(group)s." msgid "Delete Snapshot %(snapshot_id)s completed." msgstr "%(snapshot_id)s anlık görüntü silme tamamlandı." 
-#, python-format -msgid "Delete Snapshot: %(snapshot)s" -msgstr "Anlık görüntü sil: %(snapshot)s" - #, python-format msgid "Delete Snapshot: %(snapshot)s." msgstr "Anlık görüntü sil: %(snapshot)s." @@ -769,10 +711,6 @@ msgstr "Anlık görüntü siliniyor: %s" msgid "Deleting stale snapshot: %s" msgstr "Eski anlık görüntü siliniyor: %s" -#, python-format -msgid "Deleting unneeded host %(host_name)r." -msgstr "İhtiyaç duyulmayan istemci %(host_name)r siliniyor." - #, python-format msgid "Deleting volume %s " msgstr "Mantıksal sürücü %s siliniyor " @@ -829,10 +767,6 @@ msgstr "%(vol)s mantıksal sürücüsünün %(img)s yeni imajına kopyalanması msgid "Driver initialization completed successfully." msgstr "Sürücü ilklendirme başarıyla tamamlandı." -#, python-format -msgid "Driver stats: %s" -msgstr "Sürücü istatistikleri: %s" - #, python-format msgid "EQL-driver: Setup is complete, group IP is \"%s\"." msgstr "EQL-sürücüsü: Kurulum tamamlandı, grup IP'si \"%s\"." @@ -845,10 +779,6 @@ msgstr "EQL-sürücüsü: \"%s\" çalıştırılıyor." msgid "Editing Volume %(vol)s with mask %(mask)s" msgstr "%(mask)s maskesine sahip %(vol)s mantıksal sürücüsü düzenleniyor" -#, python-format -msgid "Elapsed time for clear volume: %.2f sec" -msgstr "Mantıksal sürücü temizleme için kalan zaman: %.2f sn" - msgid "Embedded mode detected." msgstr "Gömülü kip algılandı." @@ -898,10 +828,6 @@ msgstr "Mantıksal sürücü büyütme isteği başarıyla yapıldı." msgid "Extending volume %s." msgstr "Mantıksal sürücü %s büyütülüyor." -#, python-format -msgid "FC Initiators %(in)s of %(ins)s need registration" -msgstr "FC Başlatıcıları %(in)s / %(ins)s kaydolmalı" - #, python-format msgid "Failed to open iet session list for %(vol_id)s: %(e)s" msgstr "%(vol_id)s için iet oturumu açılamadı: %(e)s" @@ -984,14 +910,6 @@ msgstr "Arka uç için boş kapasite: %(free)s, toplam kapasite: %(total)s." msgid "Generating transfer record for volume %s" msgstr "Mantıksal sürücü %s için aktarım kaydı üretiliyor" -#, python-format -msgid "Get FC targets %(tg)s to register initiator %(in)s." -msgstr "%(in)s başlatıcısını kaydetmek için FC hedeflerini %(tg)s getir." - -#, python-format -msgid "Get ISCSI targets %(tg)s to register initiator %(in)s." -msgstr "%(in)s başlatıcısını kaydetmek için ISCSI hedefleri %(tg)s'yi getir." - msgid "Get all volumes completed successfully." msgstr "Tüm mantıksal sürücülerin getirilmesi başarıyla bitti." @@ -999,10 +917,6 @@ msgstr "Tüm mantıksal sürücülerin getirilmesi başarıyla bitti." msgid "Get domain by name response: %s" msgstr "İsimle alan adı alma yanıtı: %s" -#, python-format -msgid "Get service: %(lbl)s->%(svc)s" -msgstr "Servis getir: %(lbl)s->%(svc)s" - msgid "Get snapshot metadata completed successfully." msgstr "Anlık görüntü metadata'sı getir başarıyla tamamlandı." @@ -1029,25 +943,13 @@ msgstr "vol_name=%s için mantıksal sürücü bilgisi alınıyor" msgid "Going to perform request again %s with valid token." msgstr "İstek %s geçerli jetonla tekrar gerçekleşecek." -#, python-format -msgid "HDP list: %s" -msgstr "HDP listesi: %s" - #, python-format msgid "HTTP exception thrown: %s" msgstr "HTTP istisnası fırlatıldı: %s" -#, python-format -msgid "ISCSI properties: %(properties)s" -msgstr "ISCSI özellikleri: %(properties)s" - msgid "ISCSI provider_location not stored, using discovery." msgstr "ISCSI provider_location kaydedilmemiş, keşif kullanılıyor." 
-#, python-format -msgid "ISCSI volume is: %(volume)s" -msgstr "ISCSI mantıksal sürücüsü: %(volume)s" - #, python-format msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." msgstr "İmaj %(pool)s/%(image)s %(snap)s anlık görüntüsüne bağımlı." @@ -1108,14 +1010,6 @@ msgstr "%(iname)s başlatıcısı için başlatıcı grup ismi %(grp)s" msgid "LUN %(id)s extended to %(size)s GB." msgstr "LUN %(id)s %(size)s GB'ye büyütüldü." -#, python-format -msgid "LUN %(lun)s extended to %(size)s GB." -msgstr "LUN %(lun)s %(size)s GB boyutuna büyütüldü." - -#, python-format -msgid "LUN %(lun)s of size %(sz)s MB is created." -msgstr "%(sz)s MB boyutunda LUN %(lun)s oluşturuldu." - #, python-format msgid "LUN with given ref %s need not be renamed during manage operation." msgstr "" @@ -1222,14 +1116,6 @@ msgstr "FC Bölgesi silinmeli, başlatıcı hedef haritası inşa ediliyor" msgid "Need to remove FC Zone, building initiator target map." msgstr "FC Bölgesi kaldırılmalı, başlatıcı hedef haritası inşa ediliyor." -msgid "" -"Neither security file nor plain text credentials are specified. Security " -"file under home directory will be used for authentication if present." -msgstr "" -"Güvenlik dosyası ya da düz metin kimlik bilgileri belirtilmedi. Eğer " -"mevcutsa ev dizini altındaki güvenlik dosyası kimlik doğrulama için " -"kullanılacak." - #, python-format msgid "" "NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " @@ -1290,10 +1176,6 @@ msgstr "" msgid "Params for add volume request: %s." msgstr "Mantıksal sürücü ekleme isteği için parametreler: %s." -#, python-format -msgid "Parse_loc: %s" -msgstr "Parse_loc: %s" - #, python-format msgid "Performing post clone for %s" msgstr "%s için klon sonrası işler gerçekleştiriliyor" @@ -1302,9 +1184,6 @@ msgstr "%s için klon sonrası işler gerçekleştiriliyor" msgid "Performing secure delete on volume: %s" msgstr "Mantıksal sürücü güvenle siliniyor: %s" -msgid "Plain text credentials are being used for authentication" -msgstr "Kimlik doğrulama için düz metin kimlik bilgileri kullanılıyor" - #, python-format msgid "Pool id is %s." msgstr "Havuz id'si %s." @@ -1517,29 +1396,13 @@ msgstr "Mantıksal sürücü rtype başarıyla tamamlandı." msgid "Retype volume request issued successfully." msgstr "Mantıksal sürücü retype isteği başarıyla yapıldı." -#, python-format -msgid "Review shares: %s" -msgstr "Paylaşımları gözden geçir: %s" - msgid "Roll detaching of volume completed successfully." msgstr "Mantıksal sürücünün ayrılmasının yuvarlanması başarıyla tamamlandı." -#, python-format -msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" -msgstr "%(server)s ve vserver %(vs)s için küme son ssc işi çalıştırılıyor" - -#, python-format -msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" -msgstr "%(server)s ve vserver %(vs)s için eski ssc tazeleme işi çalıştırılıyor" - #, python-format msgid "Running with vmemclient version: %s" msgstr "vmemclient sürüm: %s ile çalışıyor" -#, python-format -msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s" -msgstr " %(svc)s -> %(hdp)s, %(path)s için servis bilgisi kaydet" - #, python-format msgid "" "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " @@ -1615,12 +1478,6 @@ msgstr "%(vol)s mantıksal sürücüsü online_flag %(flag)s olarak ayarlanıyor msgid "Skipping deletion of volume %s as it does not exist." msgstr "%s mantıksal sürücüsünün silinmesi atlanıyor çünkü mevcut değil." -#, python-format -msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s" -msgstr "" -"ensure_export atlanıyor. Mantıksal sürücü: %s için hiçbir iscsi_target " -"hazırlanmamış" - #, python-format msgid "" "Skipping remove_export. No iscsi_target is presently exported for volume: %s" @@ -1639,10 +1496,6 @@ msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" msgstr "" "Smb paylaşımı %(share)s Toplam boyut %(size)s Toplam ayrılan %(allocated)s" -#, python-format -msgid "Snapshot %s was deleted successfully." -msgstr "Anlık görüntü %s başarıyla silindi." - msgid "Snapshot create request issued successfully." msgstr "Anlık görüntü oluşturma isteği başarıyla yapıldı." @@ -1697,10 +1550,6 @@ msgstr "%(topic)s düğüm başlatılıyor (sürüm %(version_string)s)" msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "Mantıksal sürücü %(driver_name)s (%(version)s) başlatılıyor" -#, python-format -msgid "Storage Group %s was empty." -msgstr "Depolama Grubu %s boştu." - #, python-format msgid "Storage group not associated with the policy. Exception is %s." msgstr "Depolama grubu ilke ile ilişkilendirilmemiş. İstisna %s." @@ -1717,16 +1566,6 @@ msgstr "" msgid "Successful login by user %s" msgstr "%s kullanıcısı tarafından başarılı giriş" -#, python-format -msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" -msgstr "%(server)s ve vserver %(vs)s için ssc işi başarıyla tamamlandı" - -#, python-format -msgid "" -"Successfully completed stale refresh job for %(server)s and vserver %(vs)s" -msgstr "" -"%(server)s ve vserver %(vs)s için eski tazeleme işi başarıyla tamamlandı" - #, python-format msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr "%(src)s konumundaki disk başarıyla %(dest)s konumuna kopyalandı." @@ -2053,14 +1892,6 @@ msgstr "" msgid "Using pool %(pool)s instead of %(cpg)s" msgstr "%(cpg)s yerine %(pool)s havuzu kullanılıyor" -#, python-format -msgid "Using security file in %s for authentication" -msgstr "Kimlik doğrulama için %s içindeki güvenlik dosyası kullanılıyor" - -#, python-format -msgid "Using service label: %s" -msgstr "Servis etiketi kullanılıyor: %s" - #, python-format msgid "Value with type=%s is not serializable" msgstr "type=%s sahip değer serileştirilemez" @@ -2192,10 +2023,6 @@ msgstr "" "Verilen %s referanslı mantıksal sürücü yönetme işlemi süresinde yeniden " "adlandırılmamalı." -#, python-format -msgid "Volume with the name %s wasn't found, can't unmanage" -msgstr "%s isimli mantıksal sürücü bulunamadı, yönetim bırakılamıyor" - #, python-format msgid "" "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " @@ -2267,71 +2094,20 @@ msgstr "cgsnapshot %s: başarıyla silindi" msgid "cgsnapshot %s: deleting" msgstr "cgsnapshot %s: siliniyor" -#, python-format -msgid "create_volume: create_lu returns %s" -msgstr "create_volume: create_lu %s döndürüyor" - -#, python-format -msgid "delete lun loc %s" -msgstr "lun loc %s sil" - -#, python-format -msgid "do_setup: %s" -msgstr "do_setup: %s" - #, python-format msgid "free capacity of pool %(pool)s is: %(free)s, total capacity: %(total)s." msgstr "" "%(pool)s havuzu için boş kapasite: %(free)s, toplam kapasite: %(total)s." -#, python-format -msgid "iSCSI Initiators %(in)s of %(ins)s need registration." -msgstr "iSCSI Başlatıcıları %(in)s / %(ins)s kaydolmalı." 
- -#, python-format -msgid "iSCSI portal found for service: %s" -msgstr "Servis içi iSCSI portalı bulundu: %s" - #, python-format msgid "igroup %(grp)s found for initiator %(iname)s" msgstr "%(iname)s başlatıcısı için %(grp)s igroup bulundu" -#, python-format -msgid "initialize volume %(vol)s connector %(conn)s" -msgstr "%(vol)s mantıksal sürücüsü %(conn)s bağlayıcısını ilklendir" - -#, python-format -msgid "initialize_ connection: %(vol)s:%(initiator)s" -msgstr "initialize_ connection: %(vol)s:%(initiator)s" - #, python-format msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" msgstr "" "initialize_connection mantıksal sürücü: %(volume)s, bağlayıcı: %(connector)s" -#, python-format -msgid "initiate: connection %s" -msgstr "başlat: bağlantı %s" - -msgid "" -"initiator_auto_registration: False. Initiator auto registration is not " -"enabled. Please register initiator manually." -msgstr "" -"initiator_auto_registration: False. Başlatıcı otomatik kaydı etkin değil. " -"Lütfen başlatıcıyı elle kaydedin." - -#, python-format -msgid "iops limit is: %s." -msgstr "iops sınırı: %s." - -#, python-format -msgid "iscsi_initiators: %s" -msgstr "iscsi_initiators: %s" - -#, python-format -msgid "location is: %(location)s" -msgstr "konum: %(location)s" - #, python-format msgid "" "migrate_volume_completion is cleaning up an error for volume %(vol1)s " @@ -2344,10 +2120,6 @@ msgstr "" msgid "new cloned volume: %s" msgstr "yeni klonlanan mantıksal sürücü: %s" -#, python-format -msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" -msgstr "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" - #, python-format msgid "open_connection to %(ssn)s at %(ip)s" msgstr "%(ip)s ye %(ssn)s open_connection" @@ -2358,14 +2130,6 @@ msgstr "" "%s mantıksal sürücüsü error_restoring olarak ayarlanıyor (yedek geri " "yükleniyordu)." -#, python-format -msgid "share: %(share)s -> %(info)s" -msgstr "paylaşım: %(share)s -> %(info)s" - -#, python-format -msgid "share: %s incorrect entry" -msgstr "paylaşım: %s geçersiz girdi" - #, python-format msgid "smis_do_iscsi_discovery is: %(out)s." msgstr "smis_do_iscsi_discovery: %(out)s." @@ -2378,14 +2142,6 @@ msgstr "anlık görüntü %s mevcut değil" msgid "source volume for cloning: %s" msgstr "klon için kaynak mantıksal sürücü: %s" -#, python-format -msgid "targetlist: %s" -msgstr "hedeflistesi: %s" - -#, python-format -msgid "terminate: connection %s" -msgstr "sonlandır: bağlantı %s" - #, python-format msgid "terminate_connection volume: %(volume)s, connector: %(con)s" msgstr "terminate_connection mantıksal sürücü: %(volume)s, bağlayıcı: %(con)s" diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po index 1c9b4a55e..11540018f 100644 --- a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po +++ b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev487\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-30 03:17+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -34,10 +34,6 @@ msgstr "%s" msgid "%s is already mounted" msgstr "%s zaten bağlı" -#, python-format -msgid "AttachSnapTask.revert: detach mount point %s" -msgstr "AttachSnapTask.revert: %s bağlantı noktasını ayır" - msgid "Attempted to delete a space that's not there." msgstr "Orada olmayan bir alan silinmeye çalışıldı." @@ -64,23 +60,6 @@ msgstr "" "Yedek servisi %(service)s doğrulamayı desteklemiyor. Yedek %(id)s " "doğrulanmamış. Doğrulama atlanıyor." -msgid "" -"Both 'storagetype:prvosioning' and 'provisioning:type' are set in the extra " -"specs, the value of 'provisioning:type' will be used. The key 'storagetype:" -"provisioning' may be deprecated in the next release." -msgstr "" -"Hem 'storagetype:prvosioning' hem 'provisioning:type' ek özelliklerde " -"ayarlanmış, 'provisioning:type' değeri kullanılacak. 'storagetype:" -"provisioning' anahtarı sonraki sürümlerde kaldırılabilir." - -#, python-format -msgid "CG %(cg_name)s does not exist. Message: %(msg)s" -msgstr "CG %(cg_name)s mevcut değil. İleti: %(msg)s" - -#, python-format -msgid "CG %(cg_name)s is deleting. Message: %(msg)s" -msgstr "CG %(cg_name)s siliyor. İleti: %(msg)s" - #, python-format msgid "CHAP is enabled, but server secret not configured on server %s" msgstr "CHAP etkin, ama %s sunucusu üzerinde sunucu gizi yapılandırılmamış" @@ -116,10 +95,6 @@ msgstr "" "Mantıksal sürücü yeniden adlandırma geri alınamaz; eski isim %(old_name)s " "idi ve yeni isim %(new_name)s." -#, python-format -msgid "Cgsnapshot name %(name)s already exists. Message: %(msg)s" -msgstr "Cgsnapshot ismi %(name)s zaten mevcut. İleti: %(msg)s" - #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "Değişiklik, şu kaynaklar için kullanımı 0'ın altına düşürecek: %s" @@ -147,18 +122,6 @@ msgstr "" "eqlx_chap_password artık kullanılmıyor. Aynıları için sırayla chap_auth, " "chap_username ve chap_password kullanın." -#, python-format -msgid "Consistency group %(name)s already exists. Message: %(msg)s" -msgstr "Tutarlılık grubu %(name)s zaten mevcut. İleti: %(msg)s" - -#, python-format -msgid "" -"CopySnapshotTask.revert: delete the copied snapshot %(new_name)s of " -"%(source_name)s." -msgstr "" -"CopySnapshotTask.revert: %(source_name)s'in %(new_name)s kopyalanan anlık " -"görüntüsünü sil." - #, python-format msgid "Could not create target because it already exists for volume: %s" msgstr "Hedef oluşturulamadı çünkü mantıksal sürücü: %s için zaten mevcut" @@ -167,22 +130,6 @@ msgstr "Hedef oluşturulamadı çünkü mantıksal sürücü: %s için zaten mev msgid "Could not determine root volume name on %s." msgstr "%s üzerinde kök mantıksal sürücü ismi belirlenemiyor." 
-#, python-format -msgid "CreateDestLunTask.revert: delete temp lun %s" -msgstr "CreateDestLunTask.revert: geçici lun %s'i sil" - -#, python-format -msgid "CreateSMPTask.revert: delete mount point %s" -msgstr "CreateSMPTask.revert: %s bağlantı noktasını sil" - -#, python-format -msgid "CreateSnapshotTask.revert: delete temp cgsnapshot %s" -msgstr "CreateSnapshotTask.revert: geçici cgsnapshot %s'i sil" - -#, python-format -msgid "CreateSnapshotTask.revert: delete temp snapshot %s" -msgstr "CreateSnapshotTask.revert: geçici anlık görüntü %s'i sil" - #, python-format msgid "" "CreateStorageHardwareID failed. initiator: %(initiator)s, rc=%(rc)d, ret=" @@ -279,9 +226,6 @@ msgstr "Tanımlayıcı silinirken hata oluştu: %s." msgid "Error occurred while deleting temporary disk: %s." msgstr "Geçici disk silinirken hata oluştu: %s." -msgid "Error on parsing target_pool_name/target_array_serial." -msgstr "target_pool_name/target_array_serial ayrıştırmada hata." - #, python-format msgid "Error refreshing volume info. Message: %s" msgstr "Mantıksal sürücü bilgisi tazelenirken hata. İleti: %s" @@ -352,23 +296,10 @@ msgstr "" "Ek özellik anahtarı 'storagetype:pool' sürücü sürümü 5.1.0'dan itibaren " "kullanılmıyor. Bu anahtar atlanıyor." -msgid "" -"Extra spec key 'storagetype:provisioning' may be deprecated in the next " -"release. It is recommended to use extra spec key 'provisioning:type' instead." -msgstr "" -"Ek özellik anahtarı 'storagetype:provisioning' sonraki sürümde " -"kaldırılabilir. Bunun yerine 'provisioning:type' ek özellik anahtarının " -"kullanımı önerilir." - #, python-format msgid "FAST is enabled. Policy: %(fastPolicyName)s." msgstr "FAST etkin. İlke: %(fastPolicyName)s." -#, python-format -msgid "Fail to connect host %(host)s back to storage group %(sg)s." -msgstr "" -"%(host)s istemcisinin %(sg)s depolama grubuna geri bağlanması başarısız." - #, python-format msgid "" "Failed target removal because target or ACL's couldn't be found for iqn: %s." @@ -398,10 +329,6 @@ msgstr "%(vol_type_id)s türündeki %(id)s qos özellikleri ilişkilendirilemedi msgid "Failed to create pair: %s" msgstr "Çift oluşturma başarısız: %s" -#, python-format -msgid "Failed to deregister %(itor)s because: %(msg)s." -msgstr "%(itor)s kaydı silinmesi başarısız çünkü: %(msg)s." - #, python-format msgid "Failed to destroy Storage Group %s." msgstr "Depolama Grubu %s silinemedi." @@ -418,12 +345,6 @@ msgstr "Qos özellikleri %s ilişkisi kesilemedi." msgid "Failed to discard zero page: %s" msgstr "Sıfır sayfası atılamadı: %s" -#, python-format -msgid "Failed to extract initiators of %s, so ignore deregistration operation." -msgstr "" -"%s ilklendiricilerinin çıkarılması başarısız, kayıt silme işlemini göz ardı " -"et." - msgid "Failed to get Raid Snapshot ID and did not store in snapshot." msgstr "" "Raid Anlık Görüntü Kimliği alınamadı ve anlık görüntü içine kaydedilmedi." @@ -431,13 +352,6 @@ msgstr "" msgid "Failed to get target pool id." msgstr "Hedef havuz kimliği alınamadı." -msgid "" -"Failed to get target_pool_name and target_array_serial. 'location_info' is " -"not in host['capabilities']." -msgstr "" -"target_pool_name ve target_array_serial alınamadı. 'location_info' " -"host['capabilities'] içinde değil." - #, python-format msgid "Failed to invoke ems. Message : %s" msgstr "ems başlatma başarısızı. İleti : %s" @@ -484,12 +398,6 @@ msgstr "%(id)s havuzu sorgulanamadı durum %(ret)d." 
msgid "Failed to refresh mounts, reason=%s" msgstr "Bağlar tazelenemedi, sebep=%s" -#, python-format -msgid "" -"Failed to register %(itor)s to SP%(sp)s port %(portid)s because: %(msg)s." -msgstr "" -"%(itor)s'in SP%(sp)s %(portid)s bağlantı noktasına kaydı başarısız: %(msg)s." - #, python-format msgid "Failed to restart horcm: %s" msgstr "horcm yeniden başlatılamadı: %s" @@ -558,10 +466,6 @@ msgstr "" "%(storageSystem)s üzerindeki %(target)s hedef grubu için grup eş zamanlama " "ismi bulunamadı." -#, python-format -msgid "HLU %(hlu)s has already been removed from %(sgname)s. Message: %(msg)s" -msgstr "HLU %(hlu)s zaten %(sgname)s'den ayrılmış. İleti: %(msg)s" - #, python-format msgid "" "Hint \"%s\" dropped because ExtendedServerAttributes not active in Nova." @@ -578,14 +482,6 @@ msgstr "" "değiştirilmesi gerekiyor ya da yapılandırmada Nova için ayrıcalıklı bir " "hesap belirtilmeli." -#, python-format -msgid "" -"Host %(host)s has already disconnected from storage group %(sgname)s. " -"Message: %(msg)s" -msgstr "" -"İstemci %(host)s bağlantısı %(sgname)s depolama grubundan zaten kesilmiş. " -"İleti: %(msg)s" - msgid "" "Host exists without CHAP credentials set and has iSCSI attachments but CHAP " "is enabled. Updating host with new CHAP credentials." @@ -671,34 +567,6 @@ msgstr "" "NetApp sürücüleri kullanmak önerilen bir yol değildir. Lütfen işlevselliğe " "erişmek için NetAppDriver kullanın." -#, python-format -msgid "LUN %(name)s is already expanded. Message: %(msg)s" -msgstr "LUN %(name)s zaten genişletildi. İleti: %(msg)s" - -#, python-format -msgid "LUN %(name)s is not ready for extension: %(out)s" -msgstr "LUN %(name)s eklenti için hazır değil: %(out)s" - -#, python-format -msgid "LUN %(name)s is not ready for snapshot: %(out)s" -msgstr "LUN %(name)s anlık görüntü için hazır değil: %(out)s" - -#, python-format -msgid "LUN already exists, LUN name %(name)s. Message: %(msg)s" -msgstr "LUN zaten mevcut, LUN ismi %(name)s. İleti: %(msg)s" - -#, python-format -msgid "" -"LUN corresponding to %s is still in some Storage Groups.Try to bring the LUN " -"out of Storage Groups and retry the deletion." -msgstr "" -"%s'e denk gelen LUN hala bazı Depolama Gruplarında. LUN'u Depolama " -"Gruplarından çıkarmaya ve silmeyi tekrarlamaya çalışın." - -#, python-format -msgid "LUN is already deleted, LUN name %(name)s. Message: %(msg)s" -msgstr "LUN zaten silindi, LUN ismi %(name)s. İleti: %(msg)s" - #, python-format msgid "" "LUN misalignment may occur for current initiator group %(ig_nm)s) with host " @@ -709,22 +577,11 @@ msgstr "" "hizalaması oluşabilir. Lütfen istemci OS türüne göre başlatıcı grubunu elle " "ayarlayın." -#, python-format -msgid "LUN with id %(remove_id)s is not present in cg %(cg_name)s, skip it." -msgstr "%(remove_id)s kimlikli LUN %(cg_name)s cg de mevcut değil, atla." - msgid "Least busy iSCSI port not found, using first iSCSI port in list." msgstr "" "En az meşgul iSCSI bağlantı noktası bulunamadı, listedeki ilk iSCSI bağlantı " "noktası kullanılıyor." -#, python-format -msgid "" -"Maximum number of Pool LUNs, %s, have been created. No more LUN creation can " -"be done." -msgstr "" -"Azami Havuz LUN'ları sayısı, %s, oluşturuldu. Daha fazla LUN oluşturulamaz." - #, python-format msgid "Message - %s." msgstr "İleti - %s." @@ -741,10 +598,6 @@ msgid "No VLUN contained CHAP credentials. Generating new CHAP key." msgstr "" "Hiçbir VLUN CHAP kimlik bilgileri içermiyor. Yeni CHAP anahtarı üretiliyor." -msgid "No array serial number returned, set as unknown." 
-msgstr "" -"Herhangi bir dizi seri numarası dönmedi, bilinmeyen olarak ayarlanıyor." - #, python-format msgid "No backing file found for %s, allowing snapshot to be deleted." msgstr "" @@ -768,9 +621,6 @@ msgstr "%(mv)s maskeleme görünümünde bağlantı noktası grubu bulunamadı." msgid "No protection domain name or id was specified in configuration." msgstr "Yapılandırmada herhangi bir koruma alan adı veya kimliği belirtilmedi." -msgid "No shares found hence skipping ssc refresh." -msgstr "Paylaşım bulunamadı ssc tazelemesi atlanıyor." - #, python-format msgid "" "No storage group found. Performing rollback on Volume: %(volumeName)s To " @@ -832,62 +682,6 @@ msgstr "" "düzgün ayarlanması ve yapılandırma seçeneği \"%(mpflag)s\" in \"True\" " "olarak ayarlanmasını gerektirir." -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG backup " -"(%(d_consumed)dG of %(d_quota)dG already consumed)" -msgstr "" -"%(s_pid)s için kota aşıldı, %(s_size)sG yedek oluşturulmaya çalışıldı " -"(%(d_consumed)dG / %(d_quota)dG zaten tüketilmiş)" - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " -"(%(d_consumed)dG of %(d_quota)dG already consumed)." -msgstr "" -"%(s_pid)s için kota aşıldı, %(s_size)sG anlık görüntü oluşturulmaya " -"çalışıldı (%(d_consumed)dG / %(d_quota)dG zaten tüketildi)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " -"(%(d_consumed)dG of %(d_quota)dG already consumed)" -msgstr "" -"%(s_pid)s için kota aşıldı, %(s_size)sG mantıksal sürücü oluşturulmaya " -"çalışıldı (%(d_consumed)dG / %(d_quota)dG zaten tüketilmiş)" - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " -"(%(d_consumed)dG of %(d_quota)dG already consumed)" -msgstr "" -"%(s_pid)s için kota aşıldı, %(s_size)sG mantıksal sürücü oluşturulmaya " -"çalışıldı - (%(d_consumed)dG / %(d_quota)dG zaten kullanılmış)" - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create backups (%(d_consumed)d " -"backups already consumed)" -msgstr "" -"%(s_pid)s için kota aşıldı, yedek oluşturulmaya çalışıldı (%(d_consumed)d " -"yedek zaten tüketilmiş)" - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " -"snapshots already consumed)." -msgstr "" -"%(s_pid)s için kota aşıldı, anlık görüntü oluşturulmaya çalışıldı " -"(%(d_consumed)d anlık görüntü zaten tüketilmiş)." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d volumes " -"already consumed)" -msgstr "" -"%(s_pid)s için kota aşıldı, mantıksal sürücü oluşturulmaya çalışıldı " -"(%(d_consumed)d mantıksal sürücü zaten tüketilmiş)" - #, python-format msgid "" "RBD image for backup %(backup)s of volume %(volume)s not found. Deleting " @@ -919,10 +713,6 @@ msgstr "İstenen imaj mevcut Kiracı tarafından erişilebilir değil." msgid "Returning as clean tmp vol job already running." msgstr "Geçici mantıksal sürücü temizleme işi hala çalıştığından dönülüyor." -#, python-format -msgid "See unavailable iSCSI target: %s" -msgstr "Kullanılamaz iSCSI hedefine bak: %s" - #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export." @@ -937,28 +727,6 @@ msgstr "Hedef silmenin sessizce başarısız olduğu algılandı, tekrar dene... msgid "Snapshot %(name)s already exists. Message: %(msg)s" msgstr "Anlık görüntü %(name)s zaten mevcut. 
İleti: %(msg)s" -#, python-format -msgid "" -"Snapshot %(name)s for consistency group does not exist. Message: %(msg)s" -msgstr "" -"Tutarlılık grubu için %(name)s anlık görüntüsü mevcut değil. İleti: %(msg)s" - -#, python-format -msgid "Snapshot %(name)s is in use, retry. Message: %(msg)s" -msgstr "Anlık görüntü %(name)s kullanımda, tekrar dene. İleti: %(msg)s" - -#, python-format -msgid "Snapshot %(name)s may deleted already. Message: %(msg)s" -msgstr "Anlık görüntü %(name)s zaten silinmiş olabilir. İleti: %(msg)s" - -#, python-format -msgid "" -"Snapshot %(snapname)s is attached to snapshot mount point %(mpname)s " -"already. Message: %(msg)s" -msgstr "" -"Anlık görüntü %(snapname)s anlık görüntü bağlantı noktası %(mpname)s'e zaten " -"bağlı. İleti: %(msg)s" - #, python-format msgid "Snapshot %s already deleted." msgstr "Anlık görüntü %s zaten silinmiş." @@ -967,37 +735,10 @@ msgstr "Anlık görüntü %s zaten silinmiş." msgid "Snapshot still %(status)s Cannot delete snapshot." msgstr "Anlık görüntü hala %(status)s Anlık görüntü silinemiyor." -#, python-format -msgid "Start migration failed. Message: %s" -msgstr "Göç başlatma başarısız. İleti: %s" - -#, python-format -msgid "Storage Group %s is not found." -msgstr "Depolama Grubu %s bulunamadı." - -#, python-format -msgid "Storage Group %s is not found. Create it." -msgstr "Depolama Grubu %s bulunamadı. Oluştur." - -#, python-format -msgid "Storage Group %s is not found. terminate_connection() is unnecessary." -msgstr "Depolama Grubu %s bulunamadı. terminate_connection() gereksiz." - -#, python-format -msgid "Storage Pool '%(pool)s' is '%(state)s'." -msgstr "Depolama Havuzu '%(pool)s' '%(state)s'." - #, python-format msgid "Storage group %(name)s already exists. Message: %(msg)s" msgstr "Depolama grubu %(name)s zaten mevcut. İleti: %(msg)s" -#, python-format -msgid "" -"Storage group %(name)s doesn't exist, may have already been deleted. " -"Message: %(msg)s" -msgstr "" -"Depolama grubu %(name)s mevcut değil, zaten silinmiş olabilir. İleti: %(msg)s" - #, python-format msgid "Storage sync name not found for target %(target)s on %(storageSystem)s." msgstr "" @@ -1039,30 +780,10 @@ msgstr "" "bu komut satırından bir Retype üzerinde gerçekleşir: cinder --os-volume-api-" "version 2 retype --migration-policy on-demand" -#, python-format -msgid "" -"The following specified storage pools do not exist: %(unexist)s. This host " -"will only manage the storage pools: %(exist)s" -msgstr "" -"Belirtilen şu depolama havuzları mevcut değil: %(unexist)s. İstemci yalnızca " -"şu depolama havuzlarını yönetecek: %(exist)s" - #, python-format msgid "The provisioning: %(provisioning)s is not valid." msgstr "Hazırlık: %(provisioning)s geçerli değil." -#, python-format -msgid "" -"The source volume is a legacy volume. Create volume in the pool where the " -"source volume %s is created." -msgstr "" -"Kaynak mantıksal sürücü eski bir mantıksal sürücü. Mantıksal sürücüyü %s " -"kaynak mantıksal sürücüsünün oluşturulduğu yerde oluştur." - -#, python-format -msgid "The specified Snapshot mount point %s is not currently attached." -msgstr "Belirtilen Anlık Görüntü bağlama noktası %s şu an eklenmiş değil." - #, python-format msgid "" "The volume: %(volumename)s was not first part of the default storage group " @@ -1162,10 +883,6 @@ msgid "Verify certificate is not set, using default of False." msgstr "" "Sertifika doğrulama ayarlanmamış, varsayılan değer olan False kullanılıyor." -#, python-format -msgid "Volume %(vol)s was not in Storage Group %(sg)s." 
-msgstr "Mantıksal sürücü %(vol)s Depolama Grubu %(sg)s'de değildi." - #, python-format msgid "Volume %(volume)s is not in any masking view." msgstr "Mantıksal sürücü %(volume)s herhangi bir maskeleme görünümünde değil." @@ -1305,48 +1022,9 @@ msgstr "" "_unmap_vdisk_from_host: %(vol_name)s mantıksal sürücüsünün hiçbir istemciye " "eşleşmesi bulunamadı." -msgid "" -"config option keymgr.fixed_key has not been defined: some operations may " -"fail unexpectedly" -msgstr "" -"keymgr.fixed_key yapılandırma seçeneği tanımlanmamış. bazı işlemler " -"beklenmedik şekilde başarısız olabilir" - -msgid "" -"destroy_empty_storage_group: True. Empty storage group will be deleted after " -"volume is detached." -msgstr "" -"destroy_empty_storage_group: True. Boş depolama grubu mantıksal sürücü " -"ayrıldıktan sonra silinecek." - msgid "flush() not supported in this version of librbd" msgstr "flush() librbd'nin bu sürümünde desteklenmiyor" -msgid "force_delete_lun_in_storagegroup=True" -msgstr "force_delete_lun_in_storagegroup=True" - -#, python-format -msgid "get_evs: %(out)s -- No find for %(fsid)s" -msgstr "get_evs: %(out)s -- %(fsid)s için bulgu yok" - -#, python-format -msgid "get_fsid: %(out)s -- No info for %(fslabel)s" -msgstr "get_fsid: %(out)s -- %(fslabel)s için bilgi yok" - -msgid "" -"glance_num_retries shouldn't be a negative value. The number of retries will " -"be set to 0 until this iscorrected in the cinder.conf." -msgstr "" -"glance_num_retries negatif bir değer olmamalı. Bu cinder.conf'da düzeltilene " -"kadar tekrar deneme sayıları 0 olarak ayarlanacak." - -msgid "" -"ignore_pool_full_threshold: True. LUN creation will still be forced even if " -"the pool full threshold is exceeded." -msgstr "" -"ignore_pool_full_threshold: True. Havuz dolu eşiği aşılsa bile LUN oluşturma " -"zorlanacak." - #, python-format msgid "initialize_connection: Did not find a preferred node for volume %s." msgstr "" @@ -1364,12 +1042,6 @@ msgstr "%(typ)s türündeki %(key)s nesnesi bulunamadı, %(err_msg)s" msgid "qemu-img is not installed." msgstr "qemu-img kurulu değil." -msgid "refresh stale ssc job in progress. Returning... " -msgstr "vadesi geçmiş ssc işi tazeleme sürüyor. Dönülüyor... " - -msgid "san_secondary_ip is configured as the same value as san_ip." -msgstr "san_secondary_ip san_ip ile aynı değer olarak yapılandırılmış." - #, python-format msgid "snapshot: %s not found, skipping delete operation" msgstr "anlık görüntü: %s bulunamadı, silme işlemi atlanıyor" @@ -1378,12 +1050,6 @@ msgstr "anlık görüntü: %s bulunamadı, silme işlemi atlanıyor" msgid "snapshot: %s not found, skipping delete operations" msgstr "anlık görüntü: %s bulunamadı, silme işlemleri atlanıyor" -msgid "ssc job in progress. Returning... " -msgstr "ssc işi sürüyor. Dönülüyor... " - -msgid "terminate_conn: provider location empty." -msgstr "terminate_conn: sağlayıcı konumu boş." - msgid "terminate_connection: lun map not found" msgstr "terminate_connection: lun eşleştirmesi bulunamadı" diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder.po index ab2fb05bb..88a5de663 100644 --- a/cinder/locale/tr_TR/LC_MESSAGES/cinder.po +++ b/cinder/locale/tr_TR/LC_MESSAGES/cinder.po @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev487\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-08-30 03:16+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -348,10 +348,6 @@ msgstr "" "Belirtilen yönetilecek depolama havuzlarından hepsi mevcut değil. Lütfen " "yapılandırmanızı kontrol edin. Mevcut olmayan havuzlar: %s" -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "SheepdogDriver'da bir sorun oluştu.(Reason: %(reason)s)" - msgid "An error has occurred during backup operation" msgstr "Yedekleme işlemi sırasında bir hata oluştu" @@ -411,12 +407,6 @@ msgid "Auth user details not found in CloudByte storage." msgstr "" "CloudByte depolamada kimlik doğrulama kullanıcı ayrıntıları bulunamadı." -msgid "Authentication error" -msgstr "Kimlik doğrulama hatası" - -msgid "Authorization error" -msgstr "Yetkilendirme hatası" - msgid "Available categories:" msgstr "Kullanılabilir kategoriler:" @@ -440,9 +430,6 @@ msgstr "Art alanda çalışan uygulama raporları: öge zaten mevcut" msgid "Backend reports: item not found" msgstr "Art alanda çalışan uygulama raporları: öge bulunamadı" -msgid "Backend server not NaServer." -msgstr "Arkauç sunucusu NaServer değil." - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "Arka uç servis yeniden deneme zaman aşımına erişildi: %(timeout)s sn" @@ -557,18 +544,6 @@ msgstr "İkili Değer" msgid "Blank components" msgstr "Boş bileşenler" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Blockbridge API kimlik doğrulama şeması (jeton ya da parola)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Blockbridge API parola (kimlik doğrulama şeması için 'password')" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Blockbridge API jetonu (kimlik doğrulama şeması için 'token')" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Blockbridge API kullanıcı (kimlik doğrulama şeması için 'password')" - msgid "Blockbridge api host not configured" msgstr "Blockbridge api istemcisi yapılandırılmadı" @@ -650,10 +625,6 @@ msgstr "%s, tam sayı değere çevrilemez." msgid "Can't decode backup record." msgstr "Yedek kaydı çözülemedi." -#, python-format -msgid "Can't open config file: %s" -msgstr "Yapılandırma dosyası açılamıyor: %s" - msgid "Can't parse backup record." msgstr "Yedek kaydı ayrıştırılamadı." @@ -717,13 +688,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "ECOM sunucusuna bağlanılamıyor." -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "" -"%(src_vol_size)s boyutunda mantıksal sürücüden %(vol_size)s boyutunda klon " -"oluşturulamıyor" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -761,13 +725,6 @@ msgstr "" "%(sgGroupName)s ismine sahip bir depolama grubu oluşturulamıyor ya da " "bulunamıyor." -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "" -"%(snap_size)s boyutunda anlık görüntüden %(vol_size)s boyutunda mantıksal " -"sürücü oluşturulamıyor" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." 
msgstr "%s boyutunda mantıksal sürücü oluşturulamıyor: 8GB katı değil." @@ -984,10 +941,6 @@ msgstr "Çoğaltma özelliği %(storageSystem)s üzerinde lisanslı değil." msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "CLI içindeki %(cmd)s komutu bloklandı ve iptal edildi" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition: %s zaman aşımı" - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s zaman aşımına uğradı." @@ -1101,21 +1054,10 @@ msgstr "GPFS küme kimliği bulunamadı: %s." msgid "Could not find GPFS file system device: %s." msgstr "GPFS dosya sistemi aygıtı bulunamadı: %s." -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "" -"%(type_id)s türü ile %(volume_id)s mantıksal sürücüsü için bir istemci " -"bulunamadı." - #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s'deki yapılandırma bulunamadı" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "" -"%(volumeName)s mantıksal sürücüsü için iSCSI dışa aktarımı bulunamadı." - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "%s mantıksal sürücü için iSCSI dışa aktarımı bulunamadı" @@ -1215,9 +1157,6 @@ msgstr "" "%(vol)s mantıksal sürücüsü için şu an eşleştirilmiş istemci %(group)s ile " "desteklenmeyen istemci grubunda." -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "ÖNERİLMİYOR: Cinder API v1 sürümünü dağıt." - msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." @@ -1246,15 +1185,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "Dedup lun'lar büyütülemez" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "" -"Kopyaları kaldırma etkinleştirici kurulu değil. Kopyaları kaldırılmış " -"mantıksal sürücü oluşturulamıyor" - -msgid "Default pool name if unspecified." -msgstr "Belirtilmezse öntanımlı havuz adı." - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1268,12 +1198,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "Öntanımlı mantıksal sürücü türü bulunamadı." -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "" -"Ortaya çıkarılan havuzlar ve onların ilişkili arka uç sorgu karakter " -"dizilerini tanımlar" - #, python-format msgid "" "Delete backup aborted, the backup service currently configured " @@ -1842,10 +1766,6 @@ msgstr "" "%(size)d ek GB'ye sahip %(space)s mantıksal sürücüsü için space-extend " "sırasında hata" -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "%(vol)s mantıksal sürücüsü eşleştirilirken hata. %(error)s." - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -1874,17 +1794,9 @@ msgstr "cgsnapshot %s silinirken hata oluştu." msgid "Error occurred when updating consistency group %s." msgstr "%s tutarlılık grubu güncellenirken hata oluştu." -#, python-format -msgid "Error parsing config file: %s" -msgstr "Yapılandırma dosyasını ayrıştırmada hata: %s" - msgid "Error promoting secondary volume to primary" msgstr "İkincil mantıksal sürücünün birincil hale getirilmesinde hata" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." 
-msgstr "%(vol)s mantıksal sürücüsü kaldırılırken hata. %(error)s." - #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "" @@ -1987,12 +1899,6 @@ msgstr "" msgid "Extend volume not implemented" msgstr "Mantıksal sürücü genişletme uygulanmadı" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "" -"FAST VP Etkinleştirici kurulu değil. Mantıksal sürücü için aşama ilkesi " -"ayarlanamıyor" - msgid "FAST is not supported on this array." msgstr "FAST bu dizi üzerinde desteklenmiyor." @@ -2044,11 +1950,6 @@ msgstr "" "Kaynak kilidi alma başarısız. (seri: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "" -"%(vol)s in %(sg)s e eklenmesi %(retries)s denemeden sonra başarısız oldu." - msgid "Failed to add the logical device." msgstr "Mantıksal aygıt ekleme başarısız." @@ -2106,9 +2007,6 @@ msgstr "Metadata mantıksal sürücüye kopyalanamadı: %(reason)s" msgid "Failed to create IG, %s" msgstr "IG oluşturma başarısız, %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "SolidFire İmaj-Mantıksal Sürücü oluşturulamadı" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Mantıksal Sürücü Grubu oluşturulamadı: %(vg_name)s" @@ -2201,9 +2099,6 @@ msgstr "Zamanlayıcı yönetici mantıksal sürücü akışı oluşturma başar msgid "Failed to create snapshot %s" msgstr "%s anlık sistem görüntüsü oluşturulamadı" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "LUN ID belirtilmediğinden anlık görüntü oluşturma başarısız" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "cg: %(cgName)s için anlık görüntü oluşturma başarısız." @@ -2370,10 +2265,6 @@ msgstr "" msgid "Failed to find host %s." msgstr "%s istemcisi bulunamadı." -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "%s kaynak mantıksal sürücüsü için depolama havuzu bulma başarısız." - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "[%s] hesabı için CloudByte hesap ayrıntıları alınamadı." @@ -3050,14 +2941,6 @@ msgstr "" msgid "Host %s has no FC initiators" msgstr "%s istemcisinin FC başlatıcısı yok" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "%s istemcisinin iSCSI başlatıcısı yok" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "'%s' istemcisi bulunamadı." - #, python-format msgid "Host group with name %s not found" msgstr "%s isimli istemci grubu bulunamadı" @@ -3066,9 +2949,6 @@ msgstr "%s isimli istemci grubu bulunamadı" msgid "Host group with ref %s not found" msgstr "%s başvurusuna sahip istemci grubu bulunamadı" -msgid "Host not found" -msgstr "İstemci bulunamadı" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Host bulunamadı.%(host)s uzerindeki %(service)s silinemiyor." @@ -3089,9 +2969,6 @@ msgstr "" msgid "ID" msgstr "KİMLİK" -msgid "IP address/hostname of Blockbridge API." -msgstr "Blockbridge API IP adresi/bilgisayar adı." - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" @@ -3172,9 +3049,6 @@ msgstr "%(uuid)s örneği bulunamadı." msgid "Insufficient privileges" msgstr "Yetersiz ayrıcalıklar" -msgid "Interval value (in seconds) between connection retries to ceph cluster." 
-msgstr "Ceph kümesi için bağlantılar arasındaki dahili değer (saniye cinsinde)" - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Geçersiz 3PAR Alanı: %(err)s" @@ -3204,10 +3078,6 @@ msgstr "" "%s mantıksal sürücüsü için QoS ilkesi alırken geçersiz QoS özellikleri " "algılandı" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "Geçersiz VNX kimlik doğrulama türü: %s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -3500,9 +3370,6 @@ msgstr "İş için beklenirken durumla karşılaşıldı." msgid "Issue encountered waiting for synchronization." msgstr "Eşzamanlama için beklenirken durumla karşılaşıldı." -msgid "Item not found" -msgstr "Öğe bulunamadı" - msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." @@ -3667,52 +3534,15 @@ msgstr "Bu mantıksal sürücü için metadata yedeği zaten var" msgid "Metadata backup object '%s' already exists" msgstr "Metadata yedek nesnesi '%s' zaten var" -msgid "Metadata item was not found" -msgstr "İçerik özelliği bilgisi bulunamadı" - -msgid "Metadata item was not found." -msgstr "Metadata öğesi bulunamadı." - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "Metadata özellik anahtarı %s 255 karakterden büyük" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "Metadata özellik anahtarı %s değeri 255 karakterden büyük" - -msgid "Metadata property key blank" -msgstr "Metadata özellik anahtarı boş" - msgid "Metadata property key blank." msgstr "Metadata özellik anahtarı boş." -msgid "Metadata property key greater than 255 characters." -msgstr "Metadata özellik anahtarı 255 karakterden büyük." - -msgid "Metadata property value greater than 255 characters." -msgstr "255 karakterden daha fazla metadata özellik değeri." - msgid "Metadata restore failed due to incompatible version" msgstr "Uyumsuz sürüm nedeniyle metadata geri yüklemesi başarısız oldu" msgid "Metadata restore failed due to incompatible version." msgstr "Metadata geri yüklemesi uyumsuz sürüm nedeniyle başarısız oldu." -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "%(src)s mantıksal sürücü göçü başarısız." - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"%(src)s kaynak mantıksal sürücüsü ile %(dst)s hedef mantıksal sürücü " -"arasında göç başarısız oldu." - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "LUN %s göçü durduruldu ya da arızalandı." - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -3740,9 +3570,6 @@ msgstr "İstek gövdesinde gerekli öge '%s' eksik." msgid "Missing required element 'consistencygroup' in request body." msgstr "İstek gövdesinde gerekli öge 'consistencygroup' eksik." -msgid "Missing required element 'host' in request body." -msgstr "İstek gövdesinde gerekli 'host' ögesi eksik." - msgid "Missing required element quota_class_set in request body." msgstr "İstek gövdesinde gerekli quota_class_set ögesi eksik." @@ -3835,9 +3662,6 @@ msgstr "İstekte salt okunur belirtilmelidir." msgid "Must specify storage pool name or id." msgstr "Depolama havuzu ismi veya id'si belirtmeli." 
-msgid "Must supply a positive value for age" -msgstr "Devir için pozitif bir değer verilmelidir" - msgid "Must supply a positive, non-zero value for age" msgstr "Yaş için pozitif, sıfırdan farklı bir değer sağlanmalı" @@ -4141,9 +3965,6 @@ msgstr "" "CloudByte depolamasında [%s] mantıksal sürücüsü oluşturulurken boş yanıt " "alındı." -msgid "Number of retries if connection to ceph cluster failed." -msgstr "Ceph kümesine bağlantı başarısız olursa tekrar deneme sayısı." - msgid "Object Count" msgstr "Nesne Sayısı" @@ -4186,11 +4007,6 @@ msgstr "gpfs_images_share_mode seçeneği doğru ayarlanmamış." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "gpfs_mount_point_base seçeneği doğru ayarlanmamış." -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "" -"Blockbridge API sunucusuna bağlanmak için HTTPS bağlantı noktasının üzerine " -"yaz." - #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" @@ -4357,15 +4173,6 @@ msgstr "REST sunucu kullanıcı adı belirtilmelidir." msgid "Raid did not have MCS Channel." msgstr "Raid MCS Kanalına sahip değil." -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"max_luns_per_storage_group yapılandırma seçeneği ile ayarlanan " -"sınırlandırmaya eriş. %(vol)s in %(sg)s Depolama Grubuna eklenmesi " -"reddedildi." - #, python-format msgid "Received error string: %s" msgstr "Alınan hata: %s" @@ -4629,10 +4436,6 @@ msgstr "host %(host)s üzerindeki servis %(service)s silindi. " msgid "Service %(service_id)s could not be found." msgstr "%(service_id)s servisi bulunamadı." -#, python-format -msgid "Service %s not found." -msgstr "Servis %s bulunamadı." - msgid "Service is unavailable at this time." msgstr "Şu anda servis kullanılamıyor." @@ -4859,10 +4662,6 @@ msgstr "" msgid "Target volume type is still in use." msgstr "Hedef mantıksal sürücü türü hala kullanımda." -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "Tenant ID: %s bulunmuyor." - msgid "Terminate connection failed" msgstr "Bağlantı sonlandırma başarısız oldu" @@ -4944,10 +4743,6 @@ msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "" "Bitiş zamanı (%(end)s) başlangıç zamanından (%(start)s) sonra olmalıdır." -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "extra_spec: %s geçersizdir." - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "extraspec: %(extraspec)s geçersizdir." @@ -4966,14 +4761,6 @@ msgstr "İstemci grubu ya da iSCSI hedefi bulunamadı." msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP kullanıcısı %(user)s mevcut değil." -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"İçe aktarılan lun %(lun_id)s %(host)s istemcisi tarafından yönetilmeyen " -"%(lun_pool)s havuzunda." - msgid "The key cannot be None." msgstr "Anahtar boş olamaz." @@ -5014,12 +4801,6 @@ msgstr "Sonuçlar geçersizdir." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "Disk bölümü bakım modunda iken anlık görüntü oluşturulamaz." -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "" -"Kaynak mantıksal sürücü %s mevcut istemci tarafından yönetilen havuzda değil." - msgid "The source volume for this WebDAV operation not found." 
msgstr "Bu WebDAV işlemi için kaynak mantıksal sürücü bulunamadı." @@ -5157,10 +4938,6 @@ msgstr "" msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "Kullanım için uygun kaynak yok. (kaynak: %(resource)s)" -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "%s'e ekli geçerli veridepoları yok." - msgid "There are no valid datastores." msgstr "Geçerli veri depoları yok." @@ -5208,11 +4985,6 @@ msgstr "Yapılandırılmış NFS yapılandırma dosyası yok (%s)" msgid "Thin provisioning not supported on this version of LVM." msgstr "LVM'in bu sürümünde ince hazırlık desteklenmiyor." -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "" -"ThinProvisioning Etkinleştirici kurulu değil. İnce mantıksal sürücü " -"oluşturulamıyor" - msgid "This driver does not support deleting in-use snapshots." msgstr "" "Bu sürücü kullanımdaki anlık sistem görüntülerinin silinmesini desteklemez." @@ -5250,13 +5022,6 @@ msgstr "" "%(id)s anlık sistem görüntüsünü silmek için Nova güncellemesi beklenirken " "zaman aşımı oluştu." -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"Ceph kümesine bağlanırken zaman aşımı değeri (saniye) kullanılır. Eğer değer " -"< 0 ise, zaman aşımı ayarlanmamıştır ve öntanımlı librados değeri kullanılır." - #, python-format msgid "Timeout while requesting %(service)s API." msgstr "API %(service)s istenirken zaman aşımı." @@ -5311,9 +5076,6 @@ msgstr "Cinder istemcisi %(space)s alanı için apphosts'a eklenemedi" msgid "Unable to connect or find connection to host" msgstr "Bağlanılamadı ya da istemci için bağlantı bulunamadı" -msgid "Unable to create Barbican Client without project_id." -msgstr "Barbican İstemcisi project_id olmadan oluşturulamadı." - #, python-format msgid "Unable to create consistency group %s" msgstr "%s tutarlılık grubu oluşturulamadı" @@ -5599,9 +5361,6 @@ msgstr "Bilinmeyen iletişim kuralı: %(protocol)s." msgid "Unknown quota resources %(unknown)s." msgstr "%(unknown)s bilinmeyen kota kaynakları." -msgid "Unknown service" -msgstr "Bilinmeyen servis" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Bilinmeyen sıralama yönü, 'desc' ya da 'asc'" @@ -5674,9 +5433,6 @@ msgstr "Kullanıcı ID" msgid "User does not have admin privileges" msgstr "Kullanıcı yönetici ayrıcalıklarına sahip değil" -msgid "User is not authorized to use key manager." -msgstr "Kullanıcı anahtar yöneticisi kullanarak yetkilendirilemez." - msgid "User not authorized to perform WebDAV operations." msgstr "Kullanıcı WebDAV işlemleri yapmaya yetkili değil." @@ -5866,12 +5622,6 @@ msgstr "Mantıksal Sürücü %s yok" msgid "Volume Type %(id)s already exists." msgstr "Mantıksal Sürücü Türü %(id)s zaten var." -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"%(type_id)s Mantıksal Sürücü Türü, %(id)s anahtarlı hiçbir ek özelliğe sahip " -"değil." - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -6092,10 +5842,6 @@ msgstr "Mantıksal sürücü türü boş olamaz." msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "%(volume_type_name)s adında mantıksal sürücü türü bulunamadı." -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "%s numaralı disk bölümü mevcut değildir." 
- #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -6105,13 +5851,6 @@ msgstr "" "Büyütmeyi ancak bitiştirilmiş mantıksal sürücü üzerinde yapabilirsiniz. " "Çıkılıyor..." -#, python-format -msgid "Volume: %s could not be found." -msgstr "Disk bölümü: %s bulunamadı." - -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "Mantıksal sürücüler bu boyutta nesnelere bölünecek (megabayt olarak)." - #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." @@ -6380,9 +6119,6 @@ msgstr "" "create_volume_from_snapshot: Mantıksal sürücü oluşturma için anlık görüntü " "durumu \"kullanılabilir\" olmalı. Geçersiz durum: %s." -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "create_volume_from_snapshot: Kaynak ve hedef boyutu farklı." - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." @@ -6390,9 +6126,6 @@ msgstr "" "create_volume_from_snapshot: Mantıksal sürücü boyutu anlık görüntü tabanlı " "mantıksal sürücüden farklı." -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "kopyasız sıkıştırma ve otomatik aşamalama aynı anda etkin olamaz." - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -6429,9 +6162,6 @@ msgstr "Uzak düğümden anlık görüntüyü çıkart." msgid "do_setup: No configured nodes." msgstr "do_setup: Yapılandırılmış düğüm yok." -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries 0'a eşit ya da daha büyük olmalıdır" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -6575,9 +6305,6 @@ msgstr "" msgid "key manager error: %(reason)s" msgstr "anahtar yönetici hatası: %(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "keymgr.fixed_key tanımlanamadı" - msgid "limit param must be an integer" msgstr "Sınır parametresi tam sayı olmak zorunda" @@ -6624,10 +6351,6 @@ msgstr "%s anlık sistem görüntüsü ile birden fazla kaynak bulundu" msgid "name cannot be None" msgstr "ad Hiçbiri olamaz" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path: NAVISECCLI araç %(path)s bulunamadı." - #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "drbdmanage içinde %s anlık sistem görüntüsü bulunamadı" @@ -6739,9 +6462,6 @@ msgstr "san_ip ayarlanmamış." msgid "san_ip must be set" msgstr "san_ip ayarlanmış olmalı" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip: Gerekli alan yapılandırması. san_ip ayarlanmamış." - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." 
@@ -6752,20 +6472,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve() sadece bir kez çağrılabilir" -msgid "service not found" -msgstr "servis bulunamadı" - -msgid "snapshot does not exist" -msgstr "anlık sistem görüntüsü yok" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "anlık sistem görüntü kimliği:%s bulunamadı" - -#, python-format -msgid "source vol id:%s not found" -msgstr "kaynak mantıksal sürücü kimliği:%s bulunamadı" - #, python-format msgid "source volume id:%s is not replicated" msgstr "kaynak mantıksal sürücü kimliği:%s çoğaltılmamıştır" @@ -6839,9 +6545,6 @@ msgstr "mantıksal sürücü atandı" msgid "volume changed" msgstr "mantıksal sürücü değiştirildi" -msgid "volume does not exist" -msgstr "mantıksal sürücü yok" - msgid "volume is already attached" msgstr "mantıksal sürücü zaten ekli" diff --git a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po index 2365e5a83..5e9492e63 100644 --- a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po +++ b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po @@ -15,9 +15,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -258,9 +258,6 @@ msgstr "必须指定“status”。" msgid "'volume_id' must be specified" msgstr "必须指定“volume_id”" -msgid "'{}' object has no attribute '{}'" -msgstr "'{}' 对象没有属性 '{}'" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -408,21 +405,9 @@ msgstr "所指定的要管理的所有存储池都不存在。请检查配置。 msgid "An API version request must be compared to a VersionedMethod object." msgstr "必须将 API 版本请求与 VersionedMethod 对象进行比较。" -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "SheepdogDriver 中发生了错误。(原因:%(reason)s)" - msgid "An error has occurred during backup operation" msgstr "在备份过程中出现一个错误" -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "尝试修改快照“%s”时发生了错误。" - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "查找卷“%s”时发生错误。" - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -520,16 +505,10 @@ msgstr "在 CloudByte 存储器中找不到认证组 [%s] 详细信息。" msgid "Auth user details not found in CloudByte storage." msgstr "在 CloudByte 存储器中找不到认证用户详细信息。" -msgid "Authentication error" -msgstr "认证错误" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "认证失败,请验证交换机凭证,错误代码:%s。" -msgid "Authorization error" -msgstr "授权错误" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "可用性区域“%(s_az)s”无效。" @@ -546,9 +525,6 @@ msgstr "在此存储器系列和 ONTAP 版本上,后端 QoS 规范不受支持 msgid "Backend doesn't exist (%(backend)s)" msgstr "后端不存在 (%(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." -msgstr "后端已故障转移。无法故障返回。" - #, python-format msgid "Backend reports: %(message)s" msgstr "后端报告:%(message)s" @@ -559,9 +535,6 @@ msgstr "后端报告:项已存在" msgid "Backend reports: item not found" msgstr "后端报告:找不到项" -msgid "Backend server not NaServer." 
-msgstr "后端服务器不是 NaServer。" - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "后端服务重试超时匹配项:%(timeout)s 秒" @@ -652,10 +625,6 @@ msgstr "从存储卷后端 API 返回了不正确或意外的响应:%(data)s" msgid "Bad project format: project is not in proper format (%s)" msgstr "项目格式不正确:项目没有采用正确格式 (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "已将错误请求发送至 Datera 集群:无效参数:%(args)s | %(message)s" - msgid "Bad response from Datera API" msgstr "来自 Datera API 的响应不正确" @@ -672,18 +641,6 @@ msgstr "二进制" msgid "Blank components" msgstr "空组件" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Blockbridge API 认证方案(令牌或密码)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Blockbridge API 密码(对于认证方案“密码”)" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Blockbridge API 令牌(对于认证方案“令牌”)" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Blockbridge API 用户(对于认证方案“密码”)" - msgid "Blockbridge api host not configured" msgstr "未配置 Blockbridge API 主机" @@ -794,9 +751,6 @@ msgstr "无法把 %s 转换成整数" msgid "Can't access 'scality_sofs_config': %s" msgstr "无法访问“scality_sofs_config”:%s" -msgid "Can't attach snapshot." -msgstr "无法附加快照。" - msgid "Can't decode backup record." msgstr "无法将备份记录解码。" @@ -889,10 +843,6 @@ msgid "" "status is not online." msgstr "无法将 LUN %s 导入至 Cinder。快照状态异常或运行状态并非“在线”。" -#, python-format -msgid "Can't open config file: %s" -msgstr "无法打开配置文件 %s" - msgid "Can't parse backup record." msgstr "无法解析备份记录。" @@ -959,11 +909,6 @@ msgstr "无法更改 VF 上下文,指定的 VF 在管理 VF 列表 %(vf_list)s msgid "Cannot connect to ECOM server." msgstr "无法连接至 ECOM 服务器。" -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "无法根据大小为 %(src_vol_size)s 的卷创建大小为 %(vol_size)s 的克隆" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1008,11 +953,6 @@ msgstr "" msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "无法创建或找到名称为 %(sgGroupName)s 的存储器组。" -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "无法根据大小为 %(snap_size)s 的快照创建大小为 %(vol_size)s 的卷" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "无法创建大小为 %s 的卷:该大小不是 8GB 的倍数。" @@ -1301,10 +1241,6 @@ msgstr "未配置 Coho rpc 端口" msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "命令 %(cmd)s 在 CLI 中被阻塞,并且已取消" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition:%s 超时" - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition:%s 超时。" @@ -1453,18 +1389,10 @@ msgstr "找不到 GPFS 集群标识:%s。" msgid "Could not find GPFS file system device: %s." msgstr "找不到 GPFS 文件系统设备:%s。" -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." -msgstr "对于类型为 %(type_id)s 的卷 %(volume_id)s,找不到主机。" - #, python-format msgid "Could not find config at %(path)s" msgstr "在 %(path)s 找不到配置文件。" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "对于卷 %(volumeName)s,找不到 iSCSI 导出。" - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "对于卷 %s,找不到 iSCSI 导出" @@ -1545,15 +1473,6 @@ msgstr "" "备份创建已异常中止,需要的卷状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" -msgid "Create consistency group failed." 
-msgstr "创建一致性组失败。" - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "不支持从映像 %(image)s 创建类型为 %(type)s的加密卷。" - msgid "Create export for volume failed." msgstr "为卷创建导出失败。" @@ -1633,12 +1552,6 @@ msgid "" "%(group)s." msgstr "卷 %(vol)s 的当前已映射的主机位于具有 %(group)s 的不受支持的主机组中。" -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "建议不要使用:请部署 Cinder API V1。" - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "已不推荐使用:Cinder API 的 Deploy v2。" - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1709,13 +1622,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "无法扩展 Dedup lun" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "未安装去重启用程序。无法创建去重卷" - -msgid "Default pool name if unspecified." -msgstr "缺省池名称(如果未指定)。" - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1728,10 +1634,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "找不到缺省卷类型。" -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "定义一组公开池及其相关联的后端查询字符串" - msgid "Delete LUNcopy error." msgstr "删除 LUNcopy 时发生错误。" @@ -1814,9 +1716,6 @@ msgstr "直接连接不支持 Dell Cinder 驱动程序配置错误复制。" msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "找不到 Dell Cinder 驱动程序配置错误 replication_device %s" -msgid "Deploy v3 of the Cinder API." -msgstr "Cinder API 的 Deploy v3。" - msgid "Describe-resource is admin only functionality" msgstr "Describe-resource是只有管理员才能执行的功能。" @@ -1866,11 +1765,6 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "驱动程序初始化连接失败(错误:%(err)s)。" -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "驱动程序无法转型,因为卷 (LUN {}) 的快照被禁止迁移。" - msgid "Driver must implement initialize_connection" msgstr "驱动程序必须实现 initialize_connection" @@ -2319,10 +2213,6 @@ msgstr "" "将存储器组 %(storageGroupName)s 关联至快速策略 %(fastPolicyName)s 时出错,错" "误描述:%(errordesc)s。" -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "连接卷 %s 时出错。可能已到达目标限制!" - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2483,10 +2373,6 @@ msgstr "对卷 %(space)s 进行空间扩充,额外扩充 %(size)d GB 时出错 msgid "Error managing volume: %s." msgstr "管理卷 %s 时出错。" -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "映射卷 %(vol)s 时出错。%(error)s。" - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2528,17 +2414,9 @@ msgstr "删除 cg 快照 %s 时发生了错误。" msgid "Error occurred when updating consistency group %s." msgstr "更新一致性组 %s 时发生了错误。" -#, python-format -msgid "Error parsing config file: %s" -msgstr "解析配置文件 %s 时出错" - msgid "Error promoting secondary volume to primary" msgstr "将辅助卷升级为主卷时出错" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "移除卷 %(vol)s 时出错。%(error)s。" - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "重命名卷 %(vol)s 时出错:%(err)s。" @@ -2795,10 +2673,6 @@ msgstr "不存在任何快照时,扩展卷仅受此驱动程序支持。" msgid "Extend volume not implemented" msgstr "扩展卷未实现" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "未安装 FAST VP 启用程序。无法对该卷设置分层策略" - msgid "FAST is not supported on this array." 
msgstr "快速策略在此阵列上不受支持。" @@ -2863,10 +2737,6 @@ msgstr "" "未能获取资源锁定。(序列为 %(serial)s,实例为 %(inst)s,返回为 %(ret)s,标准" "错误为 %(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "%(retries)s 次尝试后将 %(vol)s 添加至 %(sg)s 失败。" - msgid "Failed to add the logical device." msgstr "未能添加逻辑设备。" @@ -2945,9 +2815,6 @@ msgstr "未能从快照 %(cgSnapshot)s 创建 CG %(cgName)s。" msgid "Failed to create IG, %s" msgstr "未能创建映像 %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "未能创建 SolidFire 映像卷" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "未能创建卷组: %(vg_name)s" @@ -3051,9 +2918,6 @@ msgstr "未能创建调度程序管理器卷流" msgid "Failed to create snapshot %s" msgstr "未能创建快照 %s" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "未能创建快照,因为没有指定任何 LUN 标识" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "未能针对 cg %(cgName)s 创建快照。" @@ -3186,9 +3050,6 @@ msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "未能确保快照资源区域,找不到标识 %s 的卷" -msgid "Failed to establish SSC connection." -msgstr "未能建立 SSC 连接。" - msgid "Failed to establish connection with Coho cluster" msgstr "无法建立与 Coho 集群的连接。" @@ -3238,10 +3099,6 @@ msgstr "未能找到主机 %s。" msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "找不到包含 %(initiator)s 的 iSCSI 启动程序组。" -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "找不到源卷 %s 的存储池。" - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "对于帐户 [%s],未能获取 CloudByte 帐户详细信息。" @@ -3444,21 +3301,6 @@ msgid "" "was not a floating-point number." msgstr "未能管理现有卷 %(name)s,因为已报告的大小 %(size)s 不是浮点数。" -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "未能管理现有卷,因为所选卷类型的池与传入卷引用的 NFS 共享不匹配。" - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "未能管理现有卷,因为所选卷类型的池与传入卷引用的文件系统不匹配。" - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "未能管理现有卷,因为所选卷类型的池与主机的池不匹配。" - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -3760,9 +3602,6 @@ msgstr "查找主机 LUN 标识时发生错误。" msgid "Find lun group from mapping view error." msgstr "从映射视图查找 LUN 组时发生错误。" -msgid "Find lun number error." -msgstr "查找 LUN 号时发生错误。" - msgid "Find mapping view error." msgstr "查找映射视图时发生错误。" @@ -4104,9 +3943,6 @@ msgstr "从 DRBDmanage 中获得了错误路径信息!(%s)" msgid "HBSD error occurs." msgstr "发生 HBSD 错误。" -msgid "HNAS has disconnected SSC" -msgstr "HNAS 已断开 SSC " - msgid "HPELeftHand url not found" msgstr "找不到 HPELeftHand URL" @@ -4143,14 +3979,6 @@ msgstr "主机 %(host)s 与 x509 证书内容不匹配:CommonName %(commonName msgid "Host %s has no FC initiators" msgstr "主机 %s 没有 FC 启动程序" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "主机 %s 没有 iSCSI 启动程序" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "找不到主机“%s”。" - #, python-format msgid "Host group with name %s not found" msgstr "找不到名称为 %s 的主机组" @@ -4165,9 +3993,6 @@ msgstr "主机未冻结。" msgid "Host is already Frozen." msgstr "主机已冻结。" -msgid "Host not found" -msgstr "没有找到主机" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." 
msgstr "找不到主机。未能在 %(host)s 上移除 %(service)s。" @@ -4194,9 +4019,6 @@ msgstr "I/O 组 %(iogrp)d 无效;可用的 I/O 组为 %(avail)s。" msgid "ID" msgstr "ID" -msgid "IP address/hostname of Blockbridge API." -msgstr "Blockbridge API 的 IP 地址/主机名。" - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "如果 compression 设置为 True,那么还必须设置 rsize(不等于 -1)。" @@ -4283,12 +4105,6 @@ msgstr "" "Infortrend CLI 异常:%(err)s 参数:%(param)s(返回码:%(rc)s)(输出:" "%(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "初始层:{},策略:{} 无效。" - -msgid "Input type {} is not supported." -msgstr "不支持输入类型 {}。" - msgid "Input volumes or snapshots are invalid." msgstr "输入卷或快照无效。" @@ -4305,13 +4121,6 @@ msgstr "可用空间不足,无法扩展卷。" msgid "Insufficient privileges" msgstr "特权不足" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "连接至 ceph 集群的连接重试之间的时间间隔值(秒)。" - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "为 io_port_list 指定了无效 %(protocol)s 端口 %(port)s。" - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "3PAR 域无效:%(err)s" @@ -4354,10 +4163,6 @@ msgstr "获取卷 %s 的 QoS 策略时,检测到无效 QoS 规范" msgid "Invalid Replication Target: %(reason)s" msgstr "无效复制目标:%(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "无效 VNX 认证类型:%s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -4400,14 +4205,6 @@ msgstr "认证密钥无效:%(reason)s" msgid "Invalid backup: %(reason)s" msgstr "备份无效:%(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"无效 barbican api URL:需要版本,例如,“http[s]://|[:port]/" -"”,指定的 URL 为:%s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "在 CloudByte 存储器中找到了无效 chap 用户详细信息。" @@ -4572,10 +4369,6 @@ msgstr "指定的存储池 %s 无效。" msgid "Invalid storage pool is configured." msgstr "配置了无效存储池。" -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "指定了无效同步方式,允许的方式为 %s。" - msgid "Invalid transport type." msgstr "无效传输类型。" @@ -4583,14 +4376,6 @@ msgstr "无效传输类型。" msgid "Invalid update setting: '%s'" msgstr "无效的更新设置:'%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"URL 无效:必须为“http[s]://|[:port]/”格式,指定的 URL " -"为:%s" - #, python-format msgid "Invalid value '%s' for force." msgstr "值“%s”对于 force 无效。" @@ -4727,9 +4512,6 @@ msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "发出故障转移失败,因为未正确配置复制。" -msgid "Item not found" -msgstr "条目没有找到" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "在 CloudByte 的创建卷 [%s] 响应中找不到作业标识。" @@ -4757,9 +4539,6 @@ msgstr "卷 %s 没有 LU" msgid "LUN export failed!" msgstr "LUN 导出失败!" -msgid "LUN id({}) is not valid." -msgstr "LUN 标识 ({}) 无效。" - msgid "LUN map overflow on every channel." msgstr "LUN 映射在每个通道上溢出。" @@ -4767,9 +4546,6 @@ msgstr "LUN 映射在每个通道上溢出。" msgid "LUN not found with given ref %s." msgstr "找不到具有给定引用 %s 的 LUN。" -msgid "LUN number ({}) is not an integer." -msgstr "LUN 编号 ({}) 并非整数。" - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." 
msgstr "LUN 号超出了通道标识 %(ch_id)s 的范围。" @@ -4942,53 +4718,15 @@ msgstr "对于此卷,已存在元数据备份" msgid "Metadata backup object '%s' already exists" msgstr "元数据备份对象“%s”已存在" -msgid "Metadata item was not found" -msgstr "元数据项目未找到" - -msgid "Metadata item was not found." -msgstr "找不到元数据项。" - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "元数据属性关键字 %s 超过 255 个字符" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "元数据属性关键字 %s 值超过 255 个字符" - -msgid "Metadata property key blank" -msgstr "元数据属性关键字为空白" - msgid "Metadata property key blank." msgstr "元数据属性关键字为空白。" -msgid "Metadata property key greater than 255 characters." -msgstr "元数据属性关键字超过 255 个字符。" - -msgid "Metadata property value greater than 255 characters." -msgstr "元数据属性值超过 255 个字符。" - msgid "Metadata restore failed due to incompatible version" msgstr "由于版本不兼容,元数据复原失败" msgid "Metadata restore failed due to incompatible version." msgstr "由于版本不兼容,元数据复原失败。" -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "迁移卷 %(src)s 失败。" - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "在源卷 %(src)s 与目标卷 %(dst)s 之间迁移卷失败。" - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "LUN %s 的迁移已停止或者发生故障。" - -msgid "MirrorView/S enabler is not installed." -msgstr "未安装 MirrorView/S 启用程序。" - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5014,9 +4752,6 @@ msgstr "请求主体中缺少必需元素“%s”。" msgid "Missing required element 'consistencygroup' in request body." msgstr "请求主体中缺少必需元素“consistencygroup”。" -msgid "Missing required element 'host' in request body." -msgstr "请求主体中缺少必需元素“host”。" - msgid "Missing required element quota_class_set in request body." msgstr "在请求主体中缺少必需元素 quota_class_set。" @@ -5130,9 +4865,6 @@ msgstr "必须指定存储池名称或标识。" msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "必须指定存储库。选项:sio_storage_pools。" -msgid "Must supply a positive value for age" -msgstr "必须为 age 提供正值" - msgid "Must supply a positive, non-zero value for age" msgstr "必须为时效提供非零正值" @@ -5502,9 +5234,6 @@ msgstr "" "在 CloudByte 存储器中查询基于 [%(operation)s] 的作业[%(job)s] 时接收到空响" "应。" -msgid "Number of retries if connection to ceph cluster failed." -msgstr "连接至 ceph 集群失败时的重试次数。" - msgid "Object Count" msgstr "对象计数" @@ -5559,16 +5288,10 @@ msgstr "选项 gpfs_images_share_mode 未正确设置。" msgid "Option gpfs_mount_point_base is not set correctly." msgstr "选项 gpfs_mount_point_base 未正确设置。" -msgid "Option map (cls._map) is not defined." -msgstr "未定义选项映射 (cls._map)。" - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "始发 %(res)s %(prop)s 必须为其中一个“%(vals)s”值" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "覆盖 HTTPS 端口以连接至 Blockbridge API 服务器。" - #, python-format msgid "ParseException: %s" msgstr "ParseException:%s" @@ -5802,14 +5525,6 @@ msgstr "PRC 服务器响应不完整" msgid "Raid did not have MCS Channel." msgstr "RAID 不具备 MCS 通道。" -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"达到配置选项 max_luns_per_storage_group 设置的限制。将 %(vol)s 添加至存储器" -"组 %(sg)s 的操作被拒绝。" - #, python-format msgid "Received error string: %s" msgstr "接收到错误字符串:%s" @@ -5981,9 +5696,6 @@ msgstr "找不到必需的配置选项" msgid "Required flag %s is not set" msgstr "未设置必需标记 %s" -msgid "Requires an NaServer instance." 
-msgstr "需要 NaServer 实例。" - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -6166,10 +5878,6 @@ msgstr "在主机 %(host)s 上找不到服务 %(service_id)s。" msgid "Service %(service_id)s could not be found." msgstr "服务 %(service_id)s 没有找到。" -#, python-format -msgid "Service %s not found." -msgstr "找不到服务 %s。" - msgid "Service is too old to fulfil this request." msgstr "服务太旧,无法实现此请求。" @@ -6247,10 +5955,6 @@ msgstr "快照 %(snapshot_id)s 没有找到。" msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "快照 %(snapshot_id)s 没有任何具有键 %(metadata_key)s 的元数据。" -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "快照 %s 不能属于某个一致性组。" - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "快照“%s”在阵列上不存在。" @@ -6275,9 +5979,6 @@ msgstr "不支持对处于以下状态的卷生成快照:%s。" msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "未在任何位置部署的快照资源“%s”?" -msgid "Snapshot size must be multiple of 1 GB." -msgstr "快照大小必须是 1 GB 的倍数。" - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "对于 update_snapshot_status,不允许快照状态 %(cur)s" @@ -6420,9 +6121,6 @@ msgstr "未设置存储系统标识。" msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "找不到池 %(poolNameInStr)s 的存储系统。" -msgid "Storage-assisted migration failed during manage volume." -msgstr "管理卷期间,存储器辅助进行的迁移失败。" - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "找不到存储系统 %(array)s。" @@ -6467,10 +6165,6 @@ msgstr "在帐户 [%(account)s] 的 CloudByte 存储器中找不到 TSM [%(tsm)s msgid "Target volume type is still in use." msgstr "目标卷类型仍在使用中。" -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "租户标识 %s 不存在。" - msgid "Terminate connection failed" msgstr "终止连接发生故障" @@ -6558,10 +6252,6 @@ msgstr "路径%(path)s 指向的设备不可用:%(reason)s" msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "结束时间 (%(end)s) 必须在开始时间 (%(start)s) 之后。" -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "extra_spec %s 无效。" - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "额外规范 %(extraspec)s 无效。" @@ -6607,14 +6297,6 @@ msgstr "主机未准备好故障返回。请重新同步卷并在 Storwize 后 msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP 用户 %(user)s 不存在。" -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"已导入的 LUN %(lun_id)s 位于池 %(lun_pool)s 中,该池并非由主机 %(host)s 管" -"理。" - msgid "The key cannot be None." msgstr "键不能为“无”。" @@ -6689,11 +6371,6 @@ msgstr "保留计数不得高于 %s。" msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "当卷处于维护方式时,无法创建快照。" -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "源卷 %s 不在由当前主机管理的池中。" - msgid "The source volume for this WebDAV operation not found." msgstr "找不到此 WebDAV 操作的源卷。" @@ -6840,10 +6517,6 @@ msgstr "不存在任何可供使用的资源。(资源:%(resource)s)" msgid "There are no valid ESX hosts." msgstr "不存在有效的 ESX 主机。" -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "不存在任何已连接至 %s 的有效数据存储器。" - msgid "There are no valid datastores." msgstr "不存在任何有效数据存储器。" @@ -6929,9 +6602,6 @@ msgstr "未配置 Quobyte 卷 (%s)。示例:quobyte:/// msgid "Thin provisioning not supported on this version of LVM." msgstr "在此版本的 LVM 上,不支持瘦供应。" -msgid "ThinProvisioning Enabler is not installed. 
Can not create thin volume" -msgstr "未安装瘦供应启用程序。无法创建瘦卷" - msgid "This driver does not support deleting in-use snapshots." msgstr "此驱动程序不支持对正在使用的快照进行删除。" @@ -6960,13 +6630,6 @@ msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "等待 Nova 更新(以便删除快照 %(id)s)时超时。" -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"连接至 CEPH 集群时使用的超时值(以秒计)。如果值小于 0,那么不会设置超时并且" -"会使用缺省 librados 值。" - #, python-format msgid "Timeout while calling %s " msgstr "调用 %s 时超时 " @@ -7046,9 +6709,6 @@ msgstr "无法完成 %s 的故障转移。" msgid "Unable to connect or find connection to host" msgstr "无法连接至主机,或找不到与主机的连接" -msgid "Unable to create Barbican Client without project_id." -msgstr "不具备 project_id,无法创建 Barbican 客户机。" - #, python-format msgid "Unable to create consistency group %s" msgstr "无法创建一致性组 %s" @@ -7140,9 +6800,6 @@ msgstr "" "无法使用 Purity REST API 版本 %(api_version)s 执行复制,需要 " "%(required_versions)s 的其中之一。" -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "无法同时启用复制和 snapcopy。" - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "无法建立与 Storwize 集群 %s 的伙伴关系。" @@ -7492,9 +7149,6 @@ msgstr "未知协议:%(protocol)s。" msgid "Unknown quota resources %(unknown)s." msgstr "配额资源 %(unknown)s 未知。" -msgid "Unknown service" -msgstr "未知服务" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "排序方向未知,必须为“降序”或“升序”" @@ -7600,9 +7254,6 @@ msgstr "用户ID" msgid "User does not have admin privileges" msgstr "用户没有管理员权限" -msgid "User is not authorized to use key manager." -msgstr "用户无权使用密钥管理器。" - msgid "User not authorized to perform WebDAV operations." msgstr "用户无权执行 WebDAV 操作。" @@ -7810,12 +7461,6 @@ msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "卷 %s 已联机。将该卷设置为脱机以便使用 OpenStack 进行管理。" -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "卷 %s 不能正在迁移、已附加、属于某个一致性组或具有快照。" - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "卷 %s 不得是一致性组的一部分。" @@ -7844,10 +7489,6 @@ msgstr "卷组 %s 不存在" msgid "Volume Type %(id)s already exists." msgstr "卷类型 %(id)s 已存在。" -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." -msgstr "卷类型 %(type_id)s 没有任何具有键 %(id)s 的额外规范。" - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -8030,9 +7671,6 @@ msgstr "自从最近一次备份以来,卷大小已增加。请执行完全备 msgid "Volume size must be a multiple of 1 GB." msgstr "卷大小必须为 1 GB 的倍数。" -msgid "Volume size must be multiple of 1 GB." -msgstr "卷大小必须为 1 GB 的倍数。" - msgid "Volume size must multiple of 1 GB." msgstr "卷大小必须是 1 GB 的倍数。" @@ -8101,10 +7739,6 @@ msgstr "卷类型名称不能为 空." msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "名为 %(volume_type_name)s 的卷类型没有找到。" -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "卷标识为 %s 的卷不存在。" - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -8115,17 +7749,10 @@ msgstr "卷 %(volumeName)s 不是并置卷。只能对并置卷执行扩展。 msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "卷 %(volumeName)s 未添加至存储器组 %(sgGroupName)s。" -#, python-format -msgid "Volume: %s could not be found." -msgstr "找不到卷 %s。" - #, python-format msgid "Volume: %s is already being managed by Cinder." 
msgstr "卷 %s 已由 Cinder 管理。" -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "卷将分块为此大小(以兆字节计)的对象。" - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "卷/帐户同时超出主 SolidFire 帐户和辅助 SolidFire 帐户的限制。" @@ -8726,13 +8353,6 @@ msgstr "" "create_consistencygroup_from_src 仅支持 cgsnapshot 源或一致性组源。不能使用多" "个源。" -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src 支持 cgsnapshot 源或一致性组源。不能使用多个" -"源。" - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy:源 vdisk %(src)s (%(src_id)s) 不存在。" @@ -8832,17 +8452,11 @@ msgstr "" "create_volume_from_snapshot:快照状态必须为“可用”,以便创建卷。无效状态为 " "%s。" -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "create_volume_from_snapshot:源和目标大小不同。" - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "create_volume_from_snapshot:卷大小不同于基于快照的卷。" -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "不能同时启用去重和自动分层。" - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -8884,9 +8498,6 @@ msgstr "从远程节点拆离快照" msgid "do_setup: No configured nodes." msgstr "do_setup:不存在任何已配置的节点。" -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries 必须大于或等于 0" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -9073,21 +8684,12 @@ msgstr "iscsiadm 执行失败。" msgid "key manager error: %(reason)s" msgstr "发生密钥管理器错误:%(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "未定义 keymgr.fixed_key" - msgid "limit param must be an integer" msgstr "limit 参数必须是整数" msgid "limit param must be positive" msgstr "limit参数必须是正数" -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -"manage_existing 无法管理连接至主机的卷。请断开此卷与现有主机的连接,然后导入" - msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "manage_existing 需要“name”键以标识现有卷。" @@ -9130,10 +8732,6 @@ msgstr "找到多个快照标识为 %s 的资源" msgid "name cannot be None" msgstr "name不能是None" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path:找不到 NAVISECCLI 工具 %(path)s。" - #, python-format msgid "no REPLY but %r" msgstr "无回复,但收到 %r" @@ -9196,14 +8794,6 @@ msgstr "找不到 rados 和 rbd python 库" msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted 只能是“no”、“yes”或“only”其中一项,而不能是 %r" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "应在后端上配置 replication_device:%s。" - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "缺少 backend_id 为 [%s] 的 replication_device。" - #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover 失败。找不到 %s。" @@ -9258,9 +8848,6 @@ msgstr "未设置 san_ip。" msgid "san_ip must be set" msgstr "san_ip必须设置" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip:必需字段配置。未设置 san_ip。" - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." 
@@ -9271,16 +8858,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "serve() 只能调用一次" -msgid "service not found" -msgstr "找不到服务" - -msgid "snapshot does not exist" -msgstr "快照不存在" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "找不到快照标识 %s" - #, python-format msgid "snapshot-%s" msgstr "快照 - %s" @@ -9291,10 +8868,6 @@ msgstr "已更改快照" msgid "snapshots changed" msgstr "已更改快照" -#, python-format -msgid "source vol id:%s not found" -msgstr "找不到源卷标识 %s" - #, python-format msgid "source volume id:%s is not replicated" msgstr "未复制源卷标识 %s" @@ -9389,9 +8962,6 @@ msgstr "卷已分配" msgid "volume changed" msgstr "卷已更改" -msgid "volume does not exist" -msgstr "卷不存在" - msgid "volume is already attached" msgstr "卷已连接" @@ -9407,9 +8977,6 @@ msgstr "卷大小 %(volume_size)d 太小,无法复原大小为 %(size)d 的备 msgid "volume size %d is invalid." msgstr "卷大小 %d 无效。" -msgid "volume_type cannot be None" -msgstr "volume_type 不能为 None" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "在一致性组中创建卷时,必须提供 volume_type。" @@ -9440,6 +9007,3 @@ msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "zfssa_manage_policy 属性需要设置为“strict”或“loose”。当前值为:%s。" - -msgid "{} is not a valid option." -msgstr "{} 是无效选项。" diff --git a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po index e5c58ca2b..8500c3784 100644 --- a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po +++ b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: cinder 9.0.0.0b2.dev1\n" +"Project-Id-Version: cinder 9.0.0.0b3.dev544\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-01 22:30+0000\n" +"POT-Creation-Date: 2016-09-01 04:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -250,9 +250,6 @@ msgstr "必須指定 'status'。" msgid "'volume_id' must be specified" msgstr "必須指定 'volume_id'" -msgid "'{}' object has no attribute '{}'" -msgstr "'{}' 物件不含屬性 '{}'" - #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " @@ -400,21 +397,9 @@ msgstr "要管理的所有指定儲存區都不存在。請檢查配置。不存 msgid "An API version request must be compared to a VersionedMethod object." msgstr "API 版本要求必須與 VersionedMethod 物件進行比較。" -#, python-format -msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" -msgstr "SheepdogDriver 發生錯誤。(原因:%(reason)s)" - msgid "An error has occurred during backup operation" msgstr "執行備份作業期間發生錯誤" -#, python-format -msgid "An error occured while attempting to modifySnapshot '%s'." -msgstr "嘗試修改 Snapshot '%s' 時發生錯誤。" - -#, python-format -msgid "An error occured while seeking for volume \"%s\"." -msgstr "探查磁區 \"%s\" 時發生錯誤。" - #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " @@ -511,16 +496,10 @@ msgstr "在 CloudByte 儲存體中找不到鑑別群組 [%s] 詳細資料。" msgid "Auth user details not found in CloudByte storage." msgstr "在 CloudByte 儲存體中找不到鑑別使用者詳細資料。" -msgid "Authentication error" -msgstr "鑑別錯誤" - #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "鑑別失敗,請驗證交換器認證,錯誤碼:%s。" -msgid "Authorization error" -msgstr "授權錯誤" - #, python-format msgid "Availability zone '%(s_az)s' is invalid." 
msgstr "可用性區域 '%(s_az)s' 無效。" @@ -537,9 +516,6 @@ msgstr "後端服務品質規格在此儲存體系列和 ONTAP 版本上不受 msgid "Backend doesn't exist (%(backend)s)" msgstr "後端不存在 (%(backend)s)" -msgid "Backend has already been failed over. Unable to fail back." -msgstr "後端已經失效接手。無法失效回復。" - #, python-format msgid "Backend reports: %(message)s" msgstr "後端報告:%(message)s" @@ -550,9 +526,6 @@ msgstr "後端報告:項目已存在" msgid "Backend reports: item not found" msgstr "後端報告:找不到項目" -msgid "Backend server not NaServer." -msgstr "後端伺服器不是 NaServer。" - #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "後端服務重試逾時命中:%(timeout)s 秒" @@ -643,10 +616,6 @@ msgstr "來自儲存磁區後端 API 的回應錯誤或不符合預期:%(data) msgid "Bad project format: project is not in proper format (%s)" msgstr "專案格式不當:專案未採取適當格式 (%s)" -#, python-format -msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" -msgstr "傳送至 Datera 叢集的要求不當:無效的引數:%(args)s | %(message)s" - msgid "Bad response from Datera API" msgstr "Datera API 傳回錯誤的回應" @@ -663,18 +632,6 @@ msgstr "二進位" msgid "Blank components" msgstr "空白元件" -msgid "Blockbridge API authentication scheme (token or password)" -msgstr "Blockbridge API 鑑別方法(記號或密碼)" - -msgid "Blockbridge API password (for auth scheme 'password')" -msgstr "Blockbridge API 密碼(適用於鑑別方法「密碼」)" - -msgid "Blockbridge API token (for auth scheme 'token')" -msgstr "Blockbridge API 記號(適用於鑑別方法「記號」)" - -msgid "Blockbridge API user (for auth scheme 'password')" -msgstr "Blockbridge API 使用者(適用於鑑別方法「密碼」)" - msgid "Blockbridge api host not configured" msgstr "未配置 Blockbridge API 主機" @@ -785,9 +742,6 @@ msgstr "無法將 %s 轉換為整數。" msgid "Can't access 'scality_sofs_config': %s" msgstr "無法存取 'scality_sofs_config':%s" -msgid "Can't attach snapshot." -msgstr "無法連接 Snapshot。" - msgid "Can't decode backup record." msgstr "無法將備份記錄解碼。" @@ -882,10 +836,6 @@ msgstr "" "無法將 Snapshot %s 匯入 Cinder。Snapshot 未處於正常狀態,或者執行中狀態不在線" "上。" -#, python-format -msgid "Can't open config file: %s" -msgstr "無法開啟配置檔 %s" - msgid "Can't parse backup record." msgstr "無法剖析備份記錄。" @@ -956,11 +906,6 @@ msgstr "" msgid "Cannot connect to ECOM server." msgstr "無法連接至 ECOM 伺服器。" -#, python-format -msgid "" -"Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" -msgstr "無法從大小為 %(src_vol_size)s 的磁區建立大小為 %(vol_size)s 的複本" - #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " @@ -1005,11 +950,6 @@ msgstr "" msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "無法建立或找不到名稱為 %(sgGroupName)s 的儲存體群組。" -#, python-format -msgid "" -"Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" -msgstr "無法從大小為 %(snap_size)s 的 Snapshot 建立大小為 %(vol_size)s 的磁區" - #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "無法建立大小為 %s 的磁區:不是 8 GB 的倍數。" @@ -1302,10 +1242,6 @@ msgstr "未配置 Coho RPC 埠" msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "指令 %(cmd)s 在 CLI 中遭到封鎖,且已取消" -#, python-format -msgid "CommandLineHelper._wait_for_a_condition: %s timeout" -msgstr "CommandLineHelper._wait_for_a_condition:%s 逾時" - #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition:%s 逾時。" @@ -1455,18 +1391,10 @@ msgstr "找不到 GPFS 叢集 ID:%s。" msgid "Could not find GPFS file system device: %s." msgstr "找不到 GPFS 檔案系統裝置:%s。" -#, python-format -msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." 
-msgstr "找不到類型為 %(type_id)s 之磁區 %(volume_id)s 的主機。" - #, python-format msgid "Could not find config at %(path)s" msgstr "在 %(path)s 處找不到配置" -#, python-format -msgid "Could not find iSCSI export for volume %(volumeName)s." -msgstr "找不到磁區 %(volumeName)s 的 iSCSI 匯出。" - #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "找不到磁區 %s 的 iSCSI 匯出" @@ -1545,15 +1473,6 @@ msgid "" msgstr "" "已中斷建立備份,預期磁區狀態 %(expected_status)s,但取得 %(actual_status)s。" -msgid "Create consistency group failed." -msgstr "建立一致性群組失敗。" - -#, python-format -msgid "" -"Create encrypted volumes with type %(type)s from image %(image)s is not " -"supported." -msgstr "不支援從映像檔 %(image)s 建立類型為 %(type)s 的已加密磁區。" - msgid "Create export for volume failed." msgstr "針對磁區建立匯出失敗。" @@ -1633,12 +1552,6 @@ msgid "" "%(group)s." msgstr "磁區 %(vol)s 目前對映的主機位於不受支援的主機群組%(group)s 中。" -msgid "DEPRECATED: Deploy v1 of the Cinder API." -msgstr "已淘汰:部署 Cinder API 的第 1 版。" - -msgid "DEPRECATED: Deploy v2 of the Cinder API." -msgstr "已淘汰:部署 Cinder API 的第 2 版。" - #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " @@ -1714,13 +1627,6 @@ msgstr "" msgid "Dedup luns cannot be extended" msgstr "無法延伸 Dedup LUN" -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume" -msgstr "未安裝「刪除重複啟用程式」。無法建立已刪除的重複磁區" - -msgid "Default pool name if unspecified." -msgstr "如果未指定,則將使用預設儲存區名稱。" - #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" @@ -1733,10 +1639,6 @@ msgstr "" msgid "Default volume type can not be found." msgstr "找不到預設磁區類型。" -msgid "" -"Defines the set of exposed pools and their associated backend query strings" -msgstr "定義公開儲存區集及其相關聯的後端查詢字串" - msgid "Delete LUNcopy error." msgstr "刪除 LUNcopy 時發生錯誤。" @@ -1819,9 +1721,6 @@ msgstr "Dell Cinder 驅動程式配置錯誤,直接連接不支援抄寫。" msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "Dell Cinder 驅動程式配置錯誤,找不到 replication_device %s。" -msgid "Deploy v3 of the Cinder API." -msgstr "部署 Cinder API 的第 3 版。" - msgid "Describe-resource is admin only functionality" msgstr "Describe-resource 是管理者專用功能" @@ -1871,12 +1770,6 @@ msgstr "" msgid "Driver initialize connection failed (error: %(err)s)." msgstr "驅動程式起始設定連線失敗(錯誤:%(err)s)。" -msgid "" -"Driver is not able to do retype because the volume (LUN {}) has snapshot " -"which is forbidden to migrate." -msgstr "" -"驅動程式無法執行 Retype 動作,因為磁區 (LUN {}) 具有已禁止移轉的 Snapshot。" - msgid "Driver must implement initialize_connection" msgstr "驅動程式必須實作 initialize_connection" @@ -2343,10 +2236,6 @@ msgstr "" "將儲存體群組 %(storageGroupName)s 與下列 FAST 原則建立關聯時發生錯誤:" "%(fastPolicyName)s,錯誤說明:%(errordesc)s。" -#, python-format -msgid "Error attaching volume %s. Target limit might be reached!" -msgstr "連接磁區 %s 時發生錯誤。可能已達到目標限制!" - #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " @@ -2510,10 +2399,6 @@ msgstr "將磁區 %(space)s 的空間額外延伸 %(size)d GB 時發生錯誤" msgid "Error managing volume: %s." msgstr "管理磁區時發生錯誤:%s。" -#, python-format -msgid "Error mapping volume %(vol)s. %(error)s." -msgstr "對映磁區 %(vol)s 時發生錯誤。%(error)s。" - #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " @@ -2555,17 +2440,9 @@ msgstr "刪除 CgSnapshot %s 時發生錯誤。" msgid "Error occurred when updating consistency group %s." 
msgstr "更新一致性群組 %s 時發生錯誤。" -#, python-format -msgid "Error parsing config file: %s" -msgstr "剖析配置檔 %s 時發生錯誤" - msgid "Error promoting secondary volume to primary" msgstr "將次要磁區提升為主要磁區時發生錯誤" -#, python-format -msgid "Error removing volume %(vol)s. %(error)s." -msgstr "移除磁區 %(vol)s 時發生錯誤。%(error)s。" - #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "重命名磁區 %(vol)s 時發生錯誤:%(err)s。" @@ -2821,10 +2698,6 @@ msgstr "僅當不存在 Snapshot 時,此驅動程式才支援延伸磁區。" msgid "Extend volume not implemented" msgstr "未實作延伸磁區" -msgid "" -"FAST VP Enabler is not installed. Can't set tiering policy for the volume" -msgstr "未安裝「FAST VP 啟用程式」。無法設定磁區的層級原則" - msgid "FAST is not supported on this array." msgstr "此陣列不支援 FAST 原則。" @@ -2889,10 +2762,6 @@ msgstr "" "無法獲得資源鎖定。(序列:%(serial)s,實例:%(inst)s,ret:%(ret)s,標準錯" "誤:%(err)s)" -#, python-format -msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." -msgstr "在嘗試 %(retries)s 次之後,無法將 %(vol)s 新增至 %(sg)s 中。" - msgid "Failed to add the logical device." msgstr "無法新增邏輯裝置。" @@ -2971,9 +2840,6 @@ msgstr "無法從 Snapshot %(cgSnapshot)s 建立 CG %(cgName)s。" msgid "Failed to create IG, %s" msgstr "無法建立 IG %s" -msgid "Failed to create SolidFire Image-Volume" -msgstr "無法建立 SolidFire 映像檔磁區" - #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "無法建立磁區群組:%(vg_name)s" @@ -3078,9 +2944,6 @@ msgstr "無法建立排定器管理程式磁區流程" msgid "Failed to create snapshot %s" msgstr "無法建立 Snapshot %s" -msgid "Failed to create snapshot as no LUN ID is specified" -msgstr "無法建立 Snapshot,因為未指定任何 LUN ID" - #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "無法建立 CG %(cgName)s 的 Snapshot。" @@ -3213,9 +3076,6 @@ msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "無法確保 Snapshot 資源區域,找不到 ID %s 的磁區" -msgid "Failed to establish SSC connection." -msgstr "無法建立 SSC 連線。" - msgid "Failed to establish connection with Coho cluster" msgstr "無法建立與 Coho 叢集的連線。" @@ -3265,10 +3125,6 @@ msgstr "找不到主機 %s。" msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "找不到包含 %(initiator)s 的 iSCSI 起始器群組。" -#, python-format -msgid "Failed to find storage pool for source volume %s." -msgstr "找不到來源磁區 %s 的儲存區。" - #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "無法取得帳戶 [%s] 的 CloudByte 帳戶詳細資料。" @@ -3472,24 +3328,6 @@ msgid "" "was not a floating-point number." msgstr "無法管理現有磁區 %(name)s,因為所報告的大小 %(size)s不是浮點數字。" -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the NFS share passed in the volume reference." -msgstr "" -"無法管理現有磁區,因為所選磁區類型的儲存區與磁區參照中傳遞的 NFS 共用項目不" -"符。" - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the file system passed in the volume reference." -msgstr "" -"無法管理現有磁區,因為所選磁區類型的儲存區與磁區參照中傳遞的檔案系統不符。" - -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"does not match the pool of the host." -msgstr "無法管理現有磁區,因為所選磁區類型的儲存區與主機的儲存區不符。" - #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " @@ -3801,9 +3639,6 @@ msgstr "尋找主機 LUN ID 時發生錯誤。" msgid "Find lun group from mapping view error." msgstr "從對映視圖中尋找 LUN 群組時發生錯誤。" -msgid "Find lun number error." -msgstr "尋找 LUN 號碼時發生錯誤。" - msgid "Find mapping view error." msgstr "尋找對映視圖時發生錯誤。" @@ -4146,9 +3981,6 @@ msgstr "從 DRBDmanage 取得不正確的路徑資訊!(%s)" msgid "HBSD error occurs." 
msgstr "發生 HBSD 錯誤。" -msgid "HNAS has disconnected SSC" -msgstr "HNAS 具有已切斷連線的 SSC" - msgid "HPELeftHand url not found" msgstr "找不到 HPELeftHand URL" @@ -4185,14 +4017,6 @@ msgstr "主機 %(host)s 不符合 x509 憑證內容:CommonName %(commonName)s msgid "Host %s has no FC initiators" msgstr "主機 %s 沒有 FC 起始器" -#, python-format -msgid "Host %s has no iSCSI initiator" -msgstr "主機 %s 沒有 iSCSI 起始器" - -#, python-format -msgid "Host '%s' could not be found." -msgstr "找不到主機 '%s'。" - #, python-format msgid "Host group with name %s not found" msgstr "找不到名稱為 %s 的主機群組" @@ -4207,9 +4031,6 @@ msgstr "主機未處於「凍結」狀態。" msgid "Host is already Frozen." msgstr "主機已經處於「凍結」狀態。" -msgid "Host not found" -msgstr "找不到主機" - #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "找不到主機。無法移除 %(host)s 上的 %(service)s。" @@ -4236,9 +4057,6 @@ msgstr "I/O 群組 %(iogrp)d 無效;可用的 I/O 群組數目是 %(avail)s。 msgid "ID" msgstr "識別號" -msgid "IP address/hostname of Blockbridge API." -msgstr "Blockbridge API 的 IP 位址/主機名稱。" - msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "如果壓縮設為 True,則也必須設定調整大小(不等於 -1)。" @@ -4324,12 +4142,6 @@ msgstr "" "Infortrend CLI 異常狀況:%(err)s,參數:%(param)s(回覆碼:%(rc)s)(輸出:" "%(out)s)" -msgid "Initial tier: {}, policy: {} is not valid." -msgstr "起始層 {},原則 {} 無效。" - -msgid "Input type {} is not supported." -msgstr "輸入類型 {} 不受支援。" - msgid "Input volumes or snapshots are invalid." msgstr "輸入磁區或 Snapshot 無效。" @@ -4346,13 +4158,6 @@ msgstr "可用空間不足,無法延伸磁區。" msgid "Insufficient privileges" msgstr "專用權不足" -msgid "Interval value (in seconds) between connection retries to ceph cluster." -msgstr "與 ceph 叢集的連線重試之間的間隔值(以秒為單位)。" - -#, python-format -msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "對 io_port_list 指定了無效的 %(protocol)s 埠 %(port)s。" - #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "無效的 3PAR 網域:%(err)s" @@ -4395,10 +4200,6 @@ msgstr "取得磁區 %s 的服務品質原則時,偵測到無效的服務品 msgid "Invalid Replication Target: %(reason)s" msgstr "無效的抄寫目標:%(reason)s" -#, python-format -msgid "Invalid VNX authentication type: %s" -msgstr "無效的 VNX 鑑別類型:%s" - #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," @@ -4441,14 +4242,6 @@ msgstr "無效的鑑別金鑰:%(reason)s" msgid "Invalid backup: %(reason)s" msgstr "無效的備份:%(reason)s" -#, python-format -msgid "" -"Invalid barbican api url: version is required, e.g. 'http[s]://|" -"[:port]/' url specified is: %s" -msgstr "" -"無效的 Barbican API URL:需要版本,例如 'http[s]://|[:port]/" -"' URL,但指定的是:%s" - msgid "Invalid chap user details found in CloudByte storage." msgstr "在 CloudByte 儲存體中找到無效的 CHAP 使用者詳細資料。" @@ -4613,10 +4406,6 @@ msgstr "所指定的儲存區 %s 無效。" msgid "Invalid storage pool is configured." msgstr "所配置的儲存區無效。" -#, python-format -msgid "Invalid synchronize mode specified, allowed mode is %s." -msgstr "指定的同步化模式無效,容許的模式為 %s。" - msgid "Invalid transport type." msgstr "傳輸類型無效。" @@ -4624,14 +4413,6 @@ msgstr "傳輸類型無效。" msgid "Invalid update setting: '%s'" msgstr "無效的更新設定:'%s'" -#, python-format -msgid "" -"Invalid url: must be in the form 'http[s]://|[:port]/" -"', url specified is: %s" -msgstr "" -"無效的 URL:格式必須是 'http[s]://|[:port]/',指定的 " -"URL 是:%s" - #, python-format msgid "Invalid value '%s' for force." msgstr "force 的值 '%s' 無效。" @@ -4769,9 +4550,6 @@ msgid "" "Issuing a fail-over failed because replication is not properly configured." 
msgstr "發出失效接手失敗,因為未正確配置抄寫。" -msgid "Item not found" -msgstr "找不到項目" - #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "在 CloudByte 的建立磁區 [%s] 回應中找不到工作 ID。" @@ -4799,9 +4577,6 @@ msgstr "磁區不存在 LU:%s" msgid "LUN export failed!" msgstr "LUN 匯出失敗!" -msgid "LUN id({}) is not valid." -msgstr "LUN ID ({}) 無效。" - msgid "LUN map overflow on every channel." msgstr "在每個通道上,LUN 對映溢位。" @@ -4809,9 +4584,6 @@ msgstr "在每個通道上,LUN 對映溢位。" msgid "LUN not found with given ref %s." msgstr "找不到具有給定參照 %s 的 LUN。" -msgid "LUN number ({}) is not an integer." -msgstr "LUN 號碼 ({}) 不是整數。" - #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 號碼已超出通道 ID %(ch_id)s 的範圍。" @@ -4983,53 +4755,15 @@ msgstr "此磁區已存在 meta 資料備份" msgid "Metadata backup object '%s' already exists" msgstr "meta 資料備份物件 '%s' 已存在" -msgid "Metadata item was not found" -msgstr "找不到 meta 資料項目" - -msgid "Metadata item was not found." -msgstr "找不到 meta 資料項目。" - -#, python-format -msgid "Metadata property key %s greater than 255 characters" -msgstr "meta 資料內容索引鍵 %s 超過 255 個字元" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters" -msgstr "meta 資料內容索引鍵 %s 值超過 255 個字元" - -msgid "Metadata property key blank" -msgstr "meta 資料內容索引鍵空白" - msgid "Metadata property key blank." msgstr "meta 資料內容索引鍵空白。" -msgid "Metadata property key greater than 255 characters." -msgstr "meta 資料內容索引鍵超過 255 個字元。" - -msgid "Metadata property value greater than 255 characters." -msgstr "meta 資料內容值超過 255 個字元。" - msgid "Metadata restore failed due to incompatible version" msgstr "meta 資料還原失敗,因為版本不相容" msgid "Metadata restore failed due to incompatible version." msgstr "由於版本不相容,meta 資料還原失敗。" -#, python-format -msgid "Migrate volume %(src)s failed." -msgstr "移轉磁區 %(src)s 失敗。" - -#, python-format -msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." -msgstr "在來源磁區 %(src)s 與目的地磁區 %(dst)s 之間移轉磁區失敗。" - -#, python-format -msgid "Migration of LUN %s has been stopped or faulted." -msgstr "LUN %s 移轉已停止或發生錯誤。" - -msgid "MirrorView/S enabler is not installed." -msgstr "未安裝 MirrorView/S 啟用程式。" - msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." @@ -5055,9 +4789,6 @@ msgstr "要求內文遺漏了必要元素 '%s'。" msgid "Missing required element 'consistencygroup' in request body." msgstr "要求內文中遺漏了必要元素 'consistencygroup'。" -msgid "Missing required element 'host' in request body." -msgstr "要求內文中遺漏了必要元素 'host'。" - msgid "Missing required element quota_class_set in request body." msgstr "要求內文中遺漏了必要元素 quota_class_set。" @@ -5170,9 +4901,6 @@ msgstr "必須指定儲存區名稱或 ID。" msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "必須指定儲存區。選項:sio_storage_pools。" -msgid "Must supply a positive value for age" -msgstr "必須為經歷時間提供正數值" - msgid "Must supply a positive, non-zero value for age" msgstr "必須為經歷時間提供非零正數值" @@ -5542,9 +5270,6 @@ msgid "" msgstr "" "在 CloudByte 儲存體中查詢 [%(operation)s] 型工作[%(job)s] 時接收到空值回應。" -msgid "Number of retries if connection to ceph cluster failed." -msgstr "與 ceph 叢集的連線失敗時的重試次數。" - msgid "Object Count" msgstr "物件計數" @@ -5598,16 +5323,10 @@ msgstr "未正確設定選項 gpfs_images_share_mode。" msgid "Option gpfs_mount_point_base is not set correctly." msgstr "未正確設定選項 gpfs_mount_point_base。" -msgid "Option map (cls._map) is not defined." 
-msgstr "未定義選項對映 (cls._map)。" - #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "原始 %(res)s %(prop)s 必須是值 %(vals)s 的其中之一" -msgid "Override HTTPS port to connect to Blockbridge API server." -msgstr "置換 HTTPS 埠以連接 Blockbridge API 伺服器。" - #, python-format msgid "ParseException: %s" msgstr "ParseException:%s" @@ -5847,14 +5566,6 @@ msgstr "RPC 伺服器回應不完整" msgid "Raid did not have MCS Channel." msgstr "Raid 沒有 MCS 通道。" -#, python-format -msgid "" -"Reach limitation set by configuration option max_luns_per_storage_group. " -"Operation to add %(vol)s into Storage Group %(sg)s is rejected." -msgstr "" -"達到由配置選項 max_luns_per_storage_group 設定的限制。用於將 %(vol)s 新增至" -"「儲存體群組」%(sg)s 中的作業遭到拒絕。" - #, python-format msgid "Received error string: %s" msgstr "接收到錯誤字串:%s" @@ -6027,9 +5738,6 @@ msgstr "找不到必要的配置" msgid "Required flag %s is not set" msgstr "未設定必要旗標 %s" -msgid "Requires an NaServer instance." -msgstr "需要 NaServer 實例。" - #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " @@ -6210,10 +5918,6 @@ msgstr "在主機 %(host)s 上找不到服務 %(service_id)s。" msgid "Service %(service_id)s could not be found." msgstr "找不到服務 %(service_id)s。" -#, python-format -msgid "Service %s not found." -msgstr "找不到服務 %s。" - msgid "Service is too old to fulfil this request." msgstr "服務太舊,無法滿足此要求。" @@ -6293,10 +5997,6 @@ msgstr "找不到 Snapshot %(snapshot_id)s。" msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "Snapshot %(snapshot_id)s 沒有索引鍵為 %(metadata_key)s 的 meta 資料。" -#, python-format -msgid "Snapshot %s must not be part of a consistency group." -msgstr "Snapshot %s 不得為一致性群組的一部分。" - #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "陣列上不存在 Snapshot '%s'。" @@ -6323,9 +6023,6 @@ msgstr "狀態 %s 不支援取得磁區的 Snapshot。" msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "未在任何位置部署 Snapshot 資源 \"%s\"?" -msgid "Snapshot size must be multiple of 1 GB." -msgstr "Snapshot 大小必須是 1 GB 的倍數。" - #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "Snapshot 狀態 %(cur)s,不為 update_snapshot_status 所接受" @@ -6468,9 +6165,6 @@ msgstr "未設定儲存體系統 ID。" msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "找不到儲存區 %(poolNameInStr)s 的儲存體系統。" -msgid "Storage-assisted migration failed during manage volume." -msgstr "在管理磁區期間,儲存體協助型移轉失敗。" - #, python-format msgid "StorageSystem %(array)s is not found." msgstr "找不到儲存體系統 %(array)s。" @@ -6515,10 +6209,6 @@ msgstr "在帳戶 [%(account)s] 的 CloudByte 儲存體中找不到 TSM [%(tsm)s msgid "Target volume type is still in use." msgstr "目標磁區類型仍在使用中。" -#, python-format -msgid "Tenant ID: %s does not exist." -msgstr "租戶 ID %s 不存在。" - msgid "Terminate connection failed" msgstr "終止連線失敗" @@ -6605,10 +6295,6 @@ msgstr "路徑 %(path)s 中的裝置無法使用:%(reason)s" msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "結束時間 (%(end)s) 必須晚於開始時間 (%(start)s)。" -#, python-format -msgid "The extra_spec: %s is invalid." -msgstr "extra_spec %s 無效。" - #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "額外規格 %(extraspec)s 無效。" @@ -6656,14 +6342,6 @@ msgstr "" msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP 使用者 %(user)s 不存在。" -#, python-format -msgid "" -"The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " -"the host %(host)s." -msgstr "" -"匯入的 LUN %(lun_id)s 位於未受下列主機管理的儲存區 %(lun_pool)s 中:" -"%(host)s。" - msgid "The key cannot be None." 
msgstr "金鑰不能是「無」。" @@ -6738,11 +6416,6 @@ msgstr "保留計數必須小於或等於 %s。" msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "當磁區處於維護模式時,無法建立 Snapshot。" -#, python-format -msgid "" -"The source volume %s is not in the pool which is managed by the current host." -msgstr "來源磁區 %s 不在現行主機管理的儲存區中。" - msgid "The source volume for this WebDAV operation not found." msgstr "找不到此 WebDAV 作業的來源磁區。" @@ -6891,10 +6564,6 @@ msgstr "沒有資源可供使用。(資源:%(resource)s)" msgid "There are no valid ESX hosts." msgstr "沒有有效的 ESX 主機。" -#, python-format -msgid "There are no valid datastores attached to %s." -msgstr "%s 沒有連接有效的資料儲存庫。" - msgid "There are no valid datastores." msgstr "沒有有效的資料儲存庫。" @@ -6982,9 +6651,6 @@ msgstr "" msgid "Thin provisioning not supported on this version of LVM." msgstr "此版本的 LVM 不支援精簡供應。" -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" -msgstr "未安裝「精簡供應啟用程式」。無法建立精簡磁區" - msgid "This driver does not support deleting in-use snapshots." msgstr "此驅動程式不支援刪除使用中 Snapshot。" @@ -7013,13 +6679,6 @@ msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "等待 Nova 更新以刪除 Snapshot %(id)s 時發生逾時。" -msgid "" -"Timeout value (in seconds) used when connecting to ceph cluster. If value < " -"0, no timeout is set and default librados value is used." -msgstr "" -"連接至 ceph 叢集時使用的逾時值(以秒為單位)。如果值< 0,則不設定任何逾時值," -"並使用預設 librados 值。" - #, python-format msgid "Timeout while calling %s " msgstr "呼叫 %s 時逾時" @@ -7099,9 +6758,6 @@ msgstr "無法完成 %s 的失效接手。" msgid "Unable to connect or find connection to host" msgstr "無法連接至主機或找不到與主機的連線" -msgid "Unable to create Barbican Client without project_id." -msgstr "無法建立不含 project_id 的 Barbican 用戶端。" - #, python-format msgid "Unable to create consistency group %s" msgstr "無法建立一致性群組 %s" @@ -7193,9 +6849,6 @@ msgstr "" "無法使用純度 REST API %(api_version)s 版來執行抄寫作業,需要下列其中一個:" "%(required_versions)s。" -msgid "Unable to enable replication and snapcopy at the same time." -msgstr "無法同時啟用抄寫和 Snapcopy。" - #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "無法與 Storwize 叢集 %s 建立夥伴關係。" @@ -7552,9 +7205,6 @@ msgstr "不明的通訊協定:%(protocol)s。" msgid "Unknown quota resources %(unknown)s." msgstr "不明的配額資源 %(unknown)s。" -msgid "Unknown service" -msgstr "不明的服務" - msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "不明的排序方向,必須為 'desc' 或 'asc'" @@ -7657,9 +7307,6 @@ msgstr "使用者識別號" msgid "User does not have admin privileges" msgstr "使用者並沒有管理者權力" -msgid "User is not authorized to use key manager." -msgstr "使用者未獲授權來使用金鑰管理程式。" - msgid "User not authorized to perform WebDAV operations." msgstr "使用者未獲授權來執行 WebDAV 作業。" @@ -7865,12 +7512,6 @@ msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "磁區 %s 在線上。請將磁區設為離線,以使用 OpenStack 進行管理。" -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a consistency group or " -"have snapshots." -msgstr "磁區 %s 不得在移轉中、已連接、屬於某個一致性群組或具有 Snapshot。" - #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "磁區 %s 不得為一致性群組的一部分。" @@ -7899,10 +7540,6 @@ msgstr "磁區群組 %s 不存在" msgid "Volume Type %(id)s already exists." msgstr "磁區類型 %(id)s 已存在。" -#, python-format -msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." 
-msgstr "磁區類型 %(type_id)s 沒有索引鍵為 %(id)s 的額外規格。" - #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " @@ -8089,9 +7726,6 @@ msgstr "磁區大小自前次備份以來已增加。請執行完整備份。" msgid "Volume size must be a multiple of 1 GB." msgstr "磁區大小必須是 1 GB 的倍數。" -msgid "Volume size must be multiple of 1 GB." -msgstr "磁區大小必須是 1 GB 的倍數。" - msgid "Volume size must multiple of 1 GB." msgstr "磁區大小必須是 1 GB 的倍數。" @@ -8160,10 +7794,6 @@ msgstr "磁區類型名稱不能為空。" msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "找不到名稱為 %(volume_type_name)s 的磁區類型。" -#, python-format -msgid "Volume with volume id %s does not exist." -msgstr "磁區 ID 為 %s 的磁區不存在。" - #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " @@ -8176,17 +7806,10 @@ msgstr "" msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "未將磁區 %(volumeName)s 新增至儲存體群組 %(sgGroupName)s。" -#, python-format -msgid "Volume: %s could not be found." -msgstr "找不到磁區 %s。" - #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "磁區 %s 已經由 Cinder 進行管理。" -msgid "Volumes will be chunked into objects of this size (in megabytes)." -msgstr "磁區將區塊化成此大小(以 MB 為單位)的物件。" - msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "超過了主要及次要 SolidFire 帳戶上的磁區/帳戶。" @@ -8786,13 +8409,6 @@ msgstr "" "create_consistencygroup_from_src 僅支援一個 cgsnapshot 來源或一個一致性群組來" "源。不能使用多個來源。" -msgid "" -"create_consistencygroup_from_src supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src 支援一個 cgsnapshot 來源或一個一致性群組來" -"源。不能使用多個來源。" - #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy:來源 vdisk %(src)s (%(src_id)s) 不存在。" @@ -8892,17 +8508,11 @@ msgstr "" "create_volume_from_snapshot:Snapshot 狀態必須為「可用」,才能建立磁區。無效" "的狀態為:%s。" -msgid "create_volume_from_snapshot: Source and destination size differ." -msgstr "create_volume_from_snapshot:來源及目的地大小不同。" - msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "create_volume_from_snapshot:磁區大小與 Snapshot 型磁區不同。" -msgid "deduplicated and auto tiering can't be both enabled." -msgstr "無法同時啟用已刪除的重複層級和自動層級。" - #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " @@ -8944,9 +8554,6 @@ msgstr "將 Snapshot 從遠端節點分離" msgid "do_setup: No configured nodes." msgstr "do_setup:未配置節點。" -msgid "eqlx_cli_max_retries must be greater than or equal to 0" -msgstr "eqlx_cli_max_retries 必須大於或等於 0" - #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " @@ -9134,22 +8741,12 @@ msgstr "iscsiadm 執行失敗。" msgid "key manager error: %(reason)s" msgstr "金鑰管理程式錯誤:%(reason)s" -msgid "keymgr.fixed_key not defined" -msgstr "未定義 keymgr.fixed_key" - msgid "limit param must be an integer" msgstr "限制參數必須是整數" msgid "limit param must be positive" msgstr "限制參數必須是正數" -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing" -msgstr "" -"manage_existing 無法管理已連接至主機的磁區。請先斷開此磁區與現有主機的連線," -"然後再匯入" - msgid "manage_existing requires a 'name' key to identify an existing volume." 
msgstr "manage_existing 需要 'name' 索引鍵來確認現有磁區。" @@ -9193,10 +8790,6 @@ msgstr "找到多個資源具有 Snapshot ID %s" msgid "name cannot be None" msgstr "名稱不能為 None" -#, python-format -msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." -msgstr "naviseccli_path:找不到 NAVISECCLI 工具 %(path)s。" - #, python-format msgid "no REPLY but %r" msgstr "沒有回覆,但 %r" @@ -9258,14 +8851,6 @@ msgstr "找不到 rados 及 rbd python 程式庫" msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted 只能是 'no'、'yes' 或 'only' 其中之一,不能是 %r" -#, python-format -msgid "replication_device should be configured on backend: %s." -msgstr "應該在後端上配置 replication_device:%s。" - -#, python-format -msgid "replication_device with backend_id [%s] is missing." -msgstr "遺漏 backend_id 為 [%s] 的 replication_device。" - #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover 失敗。找不到 %s。" @@ -9319,9 +8904,6 @@ msgstr "未設定 san_ip。" msgid "san_ip must be set" msgstr "必須設定 san_ip" -msgid "san_ip: Mandatory field configuration. san_ip is not set." -msgstr "san_ip:必要欄位配置。未設定 san_ip。" - msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." @@ -9332,16 +8914,6 @@ msgstr "" msgid "serve() can only be called once" msgstr "只能呼叫 serve() 一次" -msgid "service not found" -msgstr "找不到服務" - -msgid "snapshot does not exist" -msgstr "Snapshot 不存在" - -#, python-format -msgid "snapshot id:%s not found" -msgstr "找不到 Snapshot ID:%s" - #, python-format msgid "snapshot-%s" msgstr "snapshot-%s" @@ -9352,10 +8924,6 @@ msgstr "已指派 Snapshot" msgid "snapshots changed" msgstr "已變更 Snapshot" -#, python-format -msgid "source vol id:%s not found" -msgstr "找不到來源磁區 ID:%s" - #, python-format msgid "source volume id:%s is not replicated" msgstr "未抄寫來源磁區 ID:%s" @@ -9450,9 +9018,6 @@ msgstr "已指派磁區" msgid "volume changed" msgstr "已變更磁區" -msgid "volume does not exist" -msgstr "磁區不存在" - msgid "volume is already attached" msgstr "已連接磁區" @@ -9468,9 +9033,6 @@ msgstr "磁區大小 %(volume_size)d 太小,無法還原大小為 %(size)d 的 msgid "volume size %d is invalid." msgstr "磁區大小 %d 無效。" -msgid "volume_type cannot be None" -msgstr "volume_type 不能為 None" - msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "在一致性群組中建立磁區時,必須提供volume_type。" @@ -9501,6 +9063,3 @@ msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "zfssa_manage_policy 內容需要設為 'strict' 或 'loose'。現行值為:%s。" - -msgid "{} is not a valid option." -msgstr "{} 不是有效的選項。" diff --git a/cinder/manager.py b/cinder/manager.py index 0195b7fb8..4d239d1b3 100644 --- a/cinder/manager.py +++ b/cinder/manager.py @@ -80,10 +80,11 @@ class Manager(base.Base, PeriodicTasks): target = messaging.Target(version=RPC_API_VERSION) - def __init__(self, host=None, db_driver=None): + def __init__(self, host=None, db_driver=None, cluster=None): if not host: host = CONF.host self.host = host + self.cluster = cluster self.additional_endpoints = [] super(Manager, self).__init__(db_driver) @@ -91,13 +92,17 @@ class Manager(base.Base, PeriodicTasks): """Tasks to be run at a periodic interval.""" return self.run_periodic_tasks(context, raise_on_error=raise_on_error) - def init_host(self): + def init_host(self, added_to_cluster=None): """Handle initialization if this is a standalone service. A hook point for services to execute tasks before the services are made available (i.e. 
showing up on RPC and starting to accept RPC calls) to other components. Child classes should override this method. + :param added_to_cluster: True when a host's cluster configuration has + changed from not being defined or being '' to + any other value and the DB service record + reflects this new value. """ pass @@ -140,12 +145,14 @@ class SchedulerDependentManager(Manager): """ - def __init__(self, host=None, db_driver=None, service_name='undefined'): + def __init__(self, host=None, db_driver=None, service_name='undefined', + cluster=None): self.last_capabilities = None self.service_name = service_name self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self._tp = greenpool.GreenPool() - super(SchedulerDependentManager, self).__init__(host, db_driver) + super(SchedulerDependentManager, self).__init__(host, db_driver, + cluster=cluster) def update_service_capabilities(self, capabilities): """Remember these capabilities to send on next periodic update.""" diff --git a/cinder/objects/__init__.py b/cinder/objects/__init__.py index 10367843a..9026b3e95 100644 --- a/cinder/objects/__init__.py +++ b/cinder/objects/__init__.py @@ -26,9 +26,15 @@ def register_all(): # need to receive it via RPC. __import__('cinder.objects.backup') __import__('cinder.objects.cgsnapshot') + __import__('cinder.objects.cluster') __import__('cinder.objects.consistencygroup') + __import__('cinder.objects.qos_specs') + __import__('cinder.objects.request_spec') __import__('cinder.objects.service') __import__('cinder.objects.snapshot') __import__('cinder.objects.volume') __import__('cinder.objects.volume_attachment') __import__('cinder.objects.volume_type') + __import__('cinder.objects.group_type') + __import__('cinder.objects.group') + __import__('cinder.objects.group_snapshot') diff --git a/cinder/objects/backup.py b/cinder/objects/backup.py index 9f5778f42..dec8ffd76 100644 --- a/cinder/objects/backup.py +++ b/cinder/objects/backup.py @@ -122,7 +122,9 @@ class Backup(base.CinderPersistentObject, base.CinderObject, def destroy(self): with self.obj_as_admin(): - db.backup_destroy(self._context, self.id) + updated_values = db.backup_destroy(self._context, self.id) + self.update(updated_values) + self.obj_reset_changes(updated_values.keys()) @staticmethod def decode_record(backup_url): diff --git a/cinder/objects/base.py b/cinder/objects/base.py index af7054995..fd202cb71 100644 --- a/cinder/objects/base.py +++ b/cinder/objects/base.py @@ -79,6 +79,10 @@ class CinderObjectVersionsHistory(dict): return self[self.get_current()] def add(self, ver, updates): + if ver in self.versions: + msg = 'Version %s already exists in history.' 
% ver + raise exception.ProgrammingError(reason=msg) + self[ver] = self[self.get_current()].copy() self.versions.append(ver) self[ver].update(updates) @@ -100,6 +104,18 @@ OBJ_VERSIONS.add('1.2', {'Backup': '1.4', 'BackupImport': '1.4'}) OBJ_VERSIONS.add('1.3', {'Service': '1.3'}) OBJ_VERSIONS.add('1.4', {'Snapshot': '1.1'}) OBJ_VERSIONS.add('1.5', {'VolumeType': '1.1'}) +OBJ_VERSIONS.add('1.6', {'QualityOfServiceSpecs': '1.0', + 'QualityOfServiceSpecsList': '1.0', + 'VolumeType': '1.2'}) +OBJ_VERSIONS.add('1.7', {'Cluster': '1.0', 'ClusterList': '1.0', + 'Service': '1.4', 'Volume': '1.4', + 'ConsistencyGroup': '1.3'}) +OBJ_VERSIONS.add('1.8', {'RequestSpec': '1.0', 'VolumeProperties': '1.0'}) +OBJ_VERSIONS.add('1.9', {'GroupType': '1.0', 'GroupTypeList': '1.0'}) +OBJ_VERSIONS.add('1.10', {'Group': '1.0', 'GroupList': '1.0', 'Volume': '1.5', + 'RequestSpec': '1.1', 'VolumeProperties': '1.1'}) +OBJ_VERSIONS.add('1.11', {'GroupSnapshot': '1.0', 'GroupSnapshotList': '1.0', + 'Group': '1.1'}) class CinderObjectRegistry(base.VersionedObjectRegistry): @@ -125,11 +141,6 @@ class CinderObject(base.VersionedObject): # from one another. OBJ_PROJECT_NAMESPACE = 'cinder' - # NOTE(thangp): As more objects are added to cinder, each object should - # have a custom map of version compatibility. This just anchors the base - # version compatibility. - VERSION_COMPATIBILITY = {'7.0.0': '1.0'} - def cinder_obj_get_changes(self): """Returns a dict of changed fields with tz unaware datetimes. @@ -259,7 +270,7 @@ class CinderPersistentObject(object): self._context = original_context @classmethod - def _get_expected_attrs(cls, context): + def _get_expected_attrs(cls, context, *args, **kwargs): return None @classmethod @@ -271,9 +282,10 @@ class CinderPersistentObject(object): (cls.obj_name())) raise NotImplementedError(msg) - model = db.get_model_for_versioned_object(cls) - orm_obj = db.get_by_id(context, model, id, *args, **kwargs) + orm_obj = db.get_by_id(context, cls.model, id, *args, **kwargs) expected_attrs = cls._get_expected_attrs(context) + # We pass parameters because fields to expect may depend on them + expected_attrs = cls._get_expected_attrs(context, *args, **kwargs) kargs = {} if expected_attrs: kargs = {'expected_attrs': expected_attrs} @@ -407,19 +419,14 @@ class CinderPersistentObject(object): current = self.get_by_id(self._context, self.id) - for field in self.fields: - # Only update attributes that are already set. We do not want to - # unexpectedly trigger a lazy-load. 
- if self.obj_attr_is_set(field): - current_field = getattr(current, field) - if getattr(self, field) != current_field: - setattr(self, field, current_field) - self.obj_reset_changes() + # Copy contents retrieved from the DB into self + my_data = vars(self) + my_data.clear() + my_data.update(vars(current)) @classmethod def exists(cls, context, id_): - model = db.get_model_for_versioned_object(cls) - return db.resource_exists(context, model, id_) + return db.resource_exists(context, cls.model, id_) class CinderComparableObject(base.ComparableVersionedObject): @@ -439,6 +446,12 @@ class ObjectListBase(base.ObjectListBase): target_version) +class ClusteredObject(object): + @property + def service_topic_queue(self): + return self.cluster_name or self.host + + class CinderObjectSerializer(base.VersionedObjectSerializer): OBJ_BASE_CLASS = CinderObject diff --git a/cinder/objects/cgsnapshot.py b/cinder/objects/cgsnapshot.py index d5ad3772e..2e12fec2b 100644 --- a/cinder/objects/cgsnapshot.py +++ b/cinder/objects/cgsnapshot.py @@ -40,6 +40,10 @@ class CGSnapshot(base.CinderPersistentObject, base.CinderObject, 'snapshots': fields.ObjectField('SnapshotList', nullable=True), } + @property + def service_topic_queue(self): + return self.consistencygroup.service_topic_queue + @classmethod def _from_db_object(cls, context, cgsnapshot, db_cgsnapshots, expected_attrs=None): @@ -114,7 +118,9 @@ class CGSnapshot(base.CinderPersistentObject, base.CinderObject, def destroy(self): with self.obj_as_admin(): - db.cgsnapshot_destroy(self._context, self.id) + updated_values = db.cgsnapshot_destroy(self._context, self.id) + self.update(updated_values) + self.obj_reset_changes(updated_values.keys()) @base.CinderObjectRegistry.register diff --git a/cinder/objects/cluster.py b/cinder/objects/cluster.py new file mode 100644 index 000000000..857aa3b89 --- /dev/null +++ b/cinder/objects/cluster.py @@ -0,0 +1,186 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import fields + +from cinder import db +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder.objects import base +from cinder import utils + + +@base.CinderObjectRegistry.register +class Cluster(base.CinderPersistentObject, base.CinderObject, + base.CinderComparableObject): + """Cluster Versioned Object. + + Method get_by_id supports as additional named arguments: + - get_services: If we want to load all services from this cluster. + - services_summary: If we want to load num_nodes and num_down_nodes + fields. + - is_up: Boolean value to filter based on the cluster's up status. + - read_deleted: Filtering based on delete status. Default value "no". + - Any other cluster field will be used as a filter. 
+ """ + # Version 1.0: Initial version + VERSION = '1.0' + OPTIONAL_FIELDS = ('num_hosts', 'num_down_hosts', 'services') + + # NOTE(geguileo): We don't want to expose race_preventer field at the OVO + # layer since it is only meant for the DB layer internal mechanism to + # prevent races. + fields = { + 'id': fields.IntegerField(), + 'name': fields.StringField(nullable=False), + 'binary': fields.StringField(nullable=False), + 'disabled': fields.BooleanField(default=False, nullable=True), + 'disabled_reason': fields.StringField(nullable=True), + 'num_hosts': fields.IntegerField(default=0, read_only=True), + 'num_down_hosts': fields.IntegerField(default=0, read_only=True), + 'last_heartbeat': fields.DateTimeField(nullable=True, read_only=True), + 'services': fields.ObjectField('ServiceList', nullable=True, + read_only=True), + } + + @classmethod + def _get_expected_attrs(cls, context, *args, **kwargs): + """Return expected attributes when getting a cluster. + + Expected attributes depend on whether we are retrieving all related + services as well as if we are getting the services summary. + """ + expected_attrs = [] + if kwargs.get('get_services'): + expected_attrs.append('services') + if kwargs.get('services_summary'): + expected_attrs.extend(('num_hosts', 'num_down_hosts')) + return expected_attrs + + @staticmethod + def _from_db_object(context, cluster, db_cluster, expected_attrs=None): + """Fill cluster OVO fields from cluster ORM instance.""" + expected_attrs = expected_attrs or tuple() + for name, field in cluster.fields.items(): + # The only field that cannot be assigned using setattr is services, + # because it is an ObjectField. So we don't assign the value if + # it's a non expected optional field or if it's services field. + if ((name in Cluster.OPTIONAL_FIELDS + and name not in expected_attrs) or name == 'services'): + continue + value = getattr(db_cluster, name) + setattr(cluster, name, value) + + cluster._context = context + if 'services' in expected_attrs: + cluster.services = base.obj_make_list( + context, + objects.ServiceList(context), + objects.Service, + db_cluster.services) + + cluster.obj_reset_changes() + return cluster + + def obj_load_attr(self, attrname): + """Lazy load services attribute.""" + # NOTE(geguileo): We only allow lazy loading services to raise + # awareness of the high cost of lazy loading num_hosts and + # num_down_hosts, so if we are going to need this information we should + # be certain we really need it and it should loaded when retrieving the + # data from the DB the first time we read the OVO. 
+ if attrname != 'services': + raise exception.ObjectActionError( + action='obj_load_attr', + reason=_('attribute %s not lazy-loadable') % attrname) + if not self._context: + raise exception.OrphanedObjectError(method='obj_load_attr', + objtype=self.obj_name()) + + self.services = objects.ServiceList.get_all( + self._context, {'cluster_name': self.name}) + + self.obj_reset_changes(fields=('services',)) + + def create(self): + if self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='create', + reason=_('already created')) + updates = self.cinder_obj_get_changes() + if updates: + for field in self.OPTIONAL_FIELDS: + if field in updates: + raise exception.ObjectActionError( + action='create', reason=_('%s assigned') % field) + + db_cluster = db.cluster_create(self._context, updates) + self._from_db_object(self._context, self, db_cluster) + + def save(self): + updates = self.cinder_obj_get_changes() + if updates: + for field in self.OPTIONAL_FIELDS: + if field in updates: + raise exception.ObjectActionError( + action='save', reason=_('%s changed') % field) + db.cluster_update(self._context, self.id, updates) + self.obj_reset_changes() + + def destroy(self): + with self.obj_as_admin(): + updated_values = db.cluster_destroy(self._context, self.id) + for field, value in updated_values.items(): + setattr(self, field, value) + self.obj_reset_changes(updated_values.keys()) + + def is_up(self): + return (self.last_heartbeat and + self.last_heartbeat >= utils.service_expired_time(True)) + + +@base.CinderObjectRegistry.register +class ClusterList(base.ObjectListBase, base.CinderObject): + # Version 1.0: Initial version + VERSION = '1.0' + + fields = {'objects': fields.ListOfObjectsField('Cluster')} + + @classmethod + def get_all(cls, context, is_up=None, get_services=False, + services_summary=False, read_deleted='no', **filters): + """Get all clusters that match the criteria. + + :param is_up: Boolean value to filter based on the cluster's up status. + :param get_services: If we want to load all services from this cluster. + :param services_summary: If we want to load num_nodes and + num_down_nodes fields. + :param read_deleted: Filtering based on delete status. Default value is + "no". + :param filters: Field based filters in the form of key/value. + """ + + expected_attrs = Cluster._get_expected_attrs( + context, + get_services=get_services, + services_summary=services_summary) + + clusters = db.cluster_get_all(context, is_up=is_up, + get_services=get_services, + services_summary=services_summary, + read_deleted=read_deleted, + **filters) + return base.obj_make_list(context, cls(context), Cluster, clusters, + expected_attrs=expected_attrs) diff --git a/cinder/objects/consistencygroup.py b/cinder/objects/consistencygroup.py index 14ff6585c..46fb5e461 100644 --- a/cinder/objects/consistencygroup.py +++ b/cinder/objects/consistencygroup.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. 
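[Annotation — illustrative only, not part of the patch. A minimal sketch of how the new Cluster/ClusterList objects above are meant to be consumed, based on the docstrings in cluster.py; it assumes a deployed Cinder with a reachable database, and "ctxt" stands in for a real admin context. The consistency-group changes continue below.]

    # Sketch: consuming the Cluster OVO added above (assumes a live Cinder DB).
    from cinder import context
    from cinder import objects

    objects.register_all()
    ctxt = context.get_admin_context()

    # Ask for the summary columns up front; per the NOTE in obj_load_attr,
    # num_hosts/num_down_hosts are deliberately not lazy-loadable.
    clusters = objects.ClusterList.get_all(ctxt, is_up=True,
                                           services_summary=True)
    for cluster in clusters:
        print('%s: %s hosts (%s down)' % (cluster.name, cluster.num_hosts,
                                          cluster.num_down_hosts))
        # "services" is the one attribute obj_load_attr will fetch on demand.
        for service in cluster.services:
            print('  %s (%s)' % (service.host, service.binary))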
+from oslo_utils import versionutils + from cinder import db from cinder import exception from cinder.i18n import _ @@ -23,18 +25,22 @@ from oslo_versionedobjects import fields @base.CinderObjectRegistry.register class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat): + base.CinderObjectDictCompat, base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Added cgsnapshots and volumes relationships # Version 1.2: Changed 'status' field to use ConsistencyGroupStatusField - VERSION = '1.2' + # Version 1.3: Added cluster fields + VERSION = '1.3' - OPTIONAL_FIELDS = ['cgsnapshots', 'volumes'] + OPTIONAL_FIELDS = ('cgsnapshots', 'volumes', 'cluster') fields = { 'id': fields.UUIDField(), 'user_id': fields.StringField(), 'project_id': fields.StringField(), + 'cluster_name': fields.StringField(nullable=True), + 'cluster': fields.ObjectField('Cluster', nullable=True, + read_only=True), 'host': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'name': fields.StringField(nullable=True), @@ -47,6 +53,18 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject, 'volumes': fields.ObjectField('VolumeList', nullable=True), } + def obj_make_compatible(self, primitive, target_version): + """Make a CG representation compatible with a target version.""" + # Convert all related objects + super(ConsistencyGroup, self).obj_make_compatible(primitive, + target_version) + + target_version = versionutils.convert_version_to_tuple(target_version) + # Before v1.3 we didn't have cluster fields so we have to remove them. + if target_version < (1, 3): + for obj_field in ('cluster', 'cluster_name'): + primitive.pop(obj_field, None) + @classmethod def _from_db_object(cls, context, consistencygroup, db_consistencygroup, expected_attrs=None): @@ -72,6 +90,18 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject, db_consistencygroup['volumes']) consistencygroup.volumes = volumes + if 'cluster' in expected_attrs: + db_cluster = db_consistencygroup.get('cluster') + # If this consistency group doesn't belong to a cluster the cluster + # field in the ORM instance will have value of None. + if db_cluster: + consistencygroup.cluster = objects.Cluster(context) + objects.Cluster._from_db_object(context, + consistencygroup.cluster, + db_cluster) + else: + consistencygroup.cluster = None + consistencygroup._context = context consistencygroup.obj_reset_changes() return consistencygroup @@ -96,6 +126,10 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject, raise exception.ObjectActionError(action='create', reason=_('volumes assigned')) + if 'cluster' in updates: + raise exception.ObjectActionError( + action='create', reason=_('cluster assigned')) + db_consistencygroups = db.consistencygroup_create(self._context, updates, cg_snap_id, @@ -119,6 +153,15 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject, self.volumes = objects.VolumeList.get_all_by_group(self._context, self.id) + # If this consistency group doesn't belong to a cluster (cluster_name + # is empty), then cluster field will be None. 
+ if attrname == 'cluster': + if self.cluster_name: + self.cluster = objects.Cluster.get_by_id( + self._context, name=self.cluster_name) + else: + self.cluster = None + self.obj_reset_changes(fields=[attrname]) def save(self): @@ -130,13 +173,19 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject, if 'volumes' in updates: raise exception.ObjectActionError( action='save', reason=_('volumes changed')) + if 'cluster' in updates: + raise exception.ObjectActionError( + action='save', reason=_('cluster changed')) db.consistencygroup_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): - db.consistencygroup_destroy(self._context, self.id) + updated_values = db.consistencygroup_destroy(self._context, + self.id) + self.update(updated_values) + self.obj_reset_changes(updated_values.keys()) @base.CinderObjectRegistry.register @@ -149,6 +198,26 @@ class ConsistencyGroupList(base.ObjectListBase, base.CinderObject): 'objects': fields.ListOfObjectsField('ConsistencyGroup') } + @staticmethod + def include_in_cluster(context, cluster, partial_rename=True, **filters): + """Include all consistency groups matching the filters into a cluster. + + When partial_rename is set we will not set the cluster_name with + cluster parameter value directly, we'll replace provided cluster_name + or host filter value with cluster instead. + + This is useful when we want to replace just the cluster name but leave + the backend and pool information as it is. If we are using + cluster_name to filter, we'll use that same DB field to replace the + cluster value and leave the rest as it is. Likewise if we use the host + to filter. + + Returns the number of consistency groups that have been changed. + """ + return db.consistencygroup_include_in_cluster(context, cluster, + partial_rename, + **filters) + @classmethod def get_all(cls, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): diff --git a/cinder/objects/fields.py b/cinder/objects/fields.py index 9f8121f44..1c8e80367 100644 --- a/cinder/objects/fields.py +++ b/cinder/objects/fields.py @@ -23,7 +23,12 @@ Field = fields.Field FieldType = fields.FieldType -class BackupStatus(Enum): +class BaseCinderEnum(Enum): + def __init__(self): + super(BaseCinderEnum, self).__init__(valid_values=self.__class__.ALL) + + +class BackupStatus(BaseCinderEnum): ERROR = 'error' ERROR_DELETING = 'error_deleting' CREATING = 'creating' @@ -35,15 +40,12 @@ class BackupStatus(Enum): ALL = (ERROR, ERROR_DELETING, CREATING, AVAILABLE, DELETING, DELETED, RESTORING) - def __init__(self): - super(BackupStatus, self).__init__(valid_values=BackupStatus.ALL) - class BackupStatusField(BaseEnumField): AUTO_TYPE = BackupStatus() -class ConsistencyGroupStatus(Enum): +class ConsistencyGroupStatus(BaseCinderEnum): ERROR = 'error' AVAILABLE = 'available' CREATING = 'creating' @@ -55,16 +57,30 @@ class ConsistencyGroupStatus(Enum): ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, UPDATING, ERROR_DELETING) - def __init__(self): - super(ConsistencyGroupStatus, self).__init__( - valid_values=ConsistencyGroupStatus.ALL) - class ConsistencyGroupStatusField(BaseEnumField): AUTO_TYPE = ConsistencyGroupStatus() -class ReplicationStatus(Enum): +class GroupStatus(BaseCinderEnum): + ERROR = 'error' + AVAILABLE = 'available' + CREATING = 'creating' + DELETING = 'deleting' + DELETED = 'deleted' + UPDATING = 'updating' + IN_USE = 'in-use' + ERROR_DELETING = 'error_deleting' + + ALL = (ERROR, AVAILABLE, 
CREATING, DELETING, DELETED, + UPDATING, IN_USE, ERROR_DELETING) + + +class GroupStatusField(BaseEnumField): + AUTO_TYPE = GroupStatus() + + +class ReplicationStatus(BaseCinderEnum): ERROR = 'error' ENABLED = 'enabled' DISABLED = 'disabled' @@ -76,16 +92,12 @@ class ReplicationStatus(Enum): ALL = (ERROR, ENABLED, DISABLED, NOT_CAPABLE, FAILOVER_ERROR, FAILING_OVER, FAILED_OVER) - def __init__(self): - super(ReplicationStatus, self).__init__( - valid_values=ReplicationStatus.ALL) - class ReplicationStatusField(BaseEnumField): AUTO_TYPE = ReplicationStatus() -class SnapshotStatus(Enum): +class SnapshotStatus(BaseCinderEnum): ERROR = 'error' AVAILABLE = 'available' CREATING = 'creating' @@ -97,10 +109,18 @@ class SnapshotStatus(Enum): ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, UPDATING, ERROR_DELETING) - def __init__(self): - super(SnapshotStatus, self).__init__( - valid_values=SnapshotStatus.ALL) - class SnapshotStatusField(BaseEnumField): AUTO_TYPE = SnapshotStatus() + + +class QoSConsumerValues(BaseCinderEnum): + BACK_END = 'back-end' + FRONT_END = 'front-end' + BOTH = 'both' + + ALL = (BACK_END, FRONT_END, BOTH) + + +class QoSConsumerField(BaseEnumField): + AUTO_TYPE = QoSConsumerValues() diff --git a/cinder/objects/group.py b/cinder/objects/group.py new file mode 100644 index 000000000..68244080e --- /dev/null +++ b/cinder/objects/group.py @@ -0,0 +1,197 @@ +# Copyright 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
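[editor's note: an illustrative, self-contained sketch of the BaseCinderEnum pattern introduced in cinder/objects/fields.py above; each enum subclass now only declares its constants plus an ALL tuple, and the shared __init__ feeds ALL to oslo.versionedobjects as the valid values. The Demo* names are the editor's; this is not part of the patch.]

from oslo_versionedobjects import fields as ovo_fields


class BaseEnum(ovo_fields.Enum):
    def __init__(self):
        super(BaseEnum, self).__init__(valid_values=self.__class__.ALL)


class DemoStatus(BaseEnum):
    ERROR = 'error'
    AVAILABLE = 'available'

    ALL = (ERROR, AVAILABLE)


class DemoStatusField(ovo_fields.BaseEnumField):
    AUTO_TYPE = DemoStatus()


# 'available' coerces cleanly; anything outside ALL raises ValueError.
field = DemoStatusField()
assert field.coerce(None, 'status', 'available') == 'available'
try:
    field.coerce(None, 'status', 'bogus')
except ValueError:
    pass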
+ +from cinder import db +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder.objects import base +from cinder.objects import fields as c_fields +from oslo_versionedobjects import fields + +OPTIONAL_FIELDS = ['volumes', 'volume_types', 'group_snapshots'] + + +@base.CinderObjectRegistry.register +class Group(base.CinderPersistentObject, base.CinderObject, + base.CinderObjectDictCompat): + # Version 1.0: Initial version + # Version 1.1: Added group_snapshots, group_snapshot_id, and + # source_group_id + VERSION = '1.1' + + fields = { + 'id': fields.UUIDField(), + 'user_id': fields.StringField(), + 'project_id': fields.StringField(), + 'cluster_name': fields.StringField(nullable=True), + 'host': fields.StringField(nullable=True), + 'availability_zone': fields.StringField(nullable=True), + 'name': fields.StringField(nullable=True), + 'description': fields.StringField(nullable=True), + 'group_type_id': fields.StringField(), + 'volume_type_ids': fields.ListOfStringsField(nullable=True), + 'status': c_fields.GroupStatusField(nullable=True), + 'group_snapshot_id': fields.UUIDField(nullable=True), + 'source_group_id': fields.UUIDField(nullable=True), + 'volumes': fields.ObjectField('VolumeList', nullable=True), + 'volume_types': fields.ObjectField('VolumeTypeList', + nullable=True), + 'group_snapshots': fields.ObjectField('GroupSnapshotList', + nullable=True), + } + + @staticmethod + def _from_db_object(context, group, db_group, + expected_attrs=None): + if expected_attrs is None: + expected_attrs = [] + for name, field in group.fields.items(): + if name in OPTIONAL_FIELDS: + continue + value = db_group.get(name) + setattr(group, name, value) + + if 'volumes' in expected_attrs: + volumes = base.obj_make_list( + context, objects.VolumeList(context), + objects.Volume, + db_group['volumes']) + group.volumes = volumes + + if 'volume_types' in expected_attrs: + volume_types = base.obj_make_list( + context, objects.VolumeTypeList(context), + objects.VolumeType, + db_group['volume_types']) + group.volume_types = volume_types + + if 'group_snapshots' in expected_attrs: + group_snapshots = base.obj_make_list( + context, objects.GroupSnapshotList(context), + objects.GroupSnapshot, + db_group['group_snapshots']) + group.group_snapshots = group_snapshots + + group._context = context + group.obj_reset_changes() + return group + + def create(self, group_snapshot_id=None, source_group_id=None): + if self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='create', + reason=_('already_created')) + updates = self.cinder_obj_get_changes() + + if 'volume_types' in updates: + raise exception.ObjectActionError( + action='create', + reason=_('volume_types assigned')) + + if 'volumes' in updates: + raise exception.ObjectActionError(action='create', + reason=_('volumes assigned')) + + if 'group_snapshots' in updates: + raise exception.ObjectActionError( + action='create', + reason=_('group_snapshots assigned')) + + db_groups = db.group_create(self._context, + updates, + group_snapshot_id, + source_group_id) + self._from_db_object(self._context, self, db_groups) + + def obj_load_attr(self, attrname): + if attrname not in OPTIONAL_FIELDS: + raise exception.ObjectActionError( + action='obj_load_attr', + reason=_('attribute %s not lazy-loadable') % attrname) + if not self._context: + raise exception.OrphanedObjectError(method='obj_load_attr', + objtype=self.obj_name()) + + if attrname == 'volume_types': + self.volume_types = objects.VolumeTypeList.get_all_by_group( + 
self._context, self.id)
+
+        if attrname == 'volumes':
+            self.volumes = objects.VolumeList.get_all_by_generic_group(
+                self._context, self.id)
+
+        if attrname == 'group_snapshots':
+            self.group_snapshots = objects.GroupSnapshotList.get_all_by_group(
+                self._context, self.id)
+
+        self.obj_reset_changes(fields=[attrname])
+
+    def save(self):
+        updates = self.cinder_obj_get_changes()
+        if updates:
+            if 'volume_types' in updates:
+                msg = _('Cannot save volume_types changes in group object '
+                        'update.')
+                raise exception.ObjectActionError(
+                    action='save', reason=msg)
+            if 'volumes' in updates:
+                msg = _('Cannot save volumes changes in group object update.')
+                raise exception.ObjectActionError(
+                    action='save', reason=msg)
+            if 'group_snapshots' in updates:
+                msg = _('Cannot save group_snapshots changes in group object '
+                        'update.')
+                raise exception.ObjectActionError(
+                    action='save', reason=msg)
+
+            db.group_update(self._context, self.id, updates)
+            self.obj_reset_changes()
+
+    def destroy(self):
+        with self.obj_as_admin():
+            db.group_destroy(self._context, self.id)
+
+
+@base.CinderObjectRegistry.register
+class GroupList(base.ObjectListBase, base.CinderObject):
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Group')
+    }
+    child_versions = {
+        '1.0': '1.0',
+    }
+
+    @classmethod
+    def get_all(cls, context, filters=None, marker=None, limit=None,
+                offset=None, sort_keys=None, sort_dirs=None):
+        groups = db.group_get_all(
+            context, filters=filters, marker=marker, limit=limit,
+            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
+        return base.obj_make_list(context, cls(context),
+                                  objects.Group,
+                                  groups)
+
+    @classmethod
+    def get_all_by_project(cls, context, project_id, filters=None, marker=None,
+                           limit=None, offset=None, sort_keys=None,
+                           sort_dirs=None):
+        groups = db.group_get_all_by_project(
+            context, project_id, filters=filters, marker=marker, limit=limit,
+            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
+        return base.obj_make_list(context, cls(context),
+                                  objects.Group,
+                                  groups)
diff --git a/cinder/objects/group_snapshot.py b/cinder/objects/group_snapshot.py
new file mode 100644
index 000000000..0fb5062e1
--- /dev/null
+++ b/cinder/objects/group_snapshot.py
@@ -0,0 +1,152 @@
+# Copyright 2016 EMC Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
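[editor's note: Group, GroupSnapshot, and the other objects in this patch share the obj_load_attr convention, where relationships listed in OPTIONAL_FIELDS are fetched on first access and then excluded from change tracking. A minimal standalone sketch of that convention follows, with a plain callable standing in for the db/objects list queries; it is not part of the patch.]

class LazyLoading(object):
    OPTIONAL_FIELDS = ('volumes', 'group_snapshots')

    def __init__(self, loader):
        self._loader = loader  # stand-in for the db/objects list queries

    def __getattr__(self, name):
        # Mirrors obj_load_attr: only relationships are lazy-loadable.
        if name not in self.OPTIONAL_FIELDS:
            raise AttributeError(name)
        value = self._loader(name)
        setattr(self, name, value)  # cache so __getattr__ won't re-fire
        return value


g = LazyLoading(lambda name: ['%s-1' % name])
assert g.volumes == ['volumes-1']   # loaded on first access
assert 'volumes' in g.__dict__      # cached afterwards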
+
+from cinder import db
+from cinder import exception
+from cinder.i18n import _
+from cinder import objects
+from cinder.objects import base
+from oslo_versionedobjects import fields
+
+OPTIONAL_FIELDS = ['group', 'snapshots']
+
+
+@base.CinderObjectRegistry.register
+class GroupSnapshot(base.CinderPersistentObject, base.CinderObject,
+                    base.CinderObjectDictCompat):
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.UUIDField(),
+        'group_id': fields.UUIDField(nullable=False),
+        'project_id': fields.StringField(nullable=True),
+        'user_id': fields.StringField(nullable=True),
+        'name': fields.StringField(nullable=True),
+        'description': fields.StringField(nullable=True),
+        'status': fields.StringField(nullable=True),
+        'group_type_id': fields.UUIDField(nullable=True),
+        'group': fields.ObjectField('Group', nullable=True),
+        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
+    }
+
+    @staticmethod
+    def _from_db_object(context, group_snapshot, db_group_snapshots,
+                        expected_attrs=None):
+        expected_attrs = expected_attrs or []
+        for name, field in group_snapshot.fields.items():
+            if name in OPTIONAL_FIELDS:
+                continue
+            value = db_group_snapshots.get(name)
+            setattr(group_snapshot, name, value)
+
+        if 'group' in expected_attrs:
+            group = objects.Group(context)
+            group._from_db_object(context, group,
+                                  db_group_snapshots['group'])
+            group_snapshot.group = group
+
+        if 'snapshots' in expected_attrs:
+            snapshots = base.obj_make_list(
+                context, objects.SnapshotList(context),
+                objects.Snapshot,
+                db_group_snapshots['snapshots'])
+            group_snapshot.snapshots = snapshots
+
+        group_snapshot._context = context
+        group_snapshot.obj_reset_changes()
+        return group_snapshot
+
+    def create(self):
+        if self.obj_attr_is_set('id'):
+            raise exception.ObjectActionError(action='create',
+                                              reason=_('already_created'))
+        updates = self.cinder_obj_get_changes()
+
+        if 'group' in updates:
+            raise exception.ObjectActionError(
+                action='create', reason=_('group assigned'))
+
+        db_group_snapshots = db.group_snapshot_create(self._context, updates)
+        self._from_db_object(self._context, self, db_group_snapshots)
+
+    def obj_load_attr(self, attrname):
+        if attrname not in OPTIONAL_FIELDS:
+            raise exception.ObjectActionError(
+                action='obj_load_attr',
+                reason=_('attribute %s not lazy-loadable') % attrname)
+        if not self._context:
+            raise exception.OrphanedObjectError(method='obj_load_attr',
+                                                objtype=self.obj_name())
+
+        if attrname == 'group':
+            self.group = objects.Group.get_by_id(
+                self._context, self.group_id)
+
+        if attrname == 'snapshots':
+            self.snapshots = objects.SnapshotList.get_all_for_group_snapshot(
+                self._context, self.id)
+
+        self.obj_reset_changes(fields=[attrname])
+
+    def save(self):
+        updates = self.cinder_obj_get_changes()
+        if updates:
+            if 'group' in updates:
+                raise exception.ObjectActionError(
+                    action='save', reason=_('group changed'))
+            if 'snapshots' in updates:
+                raise exception.ObjectActionError(
+                    action='save', reason=_('snapshots changed'))
+            db.group_snapshot_update(self._context, self.id, updates)
+            self.obj_reset_changes()
+
+    def destroy(self):
+        with self.obj_as_admin():
+            updated_values = db.group_snapshot_destroy(self._context, self.id)
+        self.update(updated_values)
+        self.obj_reset_changes(updated_values.keys())
+
+
+@base.CinderObjectRegistry.register
+class GroupSnapshotList(base.ObjectListBase, base.CinderObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('GroupSnapshot')
+    }
+    child_versions = {
+        '1.0': '1.0'
+    }
+
+    @classmethod
+    def 
get_all(cls, context, filters=None): + group_snapshots = db.group_snapshot_get_all(context, filters) + return base.obj_make_list(context, cls(context), objects.GroupSnapshot, + group_snapshots) + + @classmethod + def get_all_by_project(cls, context, project_id, filters=None): + group_snapshots = db.group_snapshot_get_all_by_project(context, + project_id, + filters) + return base.obj_make_list(context, cls(context), objects.GroupSnapshot, + group_snapshots) + + @classmethod + def get_all_by_group(cls, context, group_id, filters=None): + group_snapshots = db.group_snapshot_get_all_by_group(context, group_id, + filters) + return base.obj_make_list(context, cls(context), + objects.GroupSnapshot, + group_snapshots) diff --git a/cinder/objects/group_type.py b/cinder/objects/group_type.py new file mode 100644 index 000000000..bae6b9b7d --- /dev/null +++ b/cinder/objects/group_type.py @@ -0,0 +1,121 @@ +# Copyright 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import fields + +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder.objects import base +from cinder.volume import group_types + + +OPTIONAL_FIELDS = ['group_specs', 'projects'] + + +@base.CinderObjectRegistry.register +class GroupType(base.CinderPersistentObject, base.CinderObject, + base.CinderObjectDictCompat, base.CinderComparableObject): + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'id': fields.UUIDField(), + 'name': fields.StringField(nullable=True), + 'description': fields.StringField(nullable=True), + 'is_public': fields.BooleanField(default=True, nullable=True), + 'projects': fields.ListOfStringsField(nullable=True), + 'group_specs': fields.DictOfNullableStringsField(nullable=True), + } + + @classmethod + def _get_expected_attrs(cls, context): + return 'group_specs', 'projects' + + @staticmethod + def _from_db_object(context, type, db_type, expected_attrs=None): + if expected_attrs is None: + expected_attrs = [] + for name, field in type.fields.items(): + if name in OPTIONAL_FIELDS: + continue + value = db_type[name] + if isinstance(field, fields.IntegerField): + value = value or 0 + type[name] = value + + # Get data from db_type object that was queried by joined query + # from DB + if 'group_specs' in expected_attrs: + type.group_specs = {} + specs = db_type.get('group_specs') + if specs and isinstance(specs, list): + type.group_specs = {item['key']: item['value'] + for item in specs} + elif specs and isinstance(specs, dict): + type.group_specs = specs + if 'projects' in expected_attrs: + type.projects = db_type.get('projects', []) + + type._context = context + type.obj_reset_changes() + return type + + def create(self): + if self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='create', + reason=_('already created')) + db_group_type = group_types.create(self._context, self.name, + self.group_specs, + self.is_public, self.projects, + self.description) + self._from_db_object(self._context, self, 
db_group_type) + + def save(self): + updates = self.cinder_obj_get_changes() + if updates: + group_types.update(self._context, self.id, self.name, + self.description) + self.obj_reset_changes() + + def destroy(self): + with self.obj_as_admin(): + group_types.destroy(self._context, self.id) + + +@base.CinderObjectRegistry.register +class GroupTypeList(base.ObjectListBase, base.CinderObject): + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'objects': fields.ListOfObjectsField('GroupType'), + } + + child_versions = { + '1.0': '1.0', + } + + @classmethod + def get_all(cls, context, inactive=0, filters=None, marker=None, + limit=None, sort_keys=None, sort_dirs=None, offset=None): + types = group_types.get_all_group_types(context, inactive, filters, + marker=marker, limit=limit, + sort_keys=sort_keys, + sort_dirs=sort_dirs, + offset=offset) + expected_attrs = GroupType._get_expected_attrs(context) + return base.obj_make_list(context, cls(context), + objects.GroupType, types.values(), + expected_attrs=expected_attrs) diff --git a/cinder/objects/qos_specs.py b/cinder/objects/qos_specs.py new file mode 100644 index 000000000..d9b63cd34 --- /dev/null +++ b/cinder/objects/qos_specs.py @@ -0,0 +1,202 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
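[editor's note: GroupType._from_db_object above accepts group_specs either as the joined rows (a list of {'key': ..., 'value': ...} dicts) or as an already-flattened dict. A hedged standalone sketch of that normalization follows; the function name and the example spec key are the editor's, not the patch's.]

def normalize_group_specs(specs):
    # Joined query result: a list of key/value rows becomes a flat dict.
    if specs and isinstance(specs, list):
        return {item['key']: item['value'] for item in specs}
    # Already a dict, or empty/None: fall back to an empty dict.
    return specs if isinstance(specs, dict) else {}


assert normalize_group_specs(
    [{'key': 'group_replication_enabled', 'value': '<is> True'}]
) == {'group_replication_enabled': '<is> True'}
assert normalize_group_specs(None) == {}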
+ +from oslo_db import exception as db_exc +from oslo_log import log as logging + +from cinder import db +from cinder import exception +from cinder.i18n import _, _LE +from cinder import objects +from cinder.objects import base +from cinder.objects import fields as c_fields +from oslo_versionedobjects import fields + +LOG = logging.getLogger(__name__) + + +@base.CinderObjectRegistry.register +class QualityOfServiceSpecs(base.CinderPersistentObject, + base.CinderObject, + base.CinderObjectDictCompat, + base.CinderComparableObject): + # Version + # 1.0: Initial version + VERSION = "1.0" + + OPTIONAL_FIELDS = ['volume_types'] + + fields = { + 'id': fields.UUIDField(), + 'name': fields.StringField(), + 'consumer': c_fields.QoSConsumerField( + default=c_fields.QoSConsumerValues.BACK_END), + 'specs': fields.DictOfNullableStringsField(nullable=True), + 'volume_types': fields.ObjectField('VolumeTypeList', nullable=True), + } + + def __init__(self, *args, **kwargs): + super(QualityOfServiceSpecs, self).__init__(*args, **kwargs) + self._init_specs = {} + + def __setattr__(self, name, value): + try: + super(QualityOfServiceSpecs, self).__setattr__(name, value) + except ValueError: + if name == 'consumer': + # Give more descriptive error message for invalid 'consumer' + msg = (_("Valid consumer of QoS specs are: %s") % + c_fields.QoSConsumerField()) + raise exception.InvalidQoSSpecs(reason=msg) + else: + raise + + def obj_reset_changes(self, fields=None, recursive=False): + super(QualityOfServiceSpecs, self).obj_reset_changes(fields, recursive) + if fields is None or 'specs' in fields: + self._init_specs = self.specs.copy() if self.specs else {} + + def obj_what_changed(self): + changes = super(QualityOfServiceSpecs, self).obj_what_changed() + + # Do comparison of what's in the dict vs. 
reference to the specs object + if self.obj_attr_is_set('id'): + if self.specs != self._init_specs: + changes.add('specs') + else: + # If both dicts are equal don't consider anything gets changed + if 'specs' in changes: + changes.remove('specs') + + return changes + + def obj_get_changes(self): + changes = super(QualityOfServiceSpecs, self).obj_get_changes() + if 'specs' in changes: + # For specs, we only want what has changed in the dictionary, + # because otherwise we'll individually overwrite the DB value for + # every key in 'specs' even if it hasn't changed + specs_changes = {} + for key, val in self.specs.items(): + if val != self._init_specs.get(key): + specs_changes[key] = val + changes['specs'] = specs_changes + + specs_keys_removed = (set(self._init_specs.keys()) - + set(self.specs.keys())) + if specs_keys_removed: + # Special key notifying which specs keys have been deleted + changes['specs_keys_removed'] = specs_keys_removed + + return changes + + def obj_load_attr(self, attrname): + if attrname not in QualityOfServiceSpecs.OPTIONAL_FIELDS: + raise exception.ObjectActionError( + action='obj_load_attr', + reason=_('attribute %s not lazy-loadable') % attrname) + if not self._context: + raise exception.OrphanedObjectError(method='obj_load_attr', + objtype=self.obj_name()) + + if attrname == 'volume_types': + self.volume_types = objects.VolumeTypeList.get_all_types_for_qos( + self._context, self.id) + + @staticmethod + def _from_db_object(context, qos_spec, db_qos_spec, expected_attrs=None): + if expected_attrs is None: + expected_attrs = [] + + for name, field in qos_spec.fields.items(): + if name not in QualityOfServiceSpecs.OPTIONAL_FIELDS: + value = db_qos_spec.get(name) + # 'specs' could be null if only a consumer is given, so make + # it an empty dict instead of None + if not value and isinstance(field, fields.DictOfStringsField): + value = {} + setattr(qos_spec, name, value) + + if 'volume_types' in expected_attrs: + volume_types = objects.VolumeTypeList.get_all_types_for_qos( + context, db_qos_spec['id']) + qos_spec.volume_types = volume_types + + qos_spec._context = context + qos_spec.obj_reset_changes() + return qos_spec + + def create(self): + if self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='create', + reason='already created') + updates = self.cinder_obj_get_changes() + + try: + create_ret = db.qos_specs_create(self._context, updates) + except db_exc.DBDataError: + msg = _('Error writing field to database') + LOG.exception(msg) + raise exception.Invalid(msg) + except db_exc.DBError: + LOG.exception(_LE('DB error occurred when creating QoS specs.')) + raise exception.QoSSpecsCreateFailed(name=self.name, + qos_specs=self.specs) + # Save ID with the object + updates['id'] = create_ret['id'] + self._from_db_object(self._context, self, updates) + + def save(self): + updates = self.cinder_obj_get_changes() + if updates: + if 'specs_keys_removed' in updates.keys(): + for specs_key_to_remove in updates['specs_keys_removed']: + db.qos_specs_item_delete( + self._context, self.id, specs_key_to_remove) + del updates['specs_keys_removed'] + db.qos_specs_update(self._context, self.id, updates) + + self.obj_reset_changes() + + def destroy(self, force=False): + """Deletes the QoS spec. + + :param force: when force is True, all volume_type mappings for this QoS + are deleted. 
When force is False and volume_type + mappings still exist, a QoSSpecsInUse exception is thrown + """ + if self.volume_types: + if not force: + raise exception.QoSSpecsInUse(specs_id=self.id) + # remove all association + db.qos_specs_disassociate_all(self._context, self.id) + updated_values = db.qos_specs_delete(self._context, self.id) + self.update(updated_values) + self.obj_reset_changes(updated_values.keys()) + + +@base.CinderObjectRegistry.register +class QualityOfServiceSpecsList(base.ObjectListBase, base.CinderObject): + VERSION = '1.0' + + fields = { + 'objects': fields.ListOfObjectsField('QualityOfServiceSpecs'), + } + child_versions = { + '1.0': '1.0', + } + + @classmethod + def get_all(cls, context, *args, **kwargs): + specs = db.qos_specs_get_all(context, *args, **kwargs) + return base.obj_make_list(context, cls(context), + objects.QualityOfServiceSpecs, specs) diff --git a/cinder/objects/request_spec.py b/cinder/objects/request_spec.py new file mode 100644 index 000000000..b60c08534 --- /dev/null +++ b/cinder/objects/request_spec.py @@ -0,0 +1,127 @@ +# Copyright 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import fields + +from cinder import objects +from cinder.objects import base + + +@base.CinderObjectRegistry.register +class RequestSpec(base.CinderObject, base.CinderObjectDictCompat, + base.CinderComparableObject): + # Version 1.0: Initial version + # Version 1.1: Added group_id and group_backend + VERSION = '1.1' + + fields = { + 'consistencygroup_id': fields.UUIDField(nullable=True), + 'group_id': fields.UUIDField(nullable=True), + 'cgsnapshot_id': fields.UUIDField(nullable=True), + 'image_id': fields.UUIDField(nullable=True), + 'snapshot_id': fields.UUIDField(nullable=True), + 'source_replicaid': fields.UUIDField(nullable=True), + 'source_volid': fields.UUIDField(nullable=True), + 'volume_id': fields.UUIDField(nullable=True), + 'volume': fields.ObjectField('Volume', nullable=True), + 'volume_type': fields.ObjectField('VolumeType', nullable=True), + 'volume_properties': fields.ObjectField('VolumeProperties', + nullable=True), + 'CG_backend': fields.StringField(nullable=True), + 'group_backend': fields.StringField(nullable=True), + } + + obj_extra_fields = ['resource_properties'] + + @property + def resource_properties(self): + # TODO(dulek): This is to maintain compatibility with filters from + # oslo-incubator. As we've moved them into our codebase we should adapt + # them to use volume_properties and remove this shim. + return self.volume_properties + + @classmethod + def from_primitives(cls, spec): + """Returns RequestSpec object creating it from legacy dictionary. + + FIXME(dulek): This should go away in early O as we stop supporting + backward compatibility with M. 
+ """ + spec = spec.copy() + spec_obj = cls() + + vol_props = spec.pop('volume_properties', {}) + if vol_props is not None: + vol_props = VolumeProperties(**vol_props) + spec_obj.volume_properties = vol_props + + if 'volume' in spec: + vol = spec.pop('volume', {}) + vol.pop('name', None) + if vol is not None: + vol = objects.Volume(**vol) + spec_obj.volume = vol + + if 'volume_type' in spec: + vol_type = spec.pop('volume_type', {}) + if vol_type is not None: + vol_type = objects.VolumeType(**vol_type) + spec_obj.volume_type = vol_type + + spec.pop('resource_properties', None) + + for k, v in spec.items(): + setattr(spec_obj, k, v) + + return spec_obj + + +@base.CinderObjectRegistry.register +class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat): + # Version 1.0: Initial version + # Version 1.1: Added group_id and group_type_id + VERSION = '1.1' + + # TODO(dulek): We should add this to initially move volume_properites to + # ovo, but this should be removed as soon as possible. Most of the data + # here is already in request_spec and volume there. Outstanding ones would + # be reservation, and qos_specs. First one may be moved to request_spec and + # second added as relationship in volume_type field and whole + # volume_properties (and resource_properties) in request_spec won't be + # needed. + + fields = { + 'attach_status': fields.StringField(nullable=True), + 'availability_zone': fields.StringField(nullable=True), + 'cgsnapshot_id': fields.UUIDField(nullable=True), + 'consistencygroup_id': fields.UUIDField(nullable=True), + 'group_id': fields.UUIDField(nullable=True), + 'display_description': fields.StringField(nullable=True), + 'display_name': fields.StringField(nullable=True), + 'encryption_key_id': fields.UUIDField(nullable=True), + 'metadata': fields.DictOfStringsField(nullable=True), + 'multiattach': fields.BooleanField(nullable=True), + 'project_id': fields.StringField(nullable=True), + 'qos_specs': fields.DictOfStringsField(nullable=True), + 'replication_status': fields.StringField(nullable=True), + 'reservations': fields.ListOfStringsField(nullable=True), + 'size': fields.IntegerField(nullable=True), + 'snapshot_id': fields.UUIDField(nullable=True), + 'source_replicaid': fields.UUIDField(nullable=True), + 'source_volid': fields.UUIDField(nullable=True), + 'status': fields.StringField(nullable=True), + 'user_id': fields.StringField(nullable=True), + 'volume_type_id': fields.UUIDField(nullable=True), + 'group_type_id': fields.UUIDField(nullable=True), + } diff --git a/cinder/objects/service.py b/cinder/objects/service.py index d64f21be6..aeae75b61 100644 --- a/cinder/objects/service.py +++ b/cinder/objects/service.py @@ -25,18 +25,24 @@ from cinder.objects import fields as c_fields @base.CinderObjectRegistry.register class Service(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, - base.CinderComparableObject): + base.CinderObjectDictCompat, base.CinderComparableObject, + base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Add rpc_current_version and object_current_version fields # Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version() # Version 1.3: Add replication fields - VERSION = '1.3' + # Version 1.4: Add cluster fields + VERSION = '1.4' + + OPTIONAL_FIELDS = ('cluster',) fields = { 'id': fields.IntegerField(), 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), + 'cluster_name': fields.StringField(nullable=True), + 'cluster': fields.ObjectField('Cluster', 
nullable=True, + read_only=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(default=0), 'disabled': fields.BooleanField(default=False, nullable=True), @@ -54,9 +60,23 @@ class Service(base.CinderPersistentObject, base.CinderObject, 'active_backend_id': fields.StringField(nullable=True), } + def obj_make_compatible(self, primitive, target_version): + """Make a service representation compatible with a target version.""" + # Convert all related objects + super(Service, self).obj_make_compatible(primitive, target_version) + + target_version = versionutils.convert_version_to_tuple(target_version) + # Before v1.4 we didn't have cluster fields so we have to remove them. + if target_version < (1, 4): + for obj_field in ('cluster', 'cluster_name'): + primitive.pop(obj_field, None) + @staticmethod - def _from_db_object(context, service, db_service): + def _from_db_object(context, service, db_service, expected_attrs=None): + expected_attrs = expected_attrs or [] for name, field in service.fields.items(): + if name in Service.OPTIONAL_FIELDS: + continue value = db_service.get(name) if isinstance(field, fields.IntegerField): value = value or 0 @@ -65,17 +85,49 @@ class Service(base.CinderPersistentObject, base.CinderObject, service[name] = value service._context = context + if 'cluster' in expected_attrs: + db_cluster = db_service.get('cluster') + # If this service doesn't belong to a cluster the cluster field in + # the ORM instance will have value of None. + if db_cluster: + service.cluster = objects.Cluster(context) + objects.Cluster._from_db_object(context, service.cluster, + db_cluster) + else: + service.cluster = None + service.obj_reset_changes() return service + def obj_load_attr(self, attrname): + if attrname not in self.OPTIONAL_FIELDS: + raise exception.ObjectActionError( + action='obj_load_attr', + reason=_('attribute %s not lazy-loadable') % attrname) + if not self._context: + raise exception.OrphanedObjectError(method='obj_load_attr', + objtype=self.obj_name()) + + # NOTE(geguileo): We only have 1 optional field, so we don't need to + # confirm that we are loading the cluster. + # If this service doesn't belong to a cluster (cluster_name is empty), + # then cluster field will be None. 
+ if self.cluster_name: + self.cluster = objects.Cluster.get_by_id(self._context, + name=self.cluster_name) + else: + self.cluster = None + self.obj_reset_changes(fields=(attrname,)) + @classmethod def get_by_host_and_topic(cls, context, host, topic): - db_service = db.service_get_by_host_and_topic(context, host, topic) + db_service = db.service_get(context, disabled=False, host=host, + topic=topic) return cls._from_db_object(context, cls(context), db_service) @classmethod def get_by_args(cls, context, host, binary_key): - db_service = db.service_get_by_args(context, host, binary_key) + db_service = db.service_get(context, host=host, binary=binary_key) return cls._from_db_object(context, cls(context), db_service) def create(self): @@ -83,18 +135,26 @@ class Service(base.CinderPersistentObject, base.CinderObject, raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() + if 'cluster' in updates: + raise exception.ObjectActionError( + action='create', reason=_('cluster assigned')) db_service = db.service_create(self._context, updates) self._from_db_object(self._context, self, db_service) def save(self): updates = self.cinder_obj_get_changes() + if 'cluster' in updates: + raise exception.ObjectActionError( + action='save', reason=_('cluster changed')) if updates: db.service_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): - db.service_destroy(self._context, self.id) + updated_values = db.service_destroy(self._context, self.id) + self.update(updated_values) + self.obj_reset_changes(updated_values.keys()) @classmethod def _get_minimum_version(cls, attribute, context, binary): @@ -104,12 +164,13 @@ class Service(base.CinderPersistentObject, base.CinderObject, for s in services: ver_str = getattr(s, attribute) if ver_str is None: - # FIXME(dulek) None in *_current_version means that this - # service is in Liberty version, so we must assume this is the - # lowest one. We use handy and easy to remember token to - # indicate that. This may go away as soon as we drop - # compatibility with Liberty, possibly in early N. - return 'liberty' + # NOTE(dulek) None in *_current_version means that this + # service is in Liberty version, which we now don't provide + # backward compatibility to. + msg = _('One of the services is in Liberty version. 
We do not '
+                    'provide backward compatibility with Liberty anymore; '
+                    'you need to upgrade to Mitaka first.')
+            raise exception.ServiceTooOld(msg)
             ver = versionutils.convert_version_to_int(ver_str)
             if min_ver is None or ver < min_ver:
                 min_ver = ver
@@ -122,7 +183,7 @@ class Service(base.CinderPersistentObject, base.CinderObject,
         return cls._get_minimum_version('rpc_current_version', context, binary)
 
     @classmethod
-    def get_minimum_obj_version(cls, context, binary):
+    def get_minimum_obj_version(cls, context, binary=None):
         return cls._get_minimum_version('object_current_version', context,
                                         binary)
 
@@ -139,20 +200,19 @@ class ServiceList(base.ObjectListBase, base.CinderObject):
 
     @classmethod
     def get_all(cls, context, filters=None):
-        services = db.service_get_all(context, filters)
+        services = db.service_get_all(context, **(filters or {}))
         return base.obj_make_list(context, cls(context), objects.Service,
                                   services)
 
     @classmethod
     def get_all_by_topic(cls, context, topic, disabled=None):
-        services = db.service_get_all_by_topic(context, topic,
-                                               disabled=disabled)
+        services = db.service_get_all(context, topic=topic, disabled=disabled)
         return base.obj_make_list(context, cls(context), objects.Service,
                                   services)
 
     @classmethod
     def get_all_by_binary(cls, context, binary, disabled=None):
-        services = db.service_get_all_by_binary(context, binary,
-                                                disabled=disabled)
+        services = db.service_get_all(context, binary=binary,
+                                      disabled=disabled)
         return base.obj_make_list(context, cls(context), objects.Service,
                                   services)
diff --git a/cinder/objects/snapshot.py b/cinder/objects/snapshot.py
index ebcfc7dfc..f74e24759 100644
--- a/cinder/objects/snapshot.py
+++ b/cinder/objects/snapshot.py
@@ -36,7 +36,7 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
 
     # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
     # are typically the relationship in the sqlalchemy object.
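[editor's note: an illustrative reduction of Service._get_minimum_version above, which scans every service's reported version string and returns the lowest; a None version (a Liberty-era service) now raises instead of capping to 'liberty'. RuntimeError stands in for exception.ServiceTooOld; this is not part of the patch.]

from oslo_utils import versionutils


def minimum_version(version_strings):
    min_ver = None
    min_ver_str = None
    for ver_str in version_strings:
        if ver_str is None:
            # Pre-Mitaka services no longer get a compatibility cap.
            raise RuntimeError('Service too old; upgrade to Mitaka first.')
        ver = versionutils.convert_version_to_int(ver_str)
        if min_ver is None or ver < min_ver:
            min_ver, min_ver_str = ver, ver_str
    return min_ver_str


assert minimum_version(['1.8', '1.3', '2.0']) == '1.3'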
- OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot') + OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot', 'group_snapshot') fields = { 'id': fields.UUIDField(), @@ -46,6 +46,7 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject, 'volume_id': fields.UUIDField(nullable=True), 'cgsnapshot_id': fields.UUIDField(nullable=True), + 'group_snapshot_id': fields.UUIDField(nullable=True), 'status': c_fields.SnapshotStatusField(nullable=True), 'progress': fields.StringField(nullable=True), 'volume_size': fields.IntegerField(nullable=True), @@ -63,10 +64,15 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject, 'volume': fields.ObjectField('Volume', nullable=True), 'cgsnapshot': fields.ObjectField('CGSnapshot', nullable=True), + 'group_snapshot': fields.ObjectField('GroupSnapshot', nullable=True), } + @property + def service_topic_queue(self): + return self.volume.service_topic_queue + @classmethod - def _get_expected_attrs(cls, context): + def _get_expected_attrs(cls, context, *args, **kwargs): return 'metadata', # NOTE(thangp): obj_extra_fields is used to hold properties that are not @@ -129,6 +135,12 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject, cgsnapshot._from_db_object(context, cgsnapshot, db_snapshot['cgsnapshot']) snapshot.cgsnapshot = cgsnapshot + if 'group_snapshot' in expected_attrs: + group_snapshot = objects.GroupSnapshot(context) + group_snapshot._from_db_object(context, group_snapshot, + db_snapshot['group_snapshot']) + snapshot.group_snapshot = group_snapshot + if 'metadata' in expected_attrs: metadata = db_snapshot.get('snapshot_metadata') if metadata is None: @@ -151,6 +163,13 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject, if 'cgsnapshot' in updates: raise exception.ObjectActionError(action='create', reason=_('cgsnapshot assigned')) + if 'cluster' in updates: + raise exception.ObjectActionError( + action='create', reason=_('cluster assigned')) + if 'group_snapshot' in updates: + raise exception.ObjectActionError( + action='create', + reason=_('group_snapshot assigned')) db_snapshot = db.snapshot_create(self._context, updates) self._from_db_object(self._context, self, db_snapshot) @@ -164,6 +183,13 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject, if 'cgsnapshot' in updates: raise exception.ObjectActionError( action='save', reason=_('cgsnapshot changed')) + if 'group_snapshot' in updates: + raise exception.ObjectActionError( + action='save', reason=_('group_snapshot changed')) + + if 'cluster' in updates: + raise exception.ObjectActionError( + action='save', reason=_('cluster changed')) if 'metadata' in updates: # Metadata items that are not specified in the @@ -178,7 +204,9 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject, self.obj_reset_changes() def destroy(self): - db.snapshot_destroy(self._context, self.id) + updated_values = db.snapshot_destroy(self._context, self.id) + self.update(updated_values) + self.obj_reset_changes(updated_values.keys()) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: @@ -197,6 +225,11 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject, self.cgsnapshot = objects.CGSnapshot.get_by_id(self._context, self.cgsnapshot_id) + if attrname == 'group_snapshot': + self.group_snapshot = objects.GroupSnapshot.get_by_id( + self._context, + self.group_snapshot_id) + self.obj_reset_changes(fields=[attrname]) def delete_metadata_key(self, context, key): @@ -271,3 +304,11 @@ class SnapshotList(base.ObjectListBase, 
base.CinderObject): expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) + + @classmethod + def get_all_for_group_snapshot(cls, context, group_snapshot_id): + snapshots = db.snapshot_get_all_for_group_snapshot( + context, group_snapshot_id) + expected_attrs = Snapshot._get_expected_attrs(context) + return base.obj_make_list(context, cls(context), objects.Snapshot, + snapshots, expected_attrs=expected_attrs) diff --git a/cinder/objects/volume.py b/cinder/objects/volume.py index 7a62f26ec..f1aa619e0 100644 --- a/cinder/objects/volume.py +++ b/cinder/objects/volume.py @@ -49,17 +49,20 @@ class MetadataObject(dict): @base.CinderObjectRegistry.register class Volume(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, base.CinderComparableObject): + base.CinderObjectDictCompat, base.CinderComparableObject, + base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Added metadata, admin_metadata, volume_attachment, and # volume_type # Version 1.2: Added glance_metadata, consistencygroup and snapshots # Version 1.3: Added finish_volume_migration() - VERSION = '1.3' + # Version 1.4: Added cluster fields + # Version 1.5: Added group + VERSION = '1.5' OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata', 'volume_type', 'volume_attachment', 'consistencygroup', - 'snapshots') + 'snapshots', 'cluster', 'group') fields = { 'id': fields.UUIDField(), @@ -70,6 +73,9 @@ class Volume(base.CinderPersistentObject, base.CinderObject, 'snapshot_id': fields.UUIDField(nullable=True), + 'cluster_name': fields.StringField(nullable=True), + 'cluster': fields.ObjectField('Cluster', nullable=True, + read_only=True), 'host': fields.StringField(nullable=True), 'size': fields.IntegerField(nullable=True), 'availability_zone': fields.StringField(nullable=True), @@ -94,6 +100,7 @@ class Volume(base.CinderPersistentObject, base.CinderObject, 'encryption_key_id': fields.UUIDField(nullable=True), 'consistencygroup_id': fields.UUIDField(nullable=True), + 'group_id': fields.UUIDField(nullable=True), 'deleted': fields.BooleanField(default=False, nullable=True), 'bootable': fields.BooleanField(default=False, nullable=True), @@ -114,6 +121,7 @@ class Volume(base.CinderPersistentObject, base.CinderObject, 'consistencygroup': fields.ObjectField('ConsistencyGroup', nullable=True), 'snapshots': fields.ObjectField('SnapshotList', nullable=True), + 'group': fields.ObjectField('Group', nullable=True), } # NOTE(thangp): obj_extra_fields is used to hold properties that are not @@ -122,7 +130,7 @@ class Volume(base.CinderPersistentObject, base.CinderObject, 'volume_admin_metadata', 'volume_glance_metadata'] @classmethod - def _get_expected_attrs(cls, context): + def _get_expected_attrs(cls, context, *args, **kwargs): expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs'] if context.is_admin: expected_attrs.append('admin_metadata') @@ -221,9 +229,15 @@ class Volume(base.CinderPersistentObject, base.CinderObject, return changes def obj_make_compatible(self, primitive, target_version): - """Make an object representation compatible with a target version.""" + """Make a Volume representation compatible with a target version.""" + # Convert all related objects super(Volume, self).obj_make_compatible(primitive, target_version) + target_version = versionutils.convert_version_to_tuple(target_version) + # Before v1.4 we didn't have cluster fields so we have to remove them. 
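[editor's note: Volume (below), Service, and ConsistencyGroup all follow the same obj_make_compatible convention; when serializing for a peer that predates the version that introduced the cluster fields, those fields are stripped from the primitive. A standalone sketch of the convention, under the assumption that only 'cluster'/'cluster_name' were added in the newer version; the function name is the editor's and this is not part of the patch.]

from oslo_utils import versionutils


def make_compatible(primitive, target_version, added_in=(1, 4),
                    new_fields=('cluster', 'cluster_name')):
    # Fields unknown to older versions are dropped from the serialized form.
    if versionutils.convert_version_to_tuple(target_version) < added_in:
        for field in new_fields:
            primitive.pop(field, None)
    return primitive


prim = {'host': 'node1@lvm', 'cluster_name': 'c1'}
assert 'cluster_name' not in make_compatible(prim, '1.3')
assert 'host' in prim  # pre-existing fields survive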
+ if target_version < (1, 4): + for obj_field in ('cluster', 'cluster_name'): + primitive.pop(obj_field, None) @classmethod def _from_db_object(cls, context, volume, db_volume, expected_attrs=None): @@ -277,6 +291,22 @@ class Volume(base.CinderPersistentObject, base.CinderObject, objects.Snapshot, db_volume['snapshots']) volume.snapshots = snapshots + if 'cluster' in expected_attrs: + db_cluster = db_volume.get('cluster') + # If this volume doesn't belong to a cluster the cluster field in + # the ORM instance will have value of None. + if db_cluster: + volume.cluster = objects.Cluster(context) + objects.Cluster._from_db_object(context, volume.cluster, + db_cluster) + else: + volume.cluster = None + if 'group' in expected_attrs: + group = objects.Group(context) + group._from_db_object(context, + group, + db_volume['group']) + volume.group = group volume._context = context volume.obj_reset_changes() @@ -294,6 +324,12 @@ class Volume(base.CinderPersistentObject, base.CinderObject, if 'snapshots' in updates: raise exception.ObjectActionError( action='create', reason=_('snapshots assigned')) + if 'cluster' in updates: + raise exception.ObjectActionError( + action='create', reason=_('cluster assigned')) + if 'group' in updates: + raise exception.ObjectActionError( + action='create', reason=_('group assigned')) db_volume = db.volume_create(self._context, updates) self._from_db_object(self._context, self, db_volume) @@ -304,12 +340,18 @@ class Volume(base.CinderPersistentObject, base.CinderObject, if 'consistencygroup' in updates: raise exception.ObjectActionError( action='save', reason=_('consistencygroup changed')) + if 'group' in updates: + raise exception.ObjectActionError( + action='save', reason=_('group changed')) if 'glance_metadata' in updates: raise exception.ObjectActionError( action='save', reason=_('glance_metadata changed')) if 'snapshots' in updates: raise exception.ObjectActionError( action='save', reason=_('snapshots changed')) + if 'cluster' in updates: + raise exception.ObjectActionError( + action='save', reason=_('cluster changed')) if 'metadata' in updates: # Metadata items that are not specified in the # self.metadata will be deleted @@ -327,7 +369,9 @@ class Volume(base.CinderPersistentObject, base.CinderObject, def destroy(self): with self.obj_as_admin(): - db.volume_destroy(self._context, self.id) + updated_values = db.volume_destroy(self._context, self.id) + self.update(updated_values) + self.obj_reset_changes(updated_values.keys()) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: @@ -373,6 +417,18 @@ class Volume(base.CinderPersistentObject, base.CinderObject, elif attrname == 'snapshots': self.snapshots = objects.SnapshotList.get_all_for_volume( self._context, self.id) + elif attrname == 'cluster': + # If this volume doesn't belong to a cluster (cluster_name is + # empty), then cluster field will be None. 
+ if self.cluster_name: + self.cluster = objects.Cluster.get_by_id( + self._context, name=self.cluster_name) + else: + self.cluster = None + elif attrname == 'group': + group = objects.Group.get_by_id( + self._context, self.group_id) + self.group = group self.obj_reset_changes(fields=[attrname]) @@ -426,6 +482,7 @@ class Volume(base.CinderPersistentObject, base.CinderObject, setattr(self, key, value) setattr(dest_volume, key, value_to_dst) + self.save() dest_volume.save() return dest_volume @@ -438,8 +495,27 @@ class VolumeList(base.ObjectListBase, base.CinderObject): 'objects': fields.ListOfObjectsField('Volume'), } + @staticmethod + def include_in_cluster(context, cluster, partial_rename=True, **filters): + """Include all volumes matching the filters into a cluster. + + When partial_rename is set we will not set the cluster_name with + cluster parameter value directly, we'll replace provided cluster_name + or host filter value with cluster instead. + + This is useful when we want to replace just the cluster name but leave + the backend and pool information as it is. If we are using + cluster_name to filter, we'll use that same DB field to replace the + cluster value and leave the rest as it is. Likewise if we use the host + to filter. + + Returns the number of volumes that have been changed. + """ + return db.volume_include_in_cluster(context, cluster, partial_rename, + **filters) + @classmethod - def _get_expected_attrs(cls, context): + def _get_expected_attrs(cls, context, *args, **kwargs): expected_attrs = ['metadata', 'volume_type'] if context.is_admin: expected_attrs.append('admin_metadata') @@ -465,11 +541,21 @@ class VolumeList(base.ObjectListBase, base.CinderObject): @classmethod def get_all_by_group(cls, context, group_id, filters=None): + # Consistency group volumes = db.volume_get_all_by_group(context, group_id, filters) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) + @classmethod + def get_all_by_generic_group(cls, context, group_id, filters=None): + # Generic volume group + volumes = db.volume_get_all_by_generic_group(context, group_id, + filters) + expected_attrs = cls._get_expected_attrs(context) + return base.obj_make_list(context, cls(context), objects.Volume, + volumes, expected_attrs=expected_attrs) + @classmethod def get_all_by_project(cls, context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, @@ -481,3 +567,13 @@ class VolumeList(base.ObjectListBase, base.CinderObject): expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) + + @classmethod + def get_volume_summary_all(cls, context): + volumes = db.get_volume_summary_all(context) + return volumes + + @classmethod + def get_volume_summary_by_project(cls, context, project_id): + volumes = db.get_volume_summary_by_project(context, project_id) + return volumes diff --git a/cinder/objects/volume_type.py b/cinder/objects/volume_type.py index 7536ba1c8..0d033a98b 100644 --- a/cinder/objects/volume_type.py +++ b/cinder/objects/volume_type.py @@ -15,6 +15,7 @@ from oslo_utils import versionutils from oslo_versionedobjects import fields +from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects @@ -22,7 +23,7 @@ from cinder.objects import base from cinder.volume import volume_types -OPTIONAL_FIELDS = ['extra_specs', 'projects'] +OPTIONAL_FIELDS = 
['extra_specs', 'projects', 'qos_specs'] @base.CinderObjectRegistry.register @@ -30,7 +31,8 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version # Version 1.1: Changed extra_specs to DictOfNullableStringsField - VERSION = '1.1' + # Version 1.2: Added qos_specs + VERSION = '1.2' fields = { 'id': fields.UUIDField(), @@ -39,6 +41,8 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject, 'is_public': fields.BooleanField(default=True, nullable=True), 'projects': fields.ListOfStringsField(nullable=True), 'extra_specs': fields.DictOfNullableStringsField(nullable=True), + 'qos_specs': fields.ObjectField('QualityOfServiceSpecs', + nullable=True), } def obj_make_compatible(self, primitive, target_version): @@ -55,13 +59,13 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject, primitive['extra_specs'][k] = '' @classmethod - def _get_expected_attrs(cls, context): + def _get_expected_attrs(cls, context, *args, **kwargs): return 'extra_specs', 'projects' @staticmethod def _from_db_object(context, type, db_type, expected_attrs=None): if expected_attrs is None: - expected_attrs = [] + expected_attrs = ['extra_specs', 'projects'] for name, field in type.fields.items(): if name in OPTIONAL_FIELDS: continue @@ -82,7 +86,10 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject, type.extra_specs = specs if 'projects' in expected_attrs: type.projects = db_type.get('projects', []) - + if 'qos_specs' in expected_attrs: + qos_specs = objects.QualityOfServiceSpecs(context) + qos_specs._from_db_object(context, qos_specs, db_type['qos_specs']) + type.qos_specs = qos_specs type._context = context type.obj_reset_changes() return type @@ -106,7 +113,9 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject, def destroy(self): with self.obj_as_admin(): - volume_types.destroy(self._context, self.id) + updated_values = volume_types.destroy(self._context, self.id) + self.update(updated_values) + self.obj_reset_changes(updated_values.keys()) @base.CinderObjectRegistry.register @@ -130,3 +139,19 @@ class VolumeTypeList(base.ObjectListBase, base.CinderObject): return base.obj_make_list(context, cls(context), objects.VolumeType, types.values(), expected_attrs=expected_attrs) + + @classmethod + def get_all_types_for_qos(cls, context, qos_id): + types = db.qos_specs_associations_get(context, qos_id) + return base.obj_make_list(context, cls(context), objects.VolumeType, + types) + + @classmethod + def get_all_by_group(cls, context, group_id): + # Generic volume group + types = volume_types.get_all_types_by_group( + context.elevated(), group_id) + expected_attrs = VolumeType._get_expected_attrs(context) + return base.obj_make_list(context, cls(context), + objects.VolumeType, types, + expected_attrs=expected_attrs) diff --git a/cinder/opts.py b/cinder/opts.py index e8ea9cbfb..5b9cd8da0 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -41,9 +41,7 @@ from cinder.db import base as cinder_db_base from cinder import exception as cinder_exception from cinder.image import glance as cinder_image_glance from cinder.image import image_utils as cinder_image_imageutils -import cinder.keymgr from cinder.keymgr import conf_key_mgr as cinder_keymgr_confkeymgr -from cinder.keymgr import key_mgr as cinder_keymgr_keymgr from cinder.message import api as cinder_message_api from cinder import quota as cinder_quota from cinder.scheduler import driver as cinder_scheduler_driver @@ -89,6 +87,8 @@ 
from cinder.volume.drivers.emc.vnx import common as \ from cinder.volume.drivers.emc import xtremio as \ cinder_volume_drivers_emc_xtremio from cinder.volume.drivers import eqlx as cinder_volume_drivers_eqlx +from cinder.volume.drivers.falconstor import fss_common as \ + cinder_volume_drivers_falconstor_fsscommon from cinder.volume.drivers.fujitsu import eternus_dx_common as \ cinder_volume_drivers_fujitsu_eternusdxcommon from cinder.volume.drivers.fusionstorage import dsware as \ @@ -107,6 +107,8 @@ from cinder.volume.drivers.hitachi import hnas_iscsi as \ cinder_volume_drivers_hitachi_hnasiscsi from cinder.volume.drivers.hitachi import hnas_nfs as \ cinder_volume_drivers_hitachi_hnasnfs +from cinder.volume.drivers.hitachi import hnas_utils as \ + cinder_volume_drivers_hitachi_hnasutils from cinder.volume.drivers.hpe import hpe_3par_common as \ cinder_volume_drivers_hpe_hpe3parcommon from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \ @@ -122,14 +124,14 @@ from cinder.volume.drivers.ibm import flashsystem_fc as \ from cinder.volume.drivers.ibm import flashsystem_iscsi as \ cinder_volume_drivers_ibm_flashsystemiscsi from cinder.volume.drivers.ibm import gpfs as cinder_volume_drivers_ibm_gpfs +from cinder.volume.drivers.ibm import ibm_storage as \ + cinder_volume_drivers_ibm_ibmstorage from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi -from cinder.volume.drivers.ibm import xiv_ds8k as \ - cinder_volume_drivers_ibm_xivds8k from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli as \ cinder_volume_drivers_infortrend_eonstor_ds_cli_commoncli from cinder.volume.drivers.kaminario import kaminario_common as \ @@ -192,19 +194,15 @@ def list_opts(): return [ ('FC-ZONE-MANAGER', itertools.chain( - cinder_zonemanager_fczonemanager.zone_manager_opts, cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts, + cinder_zonemanager_fczonemanager.zone_manager_opts, cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts, )), - ('KEYMGR', - itertools.chain( - cinder_keymgr_keymgr.encryption_opts, - cinder.keymgr.keymgr_opts, - cinder_keymgr_confkeymgr.key_mgr_opts, - )), ('DEFAULT', itertools.chain( cinder_backup_driver.service_opts, + [cinder_cmd_volume.cluster_opt], + cinder_volume_drivers_hitachi_hnasutils.drivers_common_opts, cinder_api_common.api_common_opts, cinder_backup_drivers_ceph.service_opts, cinder_volume_drivers_smbfs.volume_opts, @@ -224,6 +222,7 @@ def list_opts(): cinder_volume_drivers_netapp_options.netapp_eseries_opts, cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts, cinder_volume_drivers_netapp_options.netapp_san_opts, + cinder_volume_drivers_netapp_options.netapp_replication_opts, cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi. 
storwize_svc_iscsi_opts, cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts, @@ -265,7 +264,7 @@ def list_opts(): cinder_volume_drivers_pure.PURE_OPTS, cinder_context.context_opts, cinder_scheduler_driver.scheduler_driver_opts, - cinder_volume_drivers_scality.volume_opts, + cinder_volume_drivers_ibm_ibmstorage.driver_opts, cinder_volume_drivers_vmware_vmdk.vmdk_opts, cinder_volume_drivers_lenovo_lenovocommon.common_opts, cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts, @@ -278,9 +277,11 @@ def list_opts(): cinder_scheduler_weights_volumenumber. volume_number_weight_opts, cinder_volume_drivers_coho.coho_opts, + cinder_volume_drivers_scality.volume_opts, cinder_volume_drivers_xio.XIO_OPTS, cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc. storwize_svc_fc_opts, + cinder_volume_drivers_falconstor_fsscommon.FSS_OPTS, cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS, cinder_volume_driver.volume_opts, cinder_volume_driver.iser_opts, @@ -349,7 +350,6 @@ def list_opts(): [cinder_volume_api.volume_host_opt], [cinder_volume_api.volume_same_az_opt], [cinder_volume_api.az_cache_time_opt], - cinder_volume_drivers_ibm_xivds8k.xiv_ds8k_opts, cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, cinder_volume_drivers_datera.d_opts, cinder_volume_drivers_zadara.zadara_opts, @@ -372,6 +372,10 @@ def list_opts(): itertools.chain( cinder_coordination.coordination_opts, )), + ('KEY_MANAGER', + itertools.chain( + cinder_keymgr_confkeymgr.key_mgr_opts, + )), ('BACKEND', itertools.chain( [cinder_cmd_volume.host_opt], diff --git a/cinder/quota.py b/cinder/quota.py index 05438a669..df077d537 100644 --- a/cinder/quota.py +++ b/cinder/quota.py @@ -45,6 +45,9 @@ quota_opts = [ cfg.IntOpt('quota_consistencygroups', default=10, help='Number of consistencygroups allowed per project'), + cfg.IntOpt('quota_groups', + default=10, + help='Number of groups allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='Total amount of storage, in gigabytes, allowed ' @@ -1202,5 +1205,30 @@ class CGQuotaEngine(QuotaEngine): def register_resources(self, resources): raise NotImplementedError(_("Cannot register resources")) + +class GroupQuotaEngine(QuotaEngine): + """Represent the group quotas.""" + + @property + def resources(self): + """Fetches all possible quota resources.""" + + result = {} + # Global quotas. + argses = [('groups', '_sync_groups', + 'quota_groups'), ] + for args in argses: + resource = ReservableResource(*args) + result[resource.name] = resource + + return result + + def register_resource(self, resource): + raise NotImplementedError(_("Cannot register resource")) + + def register_resources(self, resources): + raise NotImplementedError(_("Cannot register resources")) + QUOTAS = VolumeTypeQuotaEngine() CGQUOTAS = CGQuotaEngine() +GROUP_QUOTAS = GroupQuotaEngine() diff --git a/cinder/quota_utils.py b/cinder/quota_utils.py index 6abfb7f8d..38c885515 100644 --- a/cinder/quota_utils.py +++ b/cinder/quota_utils.py @@ -12,8 +12,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
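[editor's note: a minimal standalone sketch of how the GroupQuotaEngine.resources property above assembles its single global resource from (name, sync function, config option) tuples; ReservableResource is stubbed since only the wiring is being illustrated. Not part of the patch.]

class ReservableResource(object):  # stub of cinder.quota's class
    def __init__(self, name, sync, flag):
        self.name, self.sync, self.flag = name, sync, flag


def group_resources():
    result = {}
    # Global quotas, mirroring the argses list in GroupQuotaEngine.
    for args in [('groups', '_sync_groups', 'quota_groups')]:
        resource = ReservableResource(*args)
        result[resource.name] = resource
    return result


assert group_resources()['groups'].flag == 'quota_groups'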
-import webob - from oslo_config import cfg from oslo_log import log as logging @@ -38,12 +36,14 @@ class GenericProjectInfo(object): def __init__(self, project_id, project_keystone_api_version, project_parent_id=None, project_subtree=None, - project_parent_tree=None): + project_parent_tree=None, + is_admin_project=False): self.id = project_id self.keystone_api_version = project_keystone_api_version self.parent_id = project_parent_id self.subtree = project_subtree self.parents = project_parent_tree + self.is_admin_project = is_admin_project def get_volume_type_reservation(ctxt, volume, type_id, @@ -90,7 +90,7 @@ def _filter_domain_id_from_parents(domain_id, tree): def get_project_hierarchy(context, project_id, subtree_as_ids=False, - parents_as_ids=False): + parents_as_ids=False, is_admin_project=False): """A Helper method to get the project hierarchy. Along with hierarchical multitenancy in keystone API v3, projects can be @@ -99,28 +99,26 @@ def get_project_hierarchy(context, project_id, subtree_as_ids=False, If the domain is being used as the top most parent, it is filtered out from the parent tree and parent_id. """ - try: - keystone = _keystone_client(context) - generic_project = GenericProjectInfo(project_id, keystone.version) - if keystone.version == 'v3': - project = keystone.projects.get(project_id, - subtree_as_ids=subtree_as_ids, - parents_as_ids=parents_as_ids) + keystone = _keystone_client(context) + generic_project = GenericProjectInfo(project_id, keystone.version) + if keystone.version == 'v3': + project = keystone.projects.get(project_id, + subtree_as_ids=subtree_as_ids, + parents_as_ids=parents_as_ids) - generic_project.parent_id = None - if project.parent_id != project.domain_id: - generic_project.parent_id = project.parent_id + generic_project.parent_id = None + if project.parent_id != project.domain_id: + generic_project.parent_id = project.parent_id - generic_project.subtree = ( - project.subtree if subtree_as_ids else None) + generic_project.subtree = ( + project.subtree if subtree_as_ids else None) - generic_project.parents = None - if parents_as_ids: - generic_project.parents = _filter_domain_id_from_parents( - project.domain_id, project.parents) - except exceptions.NotFound: - msg = (_("Tenant ID: %s does not exist.") % project_id) - raise webob.exc.HTTPNotFound(explanation=msg) + generic_project.parents = None + if parents_as_ids: + generic_project.parents = _filter_domain_id_from_parents( + project.domain_id, project.parents) + + generic_project.is_admin_project = is_admin_project return generic_project diff --git a/cinder/rpc.py b/cinder/rpc.py index 00043b946..4b1950e43 100644 --- a/cinder/rpc.py +++ b/cinder/rpc.py @@ -191,62 +191,45 @@ class RPCAPI(object): def __init__(self): target = messaging.Target(topic=self.TOPIC, version=self.RPC_API_VERSION) - obj_version_cap = self._determine_obj_version_cap() + obj_version_cap = self.determine_obj_version_cap() serializer = base.CinderObjectSerializer(obj_version_cap) - rpc_version_cap = self._determine_rpc_version_cap() + rpc_version_cap = self.determine_rpc_version_cap() self.client = get_client(target, version_cap=rpc_version_cap, serializer=serializer) - def _determine_rpc_version_cap(self): + @classmethod + def determine_rpc_version_cap(cls): global LAST_RPC_VERSIONS - if self.BINARY in LAST_RPC_VERSIONS: - return LAST_RPC_VERSIONS[self.BINARY] + if cls.BINARY in LAST_RPC_VERSIONS: + return LAST_RPC_VERSIONS[cls.BINARY] version_cap = objects.Service.get_minimum_rpc_version( - cinder.context.get_admin_context(), 
self.BINARY) - if version_cap == 'liberty': - # NOTE(dulek): This means that one of the services is Liberty, - # we should cap to it's RPC version. - version_cap = LIBERTY_RPC_VERSIONS[self.BINARY] - elif not version_cap: + cinder.context.get_admin_context(), cls.BINARY) + if not version_cap: # If there is no service we assume they will come up later and will # have the same version as we do. - version_cap = self.RPC_API_VERSION + version_cap = cls.RPC_API_VERSION LOG.info(_LI('Automatically selected %(binary)s RPC version ' '%(version)s as minimum service version.'), - {'binary': self.BINARY, 'version': version_cap}) - LAST_RPC_VERSIONS[self.BINARY] = version_cap + {'binary': cls.BINARY, 'version': version_cap}) + LAST_RPC_VERSIONS[cls.BINARY] = version_cap return version_cap - def _determine_obj_version_cap(self): + @classmethod + def determine_obj_version_cap(cls): global LAST_OBJ_VERSIONS - if self.BINARY in LAST_OBJ_VERSIONS: - return LAST_OBJ_VERSIONS[self.BINARY] + if cls.BINARY in LAST_OBJ_VERSIONS: + return LAST_OBJ_VERSIONS[cls.BINARY] version_cap = objects.Service.get_minimum_obj_version( - cinder.context.get_admin_context(), self.BINARY) + cinder.context.get_admin_context(), cls.BINARY) # If there is no service we assume they will come up later and will # have the same version as we do. if not version_cap: version_cap = base.OBJ_VERSIONS.get_current() LOG.info(_LI('Automatically selected %(binary)s objects version ' '%(version)s as minimum service version.'), - {'binary': self.BINARY, 'version': version_cap}) - LAST_OBJ_VERSIONS[self.BINARY] = version_cap + {'binary': cls.BINARY, 'version': version_cap}) + LAST_OBJ_VERSIONS[cls.BINARY] = version_cap return version_cap - - -# FIXME(dulek): Liberty haven't reported its RPC versions, so we need to have -# them hardcoded. This dict may go away as soon as we drop compatibility with -# L, which should be in early N. -# -# This is the only time we need to have such dictionary. We don't need to add -# similar ones for any release following Liberty. -LIBERTY_RPC_VERSIONS = { - 'cinder-volume': '1.30', - 'cinder-scheduler': '1.8', - # NOTE(dulek) backup.manager had specified version '1.2', but backup.rpcapi - # was really only sending messages up to '1.1'. - 'cinder-backup': '1.1', -} diff --git a/cinder/scheduler/driver.py b/cinder/scheduler/driver.py index 94d47f147..ac7bf61f1 100644 --- a/cinder/scheduler/driver.py +++ b/cinder/scheduler/driver.py @@ -66,6 +66,16 @@ def group_update_db(context, group, host): return group +def generic_group_update_db(context, group, host): + """Set the host and the scheduled_at field of a group. + + :returns: A Group with the updated fields set properly. 
+ """ + group.update({'host': host, 'updated_at': timeutils.utcnow()}) + group.save() + return group + + class Scheduler(object): """The base class that all Scheduler classes should inherit from.""" @@ -93,7 +103,8 @@ class Scheduler(object): host, capabilities) - def host_passes_filters(self, context, volume_id, host, filter_properties): + def host_passes_filters(self, context, host, request_spec, + filter_properties): """Check if the specified host passes the filters.""" raise NotImplementedError(_("Must implement host_passes_filters")) @@ -117,6 +128,15 @@ class Scheduler(object): raise NotImplementedError(_( "Must implement schedule_create_consistencygroup")) + def schedule_create_group(self, context, group, + group_spec, + request_spec_list, + group_filter_properties, + filter_properties_list): + """Must override schedule method for scheduler to work.""" + raise NotImplementedError(_( + "Must implement schedule_create_group")) + def get_pools(self, context, filters): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_( diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py index 5db592c6c..7c7e8af18 100644 --- a/cinder/scheduler/filter_scheduler.py +++ b/cinder/scheduler/filter_scheduler.py @@ -22,6 +22,7 @@ Weighing Functions. from oslo_config import cfg from oslo_log import log as logging +from oslo_serialization import jsonutils from cinder import exception from cinder.i18n import _, _LE, _LW @@ -80,6 +81,28 @@ class FilterScheduler(driver.Scheduler): self.volume_rpcapi.create_consistencygroup(context, updated_group, host) + def schedule_create_group(self, context, group, + group_spec, + request_spec_list, + group_filter_properties, + filter_properties_list): + weighed_host = self._schedule_generic_group( + context, + group_spec, + request_spec_list, + group_filter_properties, + filter_properties_list) + + if not weighed_host: + raise exception.NoValidHost(reason=_("No weighed hosts available")) + + host = weighed_host.obj.host + + updated_group = driver.generic_group_update_db(context, group, host) + + self.volume_rpcapi.create_group(context, + updated_group, host) + def schedule_create_volume(self, context, request_spec, filter_properties): weighed_host = self._schedule(context, request_spec, filter_properties) @@ -257,27 +280,22 @@ class FilterScheduler(driver.Scheduler): """ elevated = context.elevated() - volume_properties = request_spec['volume_properties'] # Since Cinder is using mixed filters from Oslo and it's own, which # takes 'resource_XX' and 'volume_XX' as input respectively, copying # 'volume_XX' to 'resource_XX' will make both filters happy. 
-        resource_properties = volume_properties.copy()
-        volume_type = request_spec.get("volume_type", None)
-        resource_type = request_spec.get("volume_type", None)
-        request_spec.update({'resource_properties': resource_properties})
+        volume_type = resource_type = request_spec.get("volume_type")

         config_options = self._get_configuration_options()

         if filter_properties is None:
             filter_properties = {}
-        self._populate_retry(filter_properties, resource_properties)
+        self._populate_retry(filter_properties,
+                             request_spec['volume_properties'])

-        if resource_type is None:
-            msg = _("volume_type cannot be None")
-            raise exception.InvalidVolumeType(reason=msg)
+        request_spec_dict = jsonutils.to_primitive(request_spec)

         filter_properties.update({'context': context,
-                                  'request_spec': request_spec,
+                                  'request_spec': request_spec_dict,
                                   'config_options': config_options,
                                   'volume_type': volume_type,
                                   'resource_type': resource_type})
@@ -288,7 +306,8 @@ class FilterScheduler(driver.Scheduler):
         # If multiattach is enabled on a volume, we need to add
         # multiattach to extra specs, so that the capability
         # filtering is enabled.
-        multiattach = volume_properties.get('multiattach', False)
+        multiattach = request_spec['volume_properties'].get('multiattach',
+                                                            False)
         if multiattach and 'multiattach' not in resource_type.get(
                 'extra_specs', {}):
             if 'extra_specs' not in resource_type:
@@ -410,23 +429,199 @@ class FilterScheduler(driver.Scheduler):

         return weighed_hosts

+    def _get_weighted_candidates_generic_group(
+            self, context, group_spec, request_spec_list,
+            group_filter_properties=None,
+            filter_properties_list=None):
+        """Finds hosts that support the group.
+
+        Returns a list of hosts that meet the required specs,
+        ordered by their fitness.
+        """
+        elevated = context.elevated()
+
+        hosts_by_group_type = self._get_weighted_candidates_by_group_type(
+            context, group_spec, group_filter_properties)
+
+        weighed_hosts = []
+        hosts_by_vol_type = []
+        index = 0
+        for request_spec in request_spec_list:
+            volume_properties = request_spec['volume_properties']
+            # Since Cinder is using mixed filters from Oslo and its own,
+            # which take 'resource_XX' and 'volume_XX' as input respectively,
+            # copying 'volume_XX' to 'resource_XX' will make both filters
+            # happy.
+            resource_properties = volume_properties.copy()
+            volume_type = request_spec.get("volume_type", None)
+            resource_type = request_spec.get("volume_type", None)
+            request_spec.update({'resource_properties': resource_properties})
+
+            config_options = self._get_configuration_options()
+
+            filter_properties = {}
+            if filter_properties_list:
+                filter_properties = filter_properties_list[index]
+                if filter_properties is None:
+                    filter_properties = {}
+            self._populate_retry(filter_properties, resource_properties)
+
+            # Add group_support in extra_specs if it is not there.
+            # Make sure it is populated in filter_properties
+            # if 'group_support' not in resource_type.get(
+            #         'extra_specs', {}):
+            #     resource_type['extra_specs'].update(
+            #         group_support='<is> True')
+
+            filter_properties.update({'context': context,
+                                      'request_spec': request_spec,
+                                      'config_options': config_options,
+                                      'volume_type': volume_type,
+                                      'resource_type': resource_type})
+
+            self.populate_filter_properties(request_spec,
+                                            filter_properties)
+
+            # Find our local list of acceptable hosts by filtering and
+            # weighing our options. We virtually consume resources on
+            # it so subsequent selections can adjust accordingly.
+
+            # Note: remember, we are using an iterator here. So only
+            # traverse this list once.
+            all_hosts = self.host_manager.get_all_host_states(elevated)
+            if not all_hosts:
+                return []
+
+            # Filter local hosts based on requirements ...
+            hosts = self.host_manager.get_filtered_hosts(all_hosts,
+                                                         filter_properties)
+
+            if not hosts:
+                return []
+
+            LOG.debug("Filtered %s", hosts)
+
+            # weighted_host = WeightedHost() ... the best
+            # host for the job.
+            temp_weighed_hosts = self.host_manager.get_weighed_hosts(
+                hosts,
+                filter_properties)
+            if not temp_weighed_hosts:
+                return []
+            if index == 0:
+                hosts_by_vol_type = temp_weighed_hosts
+            else:
+                hosts_by_vol_type = self._find_valid_hosts(
+                    hosts_by_vol_type, temp_weighed_hosts)
+                if not hosts_by_vol_type:
+                    return []
+
+            index += 1
+
+        # Find hosts selected by both the group type and volume types.
+        weighed_hosts = self._find_valid_hosts(hosts_by_vol_type,
+                                               hosts_by_group_type)
+
+        return weighed_hosts
+
+    def _find_valid_hosts(self, host_list1, host_list2):
+        new_hosts = []
+        for host1 in host_list1:
+            for host2 in host_list2:
+                # Should schedule creation of group on backend level,
+                # not pool level.
+                if (utils.extract_host(host1.obj.host) ==
+                        utils.extract_host(host2.obj.host)):
+                    new_hosts.append(host1)
+        if not new_hosts:
+            return []
+        return new_hosts
+
+    def _get_weighted_candidates_by_group_type(
+            self, context, group_spec,
+            group_filter_properties=None):
+        """Finds hosts that support the group type.
+
+        Returns a list of hosts that meet the required specs,
+        ordered by their fitness.
+        """
+        elevated = context.elevated()
+
+        weighed_hosts = []
+        volume_properties = group_spec['volume_properties']
+        # Since Cinder is using mixed filters from Oslo and its own,
+        # which take 'resource_XX' and 'volume_XX' as input respectively,
+        # copying 'volume_XX' to 'resource_XX' will make both filters
+        # happy.
+        resource_properties = volume_properties.copy()
+        group_type = group_spec.get("group_type", None)
+        resource_type = group_spec.get("group_type", None)
+        group_spec.update({'resource_properties': resource_properties})
+
+        config_options = self._get_configuration_options()
+
+        if group_filter_properties is None:
+            group_filter_properties = {}
+        self._populate_retry(group_filter_properties, resource_properties)
+
+        group_filter_properties.update({'context': context,
+                                        'request_spec': group_spec,
+                                        'config_options': config_options,
+                                        'group_type': group_type,
+                                        'resource_type': resource_type})
+
+        self.populate_filter_properties(group_spec,
+                                        group_filter_properties)
+
+        # Find our local list of acceptable hosts by filtering and
+        # weighing our options. We virtually consume resources on
+        # it so subsequent selections can adjust accordingly.
+
+        # Note: remember, we are using an iterator here. So only
+        # traverse this list once.
+        all_hosts = self.host_manager.get_all_host_states(elevated)
+        if not all_hosts:
+            return []
+
+        # Filter local hosts based on requirements ...
+        hosts = self.host_manager.get_filtered_hosts(all_hosts,
+                                                     group_filter_properties)
+
+        if not hosts:
+            return []
+
+        LOG.debug("Filtered %s", hosts)
+
+        # weighted_host = WeightedHost() ... the best
+        # host for the job.
+        weighed_hosts = self.host_manager.get_weighed_hosts(
+            hosts,
+            group_filter_properties)
+        if not weighed_hosts:
+            return []
+
+        return weighed_hosts
+
     def _schedule(self, context, request_spec, filter_properties=None):
         weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                       filter_properties)
         # When we get the weighed_hosts, we clear those hosts whose backend
         # is not same as consistencygroup's backend.
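_find_valid_hosts above intersects the candidates at the backend level; assuming the usual semantics of cinder.volume.utils.extract_host (default level is 'backend'), the pool suffix is dropped before comparing:

# Assumed extract_host behavior (default level='backend'):
from cinder.volume import utils

utils.extract_host('node1@lvmdriver-1#pool_a')  # -> 'node1@lvmdriver-1'
utils.extract_host('node1@lvmdriver-1#pool_b')  # -> 'node1@lvmdriver-1'
# So two weighed hosts that differ only in pool count as the same backend
# when intersecting the per-volume-type and group-type candidate lists.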
- CG_backend = request_spec.get('CG_backend') - if weighed_hosts and CG_backend: + if request_spec.get('CG_backend'): + group_backend = request_spec.get('CG_backend') + else: + group_backend = request_spec.get('group_backend') + if weighed_hosts and group_backend: # Get host name including host@backend#pool info from # weighed_hosts. for host in weighed_hosts[::-1]: backend = utils.extract_host(host.obj.host) - if backend != CG_backend: + if backend != group_backend: weighed_hosts.remove(host) if not weighed_hosts: LOG.warning(_LW('No weighed hosts found for volume ' 'with properties: %s'), - filter_properties['request_spec']['volume_type']) + filter_properties['request_spec'].get('volume_type')) return None return self._choose_top_host(weighed_hosts, request_spec) @@ -440,6 +635,19 @@ class FilterScheduler(driver.Scheduler): return None return self._choose_top_host_group(weighed_hosts, request_spec_list) + def _schedule_generic_group(self, context, group_spec, request_spec_list, + group_filter_properties=None, + filter_properties_list=None): + weighed_hosts = self._get_weighted_candidates_generic_group( + context, + group_spec, + request_spec_list, + group_filter_properties, + filter_properties_list) + if not weighed_hosts: + return None + return self._choose_top_host_generic_group(weighed_hosts) + def _choose_top_host(self, weighed_hosts, request_spec): top_host = weighed_hosts[0] host_state = top_host.obj @@ -453,3 +661,9 @@ class FilterScheduler(driver.Scheduler): host_state = top_host.obj LOG.debug("Choosing %s", host_state.host) return top_host + + def _choose_top_host_generic_group(self, weighed_hosts): + top_host = weighed_hosts[0] + host_state = top_host.obj + LOG.debug("Choosing %s", host_state.host) + return top_host diff --git a/cinder/scheduler/filters/capabilities_filter.py b/cinder/scheduler/filters/capabilities_filter.py index e6e3c2d51..5d64b150c 100644 --- a/cinder/scheduler/filters/capabilities_filter.py +++ b/cinder/scheduler/filters/capabilities_filter.py @@ -30,6 +30,10 @@ class CapabilitiesFilter(filters.BaseHostFilter): Check that the capabilities provided by the services satisfy the extra specs associated with the resource type. """ + + if not resource_type: + return True + extra_specs = resource_type.get('extra_specs', []) if not extra_specs: return True diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py index b0ba35972..79df4f40b 100644 --- a/cinder/scheduler/filters/capacity_filter.py +++ b/cinder/scheduler/filters/capacity_filter.py @@ -81,11 +81,22 @@ class CapacityFilter(filters.BaseHostFilter): "requested": volume_size, "available": free} + # NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs, + # we will not use max_over_subscription_ratio and + # provisioned_capacity_gb to determine whether a volume can be + # provisioned. Instead free capacity will be used to evaluate. + thin = True + vol_type = filter_properties.get('volume_type', {}) or {} + provision_type = vol_type.get('extra_specs', {}).get( + 'provisioning:type') + if provision_type == 'thick': + thin = False + # Only evaluate using max_over_subscription_ratio if # thin_provisioning_support is True. Check if the ratio of # provisioned capacity over total capacity has exceeded over # subscription ratio. 
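To make the over-subscription check that follows concrete, a worked example with illustrative numbers:

# Illustrative numbers for the thin-provisioning check below:
total = 100.0                       # total_capacity_gb
provisioned = 150.0                 # host_state.provisioned_capacity_gb
volume_size = 30.0
max_over_subscription_ratio = 2.0

provisioned_ratio = (provisioned + volume_size) / total    # 1.8
# 1.8 <= 2.0, so the host is not rejected on the ratio itself; the filter
# then compares the virtual free capacity against the requested size:
free = 20.0                                                 # free capacity
adjusted_free_virtual = free * max_over_subscription_ratio  # 40.0
# 40.0 >= 30.0 -> the host passes. With 'provisioning:type' set to 'thick'
# in the volume type's extra specs, thin is False and only the raw free
# capacity (20.0 < 30.0) would be evaluated, failing this host.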
- if (host_state.thin_provisioning_support and + if (thin and host_state.thin_provisioning_support and host_state.max_over_subscription_ratio >= 1): provisioned_ratio = ((host_state.provisioned_capacity_gb + volume_size) / total) @@ -110,7 +121,7 @@ class CapacityFilter(filters.BaseHostFilter): adjusted_free_virtual = ( free * host_state.max_over_subscription_ratio) return adjusted_free_virtual >= volume_size - elif host_state.thin_provisioning_support: + elif thin and host_state.thin_provisioning_support: LOG.warning(_LW("Filtering out host %(host)s with an invalid " "maximum over subscription ratio of " "%(oversub_ratio).2f. The ratio should be a " diff --git a/cinder/scheduler/flows/create_volume.py b/cinder/scheduler/flows/create_volume.py index 2a8797557..c5f81eca1 100644 --- a/cinder/scheduler/flows/create_volume.py +++ b/cinder/scheduler/flows/create_volume.py @@ -43,7 +43,7 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask): **kwargs) self.db_api = db_api - def _populate_request_spec(self, context, volume, snapshot_id, + def _populate_request_spec(self, volume, snapshot_id, image_id): # Create the full request spec using the volume object. # @@ -69,7 +69,7 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask): image_id): # For RPC version < 1.2 backward compatibility if request_spec is None: - request_spec = self._populate_request_spec(context, volume.id, + request_spec = self._populate_request_spec(volume.id, snapshot_id, image_id) return { 'request_spec': request_spec, @@ -120,7 +120,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask): "payload %(payload)s"), {'topic': self.FAILURE_TOPIC, 'payload': payload}) - def execute(self, context, request_spec, filter_properties): + def execute(self, context, request_spec, filter_properties, volume): try: self.driver_api.schedule_create_volume(context, request_spec, filter_properties) @@ -141,9 +141,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask): try: self._handle_failure(context, request_spec, e) finally: - common.error_out_volume(context, self.db_api, - request_spec['volume_id'], - reason=e) + common.error_out(volume, reason=e) def get_flow(context, db_api, driver_api, request_spec=None, diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py index 00a2e8781..c5e7b2da8 100644 --- a/cinder/scheduler/host_manager.py +++ b/cinder/scheduler/host_manager.py @@ -21,15 +21,16 @@ import collections from oslo_config import cfg from oslo_log import log as logging +from oslo_utils import importutils from oslo_utils import timeutils +from cinder.common import constants from cinder import context as cinder_context from cinder import exception from cinder import objects from cinder import utils from cinder.i18n import _LI, _LW from cinder.scheduler import filters -from cinder.scheduler import weights from cinder.volume import utils as vol_utils @@ -46,7 +47,11 @@ host_manager_opts = [ default=[ 'CapacityWeigher' ], - help='Which weigher class names to use for weighing hosts.') + help='Which weigher class names to use for weighing hosts.'), + cfg.StrOpt('scheduler_weight_handler', + default='cinder.scheduler.weights.OrderedHostWeightHandler', + help='Which handler to use for selecting the host/pool ' + 'after weighing'), ] CONF = cfg.CONF @@ -347,8 +352,9 @@ class HostManager(object): self.filter_handler = filters.HostFilterHandler('cinder.scheduler.' 'filters') self.filter_classes = self.filter_handler.get_all_classes() - self.weight_handler = weights.HostWeightHandler('cinder.scheduler.' 
- 'weights') + self.weight_handler = importutils.import_object( + CONF.scheduler_weight_handler, + 'cinder.scheduler.weights') self.weight_classes = self.weight_handler.get_all_classes() self._no_capabilities_hosts = set() # Hosts having no capabilities @@ -451,7 +457,7 @@ class HostManager(object): def _update_host_state_map(self, context): # Get resource usage across the available volume nodes: - topic = CONF.volume_topic + topic = constants.VOLUME_TOPIC volume_services = objects.ServiceList.get_all_by_topic(context, topic, disabled=False) diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py index 86eec42b6..aa2b053a9 100644 --- a/cinder/scheduler/manager.py +++ b/cinder/scheduler/manager.py @@ -37,6 +37,7 @@ from cinder import objects from cinder import quota from cinder import rpc from cinder.scheduler.flows import create_volume +from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.volume import rpcapi as volume_rpcapi @@ -60,7 +61,7 @@ class SchedulerManager(manager.Manager): # create_consistencygroup(), create_volume(), migrate_volume_to_host(), # retype() and manage_existing() in v3.0 of RPC API. - RPC_API_VERSION = '2.1' + RPC_API_VERSION = scheduler_rpcapi.SchedulerAPI.RPC_API_VERSION target = messaging.Target(version=RPC_API_VERSION) @@ -123,6 +124,35 @@ class SchedulerManager(manager.Manager): group.status = 'error' group.save() + def create_group(self, context, topic, + group, + group_spec=None, + group_filter_properties=None, + request_spec_list=None, + filter_properties_list=None): + + self._wait_for_scheduler() + try: + self.driver.schedule_create_group( + context, group, + group_spec, + request_spec_list, + group_filter_properties, + filter_properties_list) + except exception.NoValidHost: + LOG.error(_LE("Could not find a host for group " + "%(group_id)s."), + {'group_id': group.id}) + group.status = 'error' + group.save() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_LE("Failed to create generic group " + "%(group_id)s."), + {'group_id': group.id}) + group.status = 'error' + group.save() + def create_volume(self, context, topic, volume_id, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, volume=None): @@ -135,6 +165,11 @@ class SchedulerManager(manager.Manager): # volume by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) + # FIXME(dulek): Remove this in v3.0 of RPC API. + if isinstance(request_spec, dict): + # We may receive request_spec as dict from older clients. 
+ request_spec = objects.RequestSpec.from_primitives(request_spec) + try: flow_engine = create_volume.get_flow(context, db, self.driver, @@ -214,7 +249,7 @@ class SchedulerManager(manager.Manager): volume = objects.Volume.get_by_id(context, volume_id) def _retype_volume_set_error(self, context, ex, request_spec, - volume_ref, msg, reservations): + volume_ref, reservations, msg=None): if reservations: QUOTAS.rollback(context, reservations) previous_status = ( @@ -230,7 +265,7 @@ class SchedulerManager(manager.Manager): msg = _('New volume type not specified in request_spec.') ex = exception.ParameterNotFound(param='volume_type') _retype_volume_set_error(self, context, ex, request_spec, - volume, msg, reservations) + volume, reservations, msg) # Default migration policy is 'never' migration_policy = request_spec.get('migration_policy') @@ -241,14 +276,11 @@ class SchedulerManager(manager.Manager): tgt_host = self.driver.find_retype_host(context, request_spec, filter_properties, migration_policy) - except exception.NoValidHost as ex: - msg = (_("Could not find a host for volume %(volume_id)s with " - "type %(type_id)s.") % - {'type_id': new_type['id'], 'volume_id': volume.id}) - _retype_volume_set_error(self, context, ex, request_spec, - volume, msg, reservations) except Exception as ex: - with excutils.save_and_reraise_exception(): + # Not having a valid host is an expected exception, so we don't + # reraise on it. + reraise = not isinstance(ex, exception.NoValidHost) + with excutils.save_and_reraise_exception(reraise=reraise): _retype_volume_set_error(self, context, ex, request_spec, volume, None, reservations) else: diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py index 35020b05a..2b128bdc9 100644 --- a/cinder/scheduler/rpcapi.py +++ b/cinder/scheduler/rpcapi.py @@ -16,15 +16,12 @@ Client side of the scheduler manager RPC API. """ -from oslo_config import cfg from oslo_serialization import jsonutils +from cinder.common import constants from cinder import rpc -CONF = cfg.CONF - - class SchedulerAPI(rpc.RPCAPI): """Client side of the scheduler rpc API. 
@@ -53,10 +50,12 @@ class SchedulerAPI(rpc.RPCAPI): 2.0 - Remove 1.x compatibility 2.1 - Adds support for sending objects over RPC in manage_existing() + 2.2 - Sends request_spec as object in create_volume() + 2.3 - Add create_group method """ - RPC_API_VERSION = '2.1' - TOPIC = CONF.scheduler_topic + RPC_API_VERSION = '2.3' + TOPIC = constants.SCHEDULER_TOPIC BINARY = 'cinder-scheduler' # FIXME(caosf): Remove unused argument 'topic' from functions @@ -79,6 +78,27 @@ class SchedulerAPI(rpc.RPCAPI): request_spec_list=request_spec_p_list, filter_properties_list=filter_properties_list) + def create_group(self, ctxt, topic, group, + group_spec=None, + request_spec_list=None, + group_filter_properties=None, + filter_properties_list=None): + version = '2.3' + cctxt = self.client.prepare(version=version) + request_spec_p_list = [] + for request_spec in request_spec_list: + request_spec_p = jsonutils.to_primitive(request_spec) + request_spec_p_list.append(request_spec_p) + group_spec_p = jsonutils.to_primitive(group_spec) + + return cctxt.cast(ctxt, 'create_group', + topic=topic, + group=group, + group_spec=group_spec_p, + request_spec_list=request_spec_p_list, + group_filter_properties=group_filter_properties, + filter_properties_list=filter_properties_list) + def create_volume(self, ctxt, topic, volume_id, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, volume=None): @@ -87,7 +107,11 @@ class SchedulerAPI(rpc.RPCAPI): 'snapshot_id': snapshot_id, 'image_id': image_id, 'request_spec': request_spec_p, 'filter_properties': filter_properties, 'volume': volume} - version = '2.0' + version = '2.2' + if not self.client.can_send_version('2.2'): + # Send request_spec as dict + version = '2.0' + msg_args['request_spec'] = jsonutils.to_primitive(request_spec) cctxt = self.client.prepare(version=version) return cctxt.cast(ctxt, 'create_volume', **msg_args) diff --git a/cinder/scheduler/weights/__init__.py b/cinder/scheduler/weights/__init__.py index b122c9756..93bc224d3 100644 --- a/cinder/scheduler/weights/__init__.py +++ b/cinder/scheduler/weights/__init__.py @@ -37,8 +37,9 @@ class BaseHostWeigher(base_weight.BaseWeigher): pass -class HostWeightHandler(base_weight.BaseWeightHandler): +class OrderedHostWeightHandler(base_weight.BaseWeightHandler): object_class = WeighedHost def __init__(self, namespace): - super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace) + super(OrderedHostWeightHandler, self).__init__(BaseHostWeigher, + namespace) diff --git a/cinder/scheduler/weights/capacity.py b/cinder/scheduler/weights/capacity.py index 81a1979dc..82f30cebd 100644 --- a/cinder/scheduler/weights/capacity.py +++ b/cinder/scheduler/weights/capacity.py @@ -114,13 +114,25 @@ class CapacityWeigher(weights.BaseHostWeigher): # capacity anymore. free = -1 if CONF.capacity_weight_multiplier > 0 else float('inf') else: + # NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs, + # we will not use max_over_subscription_ratio and + # provisioned_capacity_gb to determine whether a volume can be + # provisioned. Instead free capacity will be used to evaluate. 
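Concretely, a volume type that opts out of thin accounting would carry the extra spec the NOTE above describes (type name illustrative):

# Illustrative volume type: with this extra spec the weigher (and the
# capacity filter above) fall back to raw free capacity.
weight_properties = {
    'volume_type': {
        'name': 'thick-gold',
        'extra_specs': {'provisioning:type': 'thick'},
    },
}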
+            thin = True
+            vol_type = weight_properties.get('volume_type', {})
+            provision_type = vol_type.get('extra_specs', {}).get(
+                'provisioning:type')
+            if provision_type == 'thick':
+                thin = False
+
             free = utils.calculate_virtual_free_capacity(
                 total_space,
                 free_space,
                 host_state.provisioned_capacity_gb,
                 host_state.thin_provisioning_support,
                 host_state.max_over_subscription_ratio,
-                host_state.reserved_percentage)
+                host_state.reserved_percentage,
+                thin)

         return free
diff --git a/cinder/scheduler/weights/stochastic.py b/cinder/scheduler/weights/stochastic.py
new file mode 100644
index 000000000..b2105ee17
--- /dev/null
+++ b/cinder/scheduler/weights/stochastic.py
@@ -0,0 +1,82 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stochastic weight handler
+
+This weight handler differs from the default weight
+handler by giving every pool a chance to be chosen,
+where the probability is proportional to each pool's
+weight.
+"""
+
+import random
+
+from cinder.scheduler import base_weight
+from cinder.scheduler import weights as wts
+
+
+class StochasticHostWeightHandler(base_weight.BaseWeightHandler):
+    def __init__(self, namespace):
+        super(StochasticHostWeightHandler, self).__init__(wts.BaseHostWeigher,
+                                                          namespace)
+
+    def get_weighed_objects(self, weigher_classes, obj_list,
+                            weighing_properties):
+        # The normalization performed in the superclass is nonlinear, which
+        # messes up the probabilities, so override it. The probabilistic
+        # approach we use here is self-normalizing.
+        # Also, the sorting done by the parent implementation is harmless but
+        # useless for us.
+
+        # Compute the object weights as the parent would but without sorting
+        # or normalization.
+        weighed_objs = [wts.WeighedHost(obj, 0.0) for obj in obj_list]
+        for weigher_cls in weigher_classes:
+            weigher = weigher_cls()
+            weights = weigher.weigh_objects(weighed_objs, weighing_properties)
+            for i, weight in enumerate(weights):
+                obj = weighed_objs[i]
+                obj.weight += weigher.weight_multiplier() * weight
+
+        # Avoid processing empty lists
+        if not weighed_objs:
+            return []
+
+        # First compute the total weight of all the objects and the upper
+        # bound for each object to "win" the lottery.
+        total_weight = 0
+        table = []
+        for weighed_obj in weighed_objs:
+            total_weight += weighed_obj.weight
+            max_value = total_weight
+            table.append((max_value, weighed_obj))
+
+        # Now draw a random value within the computed range
+        winning_value = random.random() * total_weight
+
+        # Scan the table to find the first object with a maximum higher than
+        # the random number. Save the index of the winner.
+        winning_index = 0
+        for (i, (max_value, weighed_obj)) in enumerate(table):
+            if max_value > winning_value:
+                # Return a single element array with the winner.
+                winning_index = i
+                break
+
+        # It's theoretically possible for the above loop to terminate with no
+        # winner. This happens when winning_value >= total_weight, which
+        # could only occur with very large numbers and floating point
+        # rounding. In those cases the actual winner should have been the
+        # last element, so return it.
+        return weighed_objs[winning_index:] + weighed_objs[0:winning_index]
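The new handler is opt-in through the scheduler_weight_handler option added to host_manager.py above; a sketch of selecting it, with the cinder.conf form shown in comments and the override call as used in tests:

# Illustrative: select the stochastic handler instead of the default
# OrderedHostWeightHandler, e.g. in cinder.conf:
#     [DEFAULT]
#     scheduler_weight_handler = \
#         cinder.scheduler.weights.stochastic.StochasticHostWeightHandler
# or, equivalently, as a config override:
from oslo_config import cfg

cfg.CONF.set_override(
    'scheduler_weight_handler',
    'cinder.scheduler.weights.stochastic.StochasticHostWeightHandler')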
diff --git a/cinder/service.py b/cinder/service.py
index 69500a265..d979d4235 100644
--- a/cinder/service.py
+++ b/cinder/service.py
@@ -36,6 +36,8 @@ profiler = importutils.try_import('osprofiler.profiler')
osprofiler_web = importutils.try_import('osprofiler.web')
profiler_opts = importutils.try_import('osprofiler.opts')

+
+from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import coordination
from cinder import exception
@@ -43,7 +45,9 @@ from cinder.i18n import _, _LE, _LI, _LW
from cinder import objects
from cinder.objects import base as objects_base
from cinder import rpc
+from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import version
+from cinder.volume import rpcapi as volume_rpcapi

LOG = logging.getLogger(__name__)
@@ -116,12 +120,14 @@ class Service(service.Service):

     def __init__(self, host, binary, topic, manager, report_interval=None,
                  periodic_interval=None, periodic_fuzzy_delay=None,
-                 service_name=None, coordination=False, *args, **kwargs):
+                 service_name=None, coordination=False, cluster=None, *args,
+                 **kwargs):
         super(Service, self).__init__()

         if not rpc.initialized():
             rpc.init(CONF)

+        self.cluster = cluster
         self.host = host
         self.binary = binary
         self.topic = topic
@@ -133,21 +139,61 @@ class Service(service.Service):

         # NOTE(geguileo): We need to create the Service DB entry before we
         # create the manager, otherwise capped versions for serializer and rpc
-        # client would used existing DB entries not including us, which could
+        # client would use existing DB entries not including us, which could
         # result in us using None (if it's the first time the service is run)
         # or an old version (if this is a normal upgrade of a single service).
         ctxt = context.get_admin_context()
+        self.is_upgrading_to_n = self.is_svc_upgrading_to_n(binary)
         try:
             service_ref = objects.Service.get_by_args(ctxt, host, binary)
             service_ref.rpc_current_version = manager_class.RPC_API_VERSION
             obj_version = objects_base.OBJ_VERSIONS.get_current()
             service_ref.object_current_version = obj_version
+            # TODO(geguileo): In O we can remove the service upgrading part on
+            # the next equation, because by then all our services will be
+            # properly setting the cluster during volume migrations since
+            # they'll have the new Volume ORM model. But until then we can
+            # only set the cluster in the DB and pass added_to_cluster to
+            # init_host when we have completed the rolling upgrade from M to N.

+            # added_to_cluster attribute marks when we consider that we have
+            # just added a host to a cluster so we can include resources into
+            # that cluster. We consider that we have added the host when we
+            # didn't have data in the cluster DB field and our current
+            # configuration has a cluster value. We don't want to do anything
+            # automatic if the cluster is changed, in those cases we'll want
+            # to use the cinder manage command and do it manually.
+            self.added_to_cluster = (not service_ref.cluster_name and cluster
+                                     and not self.is_upgrading_to_n)

+            # TODO(geguileo): In O - Remove self.is_upgrading_to_n part
+            if (service_ref.cluster_name != cluster and
+                    not self.is_upgrading_to_n):
+                LOG.info(_LI('This service has been moved from cluster '
+                             '%(cluster_svc)s to %(cluster_cfg)s. Resources '
+                             'will %(opt_no)sbe moved to the new cluster'),
+                         {'cluster_svc': service_ref.cluster_name,
+                          'cluster_cfg': cluster,
+                          'opt_no': '' if self.added_to_cluster else 'NO '})
+
+            if self.added_to_cluster:
+                # We copy the service's disable status to the cluster if we
+                # have to create it.
+                self._ensure_cluster_exists(ctxt, service_ref.disabled)
+                service_ref.cluster_name = cluster
             service_ref.save()
             self.service_id = service_ref.id
         except exception.NotFound:
+            # We don't want to include cluster information on the service or
+            # create the cluster entry if we are upgrading.
             self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
+            # TODO(geguileo): In O set added_to_cluster to True
+            # We don't want to include resources in the cluster during the
+            # start while we are still doing the rolling upgrade.
+            self.added_to_cluster = not self.is_upgrading_to_n

         self.manager = manager_class(host=self.host,
+                                     cluster=self.cluster,
                                      service_name=service_name,
                                      *args, **kwargs)
         self.report_interval = report_interval
@@ -159,6 +205,18 @@ class Service(service.Service):
         setup_profiler(binary, host)
         self.rpcserver = None
+        self.cluster_rpcserver = None
+
+    # TODO(geguileo): Remove method in O since it will no longer be used.
+    @staticmethod
+    def is_svc_upgrading_to_n(binary):
+        """Given a service binary, determine if the service is upgrading."""
+        rpcapis = {'cinder-scheduler': scheduler_rpcapi.SchedulerAPI,
+                   'cinder-volume': volume_rpcapi.VolumeAPI,
+                   'cinder-backup': backup_rpcapi.BackupAPI}
+        rpc_api = rpcapis[binary]
+        # If we are pinned to 1.3, then we are upgrading from M to N
+        return rpc_api.determine_obj_version_cap() == '1.3'

     def start(self):
         version_string = version.version_string()
@@ -169,17 +227,33 @@
         if self.coordination:
             coordination.COORDINATOR.start()

-        self.manager.init_host()
+        self.manager.init_host(added_to_cluster=self.added_to_cluster)

         LOG.debug("Creating RPC server for service %s", self.topic)

+        ctxt = context.get_admin_context()
         target = messaging.Target(topic=self.topic, server=self.host)
         endpoints = [self.manager]
         endpoints.extend(self.manager.additional_endpoints)
-        serializer = objects_base.CinderObjectSerializer()
+        obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
+        LOG.debug("Pinning object versions for RPC server serializer to %s",
+                  obj_version_cap)
+        serializer = objects_base.CinderObjectSerializer(obj_version_cap)
         self.rpcserver = rpc.get_server(target, endpoints, serializer)
         self.rpcserver.start()

+        # TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
+        if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
+            LOG.info(_LI('Starting %(topic)s cluster %(cluster)s (version '
+                         '%(version)s)'),
+                     {'topic': self.topic, 'version': version_string,
+                      'cluster': self.cluster})
+            target = messaging.Target(topic=self.topic, server=self.cluster)
+            serializer = objects_base.CinderObjectSerializer(obj_version_cap)
+            self.cluster_rpcserver = rpc.get_server(target, endpoints,
+                                                    serializer)
+            self.cluster_rpcserver.start()
+
         self.manager.init_host_with_rpc()

         if self.report_interval:
@@ -218,6 +292,25 @@
                          'new_down_time': new_down_time})
             CONF.set_override('service_down_time', new_down_time)

+    def _ensure_cluster_exists(self, context, disabled=None):
+        if self.cluster:
+            try:
+                objects.Cluster.get_by_id(context, None, name=self.cluster,
+                                          binary=self.binary)
+            except exception.ClusterNotFound:
+                cluster = objects.Cluster(context=context, name=self.cluster,
binary=self.binary) + # If disabled has been specified overwrite default value + if disabled is not None: + cluster.disabled = disabled + try: + cluster.create() + + # Race condition occurred and another service created the + # cluster, so we can continue as it already exists. + except exception.ClusterExists: + pass + def _create_service_ref(self, context, rpc_version=None): zone = CONF.storage_availability_zone kwargs = { @@ -229,9 +322,16 @@ class Service(service.Service): 'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION, 'object_current_version': objects_base.OBJ_VERSIONS.get_current(), } + # TODO(geguileo): In O unconditionally set cluster_name like above + # If we are upgrading we have to ignore the cluster value + if not self.is_upgrading_to_n: + kwargs['cluster_name'] = self.cluster service_ref = objects.Service(context=context, **kwargs) service_ref.create() self.service_id = service_ref.id + # TODO(geguileo): In O unconditionally ensure that the cluster exists + if not self.is_upgrading_to_n: + self._ensure_cluster_exists(context) def __getattr__(self, key): manager = self.__dict__.get('manager', None) @@ -241,7 +341,7 @@ class Service(service.Service): def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, - coordination=False): + coordination=False, cluster=None): """Instantiates class and passes back application object. :param host: defaults to CONF.host @@ -251,6 +351,7 @@ class Service(service.Service): :param report_interval: defaults to CONF.report_interval :param periodic_interval: defaults to CONF.periodic_interval :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay + :param cluster: Defaults to None, as only some services will have it """ if not host: @@ -273,7 +374,8 @@ class Service(service.Service): periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name, - coordination=coordination) + coordination=coordination, + cluster=cluster) return service_obj @@ -282,6 +384,8 @@ class Service(service.Service): # errors, go ahead and ignore them.. as we're shutting down anyway try: self.rpcserver.stop() + if self.cluster_rpcserver: + self.cluster_rpcserver.stop() except Exception: pass @@ -309,6 +413,8 @@ class Service(service.Service): pass if self.rpcserver: self.rpcserver.wait() + if self.cluster_rpcserver: + self.cluster_rpcserver.wait() super(Service, self).wait() def periodic_tasks(self, raise_on_error=False): diff --git a/cinder/test.py b/cinder/test.py index 9c9fae731..a9faef66f 100644 --- a/cinder/test.py +++ b/cinder/test.py @@ -24,7 +24,6 @@ inline callbacks. 
import copy import logging import os -import shutil import uuid import fixtures @@ -37,6 +36,7 @@ from oslo_messaging import conffixture as messaging_conffixture from oslo_utils import strutils from oslo_utils import timeutils from oslotest import moxstubout +import six import testtools from cinder.common import config # noqa Need to register global_opts @@ -63,11 +63,8 @@ class TestingException(Exception): class Database(fixtures.Fixture): - def __init__(self, db_api, db_migrate, sql_connection, - sqlite_db, sqlite_clean_db): + def __init__(self, db_api, db_migrate, sql_connection): self.sql_connection = sql_connection - self.sqlite_db = sqlite_db - self.sqlite_clean_db = sqlite_clean_db # Suppress logging for test runs migrate_logger = logging.getLogger('migrate') @@ -77,26 +74,15 @@ class Database(fixtures.Fixture): self.engine.dispose() conn = self.engine.connect() db_migrate.db_sync() - if sql_connection == "sqlite://": - conn = self.engine.connect() - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - else: - cleandb = os.path.join(CONF.state_path, sqlite_clean_db) - testdb = os.path.join(CONF.state_path, sqlite_db) - shutil.copyfile(testdb, cleandb) + self._DB = "".join(line for line in conn.connection.iterdump()) + self.engine.dispose() def setUp(self): super(Database, self).setUp() - if self.sql_connection == "sqlite://": - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - else: - shutil.copyfile( - os.path.join(CONF.state_path, self.sqlite_clean_db), - os.path.join(CONF.state_path, self.sqlite_db)) + conn = self.engine.connect() + conn.connection.executescript(self._DB) + self.addCleanup(self.engine.dispose) class TestCase(testtools.TestCase): @@ -180,9 +166,7 @@ class TestCase(testtools.TestCase): global _DB_CACHE if not _DB_CACHE: _DB_CACHE = Database(sqla_api, migration, - sql_connection=CONF.database.connection, - sqlite_db=CONF.database.sqlite_db, - sqlite_clean_db='clean.sqlite') + sql_connection=CONF.database.connection) self.useFixture(_DB_CACHE) # NOTE(danms): Make sure to reset us back to non-remote objects @@ -203,7 +187,7 @@ class TestCase(testtools.TestCase): self.injected = [] self._services = [] - fake_notifier.stub_notifier(self.stubs) + fake_notifier.mock_notifier(self) self.override_config('fatal_exception_format_errors', True) # This will be cleaned up by the NestedTempfile fixture @@ -307,19 +291,24 @@ class TestCase(testtools.TestCase): self._services.append(svc) return svc - def mock_object(self, obj, attr_name, new_attr=None, **kwargs): + def mock_object(self, obj, attr_name, *args, **kwargs): """Use python mock to mock an object attribute Mocks the specified objects attribute with the given value. Automatically performs 'addCleanup' for the mock. 
""" - if not new_attr: - new_attr = mock.Mock() - patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs) - patcher.start() + patcher = mock.patch.object(obj, attr_name, *args, **kwargs) + result = patcher.start() self.addCleanup(patcher.stop) - return new_attr + return result + + def patch(self, path, *args, **kwargs): + """Use python mock to mock a path with automatic cleanup.""" + patcher = mock.patch(path, *args, **kwargs) + result = patcher.start() + self.addCleanup(patcher.stop) + return result # Useful assertions def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): @@ -378,3 +367,53 @@ class TestCase(testtools.TestCase): 'd1value': d1value, 'd2value': d2value, }) + + def assert_notify_called(self, mock_notify, calls): + for i in range(0, len(calls)): + mock_call = mock_notify.call_args_list[i] + call = calls[i] + + posargs = mock_call[0] + + self.assertEqual(call[0], posargs[0]) + self.assertEqual(call[1], posargs[2]) + + +class ModelsObjectComparatorMixin(object): + def _dict_from_object(self, obj, ignored_keys): + if ignored_keys is None: + ignored_keys = [] + if isinstance(obj, dict): + items = obj.items() + else: + items = obj.iteritems() + return {k: v for k, v in items + if k not in ignored_keys} + + def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): + obj1 = self._dict_from_object(obj1, ignored_keys) + obj2 = self._dict_from_object(obj2, ignored_keys) + + self.assertEqual( + len(obj1), len(obj2), + "Keys mismatch: %s" % six.text_type( + set(obj1.keys()) ^ set(obj2.keys()))) + for key, value in obj1.items(): + self.assertEqual(value, obj2[key]) + + def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None, + msg=None): + obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) + sort_key = lambda d: [d[k] for k in sorted(d)] + conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key) + + self.assertListEqual(conv_and_sort(objs1), conv_and_sort(objs2), + msg=msg) + + def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): + self.assertEqual(len(primitives1), len(primitives2)) + for primitive in primitives1: + self.assertIn(primitive, primitives2) + + for primitive in primitives2: + self.assertIn(primitive, primitives1) diff --git a/cinder/tests/unit/fake_driver.py b/cinder/tests/fake_driver.py similarity index 95% rename from cinder/tests/unit/fake_driver.py rename to cinder/tests/fake_driver.py index e115d5303..a1a466cb9 100644 --- a/cinder/tests/unit/fake_driver.py +++ b/cinder/tests/fake_driver.py @@ -15,6 +15,7 @@ from oslo_utils import timeutils from cinder import exception +from cinder import objects from cinder.objects import fields from cinder.tests.unit.brick import fake_lvm from cinder.volume import driver @@ -39,10 +40,16 @@ class FakeISCSIDriver(lvm.LVMVolumeDriver): pass def initialize_connection(self, volume, connector): - volume_metadata = {} - - for metadata in volume['volume_admin_metadata']: - volume_metadata[metadata['key']] = metadata['value'] + # NOTE(thangp): There are several places in the core cinder code where + # the volume passed through is a dict and not an oslo_versionedobject. + # We need to react appropriately to what type of volume is passed in, + # until the switch over to oslo_versionedobjects is complete. 
+ if isinstance(volume, objects.Volume): + volume_metadata = volume.admin_metadata + else: + volume_metadata = {} + for metadata in volume['volume_admin_metadata']: + volume_metadata[metadata['key']] = metadata['value'] access_mode = volume_metadata.get('attached_mode') if access_mode is None: diff --git a/cinder/tests/functional/api/client.py b/cinder/tests/functional/api/client.py index fc37c3a42..53a2e796f 100644 --- a/cinder/tests/functional/api/client.py +++ b/cinder/tests/functional/api/client.py @@ -22,40 +22,37 @@ from cinder.tests.unit import fake_constants as fake class OpenStackApiException(Exception): - def __init__(self, message=None, response=None): + message = 'Unspecified error' + + def __init__(self, response=None, msg=None): self.response = response - if not message: - message = 'Unspecified error' + # Give chance to override default message + if msg: + self.message = msg if response: - message = _('%(message)s\nStatus Code: %(_status)s\n' - 'Body: %(_body)s') % {'_status': response.status_code, - '_body': response.text} + self.message = _( + '%(message)s\nStatus Code: %(_status)s\nBody: %(_body)s') % { + '_status': response.status_code, '_body': response.text, + 'message': self.message} - super(OpenStackApiException, self).__init__(message) + super(OpenStackApiException, self).__init__(self.message) -class OpenStackApiAuthenticationException(OpenStackApiException): - def __init__(self, response=None, message=None): - if not message: - message = _("Authentication error") - super(OpenStackApiAuthenticationException, self).__init__(message, - response) +class OpenStackApiException401(OpenStackApiException): + message = _("401 Unauthorized Error") -class OpenStackApiAuthorizationException(OpenStackApiException): - def __init__(self, response=None, message=None): - if not message: - message = _("Authorization error") - super(OpenStackApiAuthorizationException, self).__init__(message, - response) +class OpenStackApiException404(OpenStackApiException): + message = _("404 Not Found Error") -class OpenStackApiNotFoundException(OpenStackApiException): - def __init__(self, response=None, message=None): - if not message: - message = _("Item not found") - super(OpenStackApiNotFoundException, self).__init__(message, response) +class OpenStackApiException413(OpenStackApiException): + message = _("413 Request entity too large") + + +class OpenStackApiException400(OpenStackApiException): + message = _("400 Bad Request") class TestOpenStackClient(object): @@ -102,8 +99,8 @@ class TestOpenStackClient(object): return response - def _authenticate(self): - if self.auth_result: + def _authenticate(self, reauthenticate=False): + if self.auth_result and not reauthenticate: return self.auth_result auth_uri = self.auth_uri @@ -116,11 +113,15 @@ class TestOpenStackClient(object): http_status = response.status_code if http_status == 401: - raise OpenStackApiAuthenticationException(response=response) + raise OpenStackApiException401(response=response) self.auth_result = response.headers return self.auth_result + def update_project(self, new_project_id): + self.project_id = new_project_id + self._authenticate(True) + def api_request(self, relative_uri, check_response_status=None, **kwargs): auth_result = self._authenticate() @@ -135,17 +136,15 @@ class TestOpenStackClient(object): response = self.request(full_uri, **kwargs) http_status = response.status_code - if check_response_status: if http_status not in check_response_status: - if http_status == 404: - raise 
OpenStackApiNotFoundException(response=response) - elif http_status == 401: - raise OpenStackApiAuthorizationException(response=response) - else: - raise OpenStackApiException( - message=_("Unexpected status code"), - response=response) + message = None + try: + exc = globals()["OpenStackApiException%s" % http_status] + except KeyError: + exc = OpenStackApiException + message = _("Unexpected status code") + raise exc(response, message) return response @@ -204,6 +203,16 @@ class TestOpenStackClient(object): def put_volume(self, volume_id, volume): return self.api_put('/volumes/%s' % volume_id, volume)['volume'] + def quota_set(self, project_id, quota_update): + return self.api_put( + 'os-quota-sets/%s' % project_id, + {'quota_set': quota_update})['quota_set'] + + def quota_get(self, project_id, usage=True): + + return self.api_get('os-quota-sets/%s?usage=%s' + % (project_id, usage))['quota_set'] + def create_type(self, type_name, extra_specs=None): type = {"volume_type": {"name": type_name}} if extra_specs: diff --git a/cinder/tests/functional/functional_helpers.py b/cinder/tests/functional/functional_helpers.py index 9a8f955c9..240ec7b90 100644 --- a/cinder/tests/functional/functional_helpers.py +++ b/cinder/tests/functional/functional_helpers.py @@ -19,6 +19,7 @@ Provides common functionality for functional tests import os.path import random import string +import time import uuid import fixtures @@ -80,6 +81,9 @@ class _FunctionalTestBase(test.TestCase): self.api = client.TestOpenStackClient(fake.USER_ID, fake.PROJECT_ID, self.auth_url) + def _update_project(self, new_project_id): + self.api.update_project(new_project_id) + def _start_api_service(self): default_conf = os.path.abspath(os.path.join( os.path.dirname(__file__), '..', '..', '..', @@ -138,3 +142,28 @@ class _FunctionalTestBase(test.TestCase): server_name = self.get_unused_server_name() server['name'] = server_name return server + + def _poll_volume_while(self, volume_id, continue_states, + expected_end_status=None, max_retries=5): + """Poll (briefly) while the state is in continue_states. + + Continues until the state changes from continue_states or max_retries + are hit. If expected_end_status is specified, we assert that the end + status of the volume is expected_end_status. + """ + retries = 0 + while retries <= max_retries: + try: + found_volume = self.api.get_volume(volume_id) + except client.OpenStackApiException404: + return None + + self.assertEqual(volume_id, found_volume['id']) + vol_status = found_volume['status'] + if vol_status not in continue_states: + if expected_end_status: + self.assertEqual(expected_end_status, vol_status) + return found_volume + + time.sleep(1) + retries += 1 diff --git a/cinder/tests/functional/test_quotas.py b/cinder/tests/functional/test_quotas.py new file mode 100644 index 000000000..85b230e1e --- /dev/null +++ b/cinder/tests/functional/test_quotas.py @@ -0,0 +1,170 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import mock
+import uuid
+
+from cinder import quota
+from cinder.tests import fake_driver
+from cinder.tests.functional.api import client
+from cinder.tests.functional import functional_helpers
+
+
+class NestedQuotasTest(functional_helpers._FunctionalTestBase):
+    _vol_type_name = 'functional_test_type'
+
+    def setUp(self):
+        super(NestedQuotasTest, self).setUp()
+        self.api.create_type(self._vol_type_name)
+        fake_driver.LoggingVolumeDriver.clear_logs()
+        self._create_project_hierarchy()
+        # Need to mock out Keystone so the functional tests don't require
+        # other services
+        _keystone_client = mock.MagicMock()
+        _keystone_client.version = 'v3'
+        _keystone_client.projects.get.side_effect = self._get_project
+        _keystone_client_get = mock.patch(
+            'cinder.quota_utils._keystone_client',
+            lambda *args, **kwargs: _keystone_client)
+        _keystone_client_get.start()
+        self.addCleanup(_keystone_client_get.stop)
+        # The QUOTA engine in Cinder is a global variable that lazy-loads the
+        # quota driver, so even if we change the config for the quota driver,
+        # we won't reliably change the driver being used (or change it back)
+        # unless the global variables get cleaned up, so we use mock instead
+        # to simulate this change
+        nested_driver = quota.NestedDbQuotaDriver()
+        _driver_patcher = mock.patch(
+            'cinder.quota.QuotaEngine._driver', new=nested_driver)
+        _driver_patcher.start()
+        self.addCleanup(_driver_patcher.stop)
+        # Default to using the top parent in the hierarchy
+        self._update_project(self.A.id)
+
+    def _get_flags(self):
+        f = super(NestedQuotasTest, self)._get_flags()
+        f['volume_driver'] = \
+            'cinder.tests.fake_driver.LoggingVolumeDriver'
+        f['default_volume_type'] = self._vol_type_name
+        return f
+
+    # Currently we use a 413 error for over quota
+    over_quota_exception = client.OpenStackApiException413
+
+    def _create_project_hierarchy(self):
+        """Sets up the nested hierarchy shown below.
+
+        +-----------+
+        |     A     |
+        |    / \    |
+        |   B   C   |
+        |  /        |
+        | D         |
+        +-----------+
+        """
+        self.A = self.FakeProject()
+        self.B = self.FakeProject(parent_id=self.A.id)
+        self.C = self.FakeProject(parent_id=self.A.id)
+        self.D = self.FakeProject(parent_id=self.B.id)
+
+        self.B.subtree = {self.D.id: self.D.subtree}
+        self.A.subtree = {self.B.id: self.B.subtree, self.C.id: self.C.subtree}
+
+        self.A.parents = None
+        self.B.parents = {self.A.id: None}
+        self.C.parents = {self.A.id: None}
+        self.D.parents = {self.B.id: self.B.parents}
+
+        # project_by_id attribute is used to recover a project based on its id.
+        self.project_by_id = {self.A.id: self.A, self.B.id: self.B,
+                              self.C.id: self.C, self.D.id: self.D}
+
+    class FakeProject(object):
+        _dom_id = uuid.uuid4().hex
+
+        def __init__(self, parent_id=None):
+            self.id = uuid.uuid4().hex
+            self.parent_id = parent_id
+            self.domain_id = self._dom_id
+            self.subtree = None
+            self.parents = None
+
+    def _get_project(self, project_id, *args, **kwargs):
+        return self.project_by_id[project_id]
+
+    def _create_volume(self):
+        return self.api.post_volume({'volume': {'size': 1}})
+
+    def test_default_quotas_enforced(self):
+        # Should be able to create volume on parent project by default
+        created_vol = self._create_volume()
+        self._poll_volume_while(created_vol['id'], ['creating'], 'available')
+        self._update_project(self.B.id)
+        # Shouldn't be able to create volume on child project by default
+        self.assertRaises(self.over_quota_exception, self._create_volume)
+
+    def test_update_child_with_parent_default_quota(self):
+        # Make sure we can update to a reasonable value
+        self.api.quota_set(self.B.id, {'volumes': 5})
+        # Ensure that the update took and we can create a volume
+        self._poll_volume_while(
+            self._create_volume()['id'], ['creating'], 'available')
+
+    def test_quota_update_child_greater_than_parent(self):
+        self.assertRaises(
+            client.OpenStackApiException400,
+            self.api.quota_set, self.B.id, {'volumes': 11})
+
+    def test_child_soft_limit_propagates_to_parent(self):
+        self.api.quota_set(self.B.id, {'volumes': 0})
+        self.api.quota_set(self.D.id, {'volumes': -1})
+        self._update_project(self.D.id)
+        self.assertRaises(self.over_quota_exception, self._create_volume)
+
+    def test_child_quota_hard_limits_affects_parents_allocated(self):
+        self.api.quota_set(self.B.id, {'volumes': 5})
+        self.api.quota_set(self.C.id, {'volumes': 3})
+        alloc = self.api.quota_get(self.A.id)['volumes']['allocated']
+        self.assertEqual(8, alloc)
+        self.assertRaises(client.OpenStackApiException400,
+                          self.api.quota_set, self.C.id, {'volumes': 6})
+
+    def _update_quota_and_def_type(self, project_id, quota):
+        self.api.quota_set(project_id, quota)
+        type_updates = {'%s_%s' % (key, self._vol_type_name): val for key, val
+                        in quota.items() if key != 'per_volume_gigabytes'}
+        return self.api.quota_set(project_id, type_updates)
+
+    def test_grandchild_soft_limit_propagates_up(self):
+        quota = {'volumes': -1, 'gigabytes': -1, 'per_volume_gigabytes': -1}
+        self._update_quota_and_def_type(self.B.id, quota)
+        self._update_quota_and_def_type(self.D.id, quota)
+        self._update_project(self.D.id)
+        # Create two volumes in the grandchild project and ensure the
+        # grandparent's allocated count is updated accordingly
+        vol = self._create_volume()
+        self._create_volume()
+        self._update_project(self.A.id)
+        alloc = self.api.quota_get(self.A.id)['volumes']['allocated']
+        self.assertEqual(2, alloc)
+        alloc = self.api.quota_get(self.B.id)['volumes']['allocated']
+        self.assertEqual(2, alloc)
+        # Ensure delete reduces the quota
+        self._update_project(self.D.id)
+        self.api.delete_volume(vol['id'])
+        self._poll_volume_while(vol['id'], ['deleting'])
+        self._update_project(self.A.id)
+        alloc = self.api.quota_get(self.A.id)['volumes']['allocated']
+        self.assertEqual(1, alloc)
+        alloc = self.api.quota_get(self.B.id)['volumes']['allocated']
+        self.assertEqual(1, alloc)
diff --git a/cinder/tests/functional/test_volumes.py b/cinder/tests/functional/test_volumes.py
index 89031c1b8..e7591eda2 100644
--- a/cinder/tests/functional/test_volumes.py
+++ b/cinder/tests/functional/test_volumes.py
@@ -13,11 +13,8 @@
 # License for 
the specific language governing permissions and limitations # under the License. -import time - -from cinder.tests.functional.api import client +from cinder.tests import fake_driver from cinder.tests.functional import functional_helpers -from cinder.tests.unit import fake_driver class VolumesTest(functional_helpers._FunctionalTestBase): @@ -31,7 +28,7 @@ class VolumesTest(functional_helpers._FunctionalTestBase): def _get_flags(self): f = super(VolumesTest, self)._get_flags() f['volume_driver'] = \ - 'cinder.tests.unit.fake_driver.LoggingVolumeDriver' + 'cinder.tests.fake_driver.LoggingVolumeDriver' f['default_volume_type'] = self._vol_type_name return f @@ -45,27 +42,6 @@ class VolumesTest(functional_helpers._FunctionalTestBase): volumes = self.api.get_volumes() self.assertIsNotNone(volumes) - def _poll_while(self, volume_id, continue_states, max_retries=5): - """Poll (briefly) while the state is in continue_states.""" - retries = 0 - while True: - try: - found_volume = self.api.get_volume(volume_id) - except client.OpenStackApiNotFoundException: - found_volume = None - break - - self.assertEqual(volume_id, found_volume['id']) - - if found_volume['status'] not in continue_states: - break - - time.sleep(1) - retries = retries + 1 - if retries > max_retries: - break - return found_volume - def test_create_and_delete_volume(self): """Creates and deletes a volume.""" @@ -85,7 +61,7 @@ class VolumesTest(functional_helpers._FunctionalTestBase): self.assertIn(created_volume_id, volume_names) # Wait (briefly) for creation. Delay is due to the 'message queue' - found_volume = self._poll_while(created_volume_id, ['creating']) + found_volume = self._poll_volume_while(created_volume_id, ['creating']) # It should be available... self.assertEqual('available', found_volume['status']) @@ -94,7 +70,7 @@ class VolumesTest(functional_helpers._FunctionalTestBase): self.api.delete_volume(created_volume_id) # Wait (briefly) for deletion. Delay is due to the 'message queue' - found_volume = self._poll_while(created_volume_id, ['deleting']) + found_volume = self._poll_volume_while(created_volume_id, ['deleting']) # Should be gone self.assertFalse(found_volume) diff --git a/cinder/tests/tempest/api/volume/test_consistencygroups.py b/cinder/tests/tempest/api/volume/test_consistencygroups.py new file mode 100644 index 000000000..09dd84e31 --- /dev/null +++ b/cinder/tests/tempest/api/volume/test_consistencygroups.py @@ -0,0 +1,283 @@ +# Copyright (C) 2015 EMC Corporation. +# Copyright (C) 2016 Pure Storage, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
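With the change above, test_volumes.py drops its local _poll_while helper in favor of the shared _poll_volume_while added to functional_helpers.py, which both the volume and nested-quota tests now use. A sketch of how a test built on _FunctionalTestBase might call it; the test name, payload, and statuses are illustrative:

def test_create_then_delete(self):
    created = self.api.post_volume({'volume': {'size': 1}})

    # Poll past the transient 'creating' state; the helper asserts the
    # terminal status itself when expected_end_status is given.
    found = self._poll_volume_while(created['id'], ['creating'],
                                    expected_end_status='available')
    self.assertEqual('available', found['status'])

    self.api.delete_volume(created['id'])
    # The helper returns None once the volume 404s during deletion.
    self.assertIsNone(self._poll_volume_while(created['id'], ['deleting']))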
+ +from oslo_log import log as logging +from tempest.api.volume import base +from tempest.common import waiters +from tempest import config +from tempest.lib.common.utils import data_utils +from tempest import test + +from cinder.tests.tempest import cinder_clients + +CONF = config.CONF +LOG = logging.getLogger(__name__) + + +class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): + + @classmethod + def setup_clients(cls): + cls._api_version = 2 + super(ConsistencyGroupsV2Test, cls).setup_clients() + + manager = cinder_clients.Manager(cls.os_adm) + cls.consistencygroups_adm_client = manager.consistencygroups_adm_client + + @classmethod + def skip_checks(cls): + super(ConsistencyGroupsV2Test, cls).skip_checks() + if not CONF.cinder.consistency_group: + raise cls.skipException("Cinder consistency group " + "feature disabled") + + def _delete_consistencygroup(self, cg_id): + self.consistencygroups_adm_client.delete_consistencygroup(cg_id) + vols = self.admin_volume_client.list_volumes(detail=True)['volumes'] + for vol in vols: + if vol['consistencygroup_id'] == cg_id: + self.admin_volume_client.wait_for_resource_deletion(vol['id']) + self.consistencygroups_adm_client.wait_for_consistencygroup_deletion( + cg_id) + + def _delete_cgsnapshot(self, cgsnapshot_id, cg_id): + self.consistencygroups_adm_client.delete_cgsnapshot(cgsnapshot_id) + vols = self.admin_volume_client.list_volumes(detail=True)['volumes'] + snapshots = self.admin_snapshots_client.list_snapshots( + detail=True)['snapshots'] + for vol in vols: + for snap in snapshots: + if (vol['consistencygroup_id'] == cg_id and + vol['id'] == snap['volume_id']): + self.snapshots_client.wait_for_resource_deletion( + snap['id']) + self.consistencygroups_adm_client.wait_for_cgsnapshot_deletion( + cgsnapshot_id) + + @test.idempotent_id('3fe776ba-ec1f-4e6c-8d78-4b14c3a7fc44') + def test_consistencygroup_create_delete(self): + # Create volume type + name = data_utils.rand_name("volume-type") + volume_type = self.admin_volume_types_client.create_volume_type( + name=name)['volume_type'] + + # Create CG + cg_name = data_utils.rand_name('CG') + create_consistencygroup = ( + self.consistencygroups_adm_client.create_consistencygroup) + cg = create_consistencygroup(volume_type['id'], + name=cg_name)['consistencygroup'] + vol_name = data_utils.rand_name("volume") + self.name_field = self.special_fields['name_field'] + params = {self.name_field: vol_name, + 'volume_type': volume_type['id'], + 'consistencygroup_id': cg['id']} + + # Create volume + volume = self.admin_volume_client.create_volume(**params)['volume'] + + waiters.wait_for_volume_status(self.admin_volume_client, + volume['id'], 'available') + self.consistencygroups_adm_client.wait_for_consistencygroup_status( + cg['id'], 'available') + self.assertEqual(cg_name, cg['name']) + + # Get a given CG + cg = self.consistencygroups_adm_client.show_consistencygroup( + cg['id'])['consistencygroup'] + self.assertEqual(cg_name, cg['name']) + + # Get all CGs with detail + cgs = self.consistencygroups_adm_client.list_consistencygroups( + detail=True)['consistencygroups'] + self.assertIn((cg['name'], cg['id']), + [(m['name'], m['id']) for m in cgs]) + + # Clean up + self._delete_consistencygroup(cg['id']) + self.admin_volume_types_client.delete_volume_type(volume_type['id']) + + @test.idempotent_id('2134dd52-f333-4456-bb05-6cb0f009a44f') + def test_consistencygroup_cgsnapshot_create_delete(self): + # Create volume type + name = data_utils.rand_name("volume-type") + volume_type = 
self.admin_volume_types_client.create_volume_type( + name=name)['volume_type'] + + # Create CG + cg_name = data_utils.rand_name('CG') + create_consistencygroup = ( + self.consistencygroups_adm_client.create_consistencygroup) + cg = create_consistencygroup(volume_type['id'], + name=cg_name)['consistencygroup'] + vol_name = data_utils.rand_name("volume") + self.name_field = self.special_fields['name_field'] + params = {self.name_field: vol_name, + 'volume_type': volume_type['id'], + 'consistencygroup_id': cg['id']} + + # Create volume + volume = self.admin_volume_client.create_volume(**params)['volume'] + waiters.wait_for_volume_status(self.admin_volume_client, + volume['id'], 'available') + self.consistencygroups_adm_client.wait_for_consistencygroup_status( + cg['id'], 'available') + self.assertEqual(cg_name, cg['name']) + + # Create cgsnapshot + cgsnapshot_name = data_utils.rand_name('cgsnapshot') + create_cgsnapshot = ( + self.consistencygroups_adm_client.create_cgsnapshot) + cgsnapshot = create_cgsnapshot(cg['id'], + name=cgsnapshot_name)['cgsnapshot'] + snapshots = self.admin_snapshots_client.list_snapshots( + detail=True)['snapshots'] + for snap in snapshots: + if volume['id'] == snap['volume_id']: + waiters.wait_for_snapshot_status(self.admin_snapshots_client, + snap['id'], 'available') + self.consistencygroups_adm_client.wait_for_cgsnapshot_status( + cgsnapshot['id'], 'available') + self.assertEqual(cgsnapshot_name, cgsnapshot['name']) + + # Get a given CG snapshot + cgsnapshot = self.consistencygroups_adm_client.show_cgsnapshot( + cgsnapshot['id'])['cgsnapshot'] + self.assertEqual(cgsnapshot_name, cgsnapshot['name']) + + # Get all CG snapshots with detail + cgsnapshots = self.consistencygroups_adm_client.list_cgsnapshots( + detail=True)['cgsnapshots'] + self.assertIn((cgsnapshot['name'], cgsnapshot['id']), + [(m['name'], m['id']) for m in cgsnapshots]) + + # Clean up + self._delete_cgsnapshot(cgsnapshot['id'], cg['id']) + self._delete_consistencygroup(cg['id']) + self.admin_volume_types_client.delete_volume_type(volume_type['id']) + + @test.idempotent_id('3a6a5525-25ca-4a6c-aac4-cac6fa8f5b43') + def test_create_consistencygroup_from_cgsnapshot(self): + # Create volume type + name = data_utils.rand_name("volume-type") + volume_type = self.admin_volume_types_client.create_volume_type( + name=name)['volume_type'] + + # Create CG + cg_name = data_utils.rand_name('CG') + create_consistencygroup = ( + self.consistencygroups_adm_client.create_consistencygroup) + cg = create_consistencygroup(volume_type['id'], + name=cg_name)['consistencygroup'] + vol_name = data_utils.rand_name("volume") + self.name_field = self.special_fields['name_field'] + params = {self.name_field: vol_name, + 'volume_type': volume_type['id'], + 'consistencygroup_id': cg['id']} + + # Create volume + volume = self.admin_volume_client.create_volume(**params)['volume'] + waiters.wait_for_volume_status(self.admin_volume_client, + volume['id'], 'available') + self.consistencygroups_adm_client.wait_for_consistencygroup_status( + cg['id'], 'available') + self.assertEqual(cg_name, cg['name']) + + # Create cgsnapshot + cgsnapshot_name = data_utils.rand_name('cgsnapshot') + create_cgsnapshot = ( + self.consistencygroups_adm_client.create_cgsnapshot) + cgsnapshot = create_cgsnapshot(cg['id'], + name=cgsnapshot_name)['cgsnapshot'] + snapshots = self.snapshots_client.list_snapshots( + detail=True)['snapshots'] + for snap in snapshots: + if volume['id'] == snap['volume_id']: + 
waiters.wait_for_snapshot_status(self.admin_snapshots_client, + snap['id'], 'available') + self.consistencygroups_adm_client.wait_for_cgsnapshot_status( + cgsnapshot['id'], 'available') + self.assertEqual(cgsnapshot_name, cgsnapshot['name']) + + # Create CG from CG snapshot + cg_name2 = data_utils.rand_name('CG_from_snap') + create_consistencygroup2 = ( + self.consistencygroups_adm_client.create_consistencygroup_from_src) + cg2 = create_consistencygroup2(cgsnapshot_id=cgsnapshot['id'], + name=cg_name2)['consistencygroup'] + vols = self.admin_volume_client.list_volumes( + detail=True)['volumes'] + for vol in vols: + if vol['consistencygroup_id'] == cg2['id']: + waiters.wait_for_volume_status(self.admin_volume_client, + vol['id'], 'available') + self.consistencygroups_adm_client.wait_for_consistencygroup_status( + cg2['id'], 'available') + self.assertEqual(cg_name2, cg2['name']) + + # Clean up + self._delete_consistencygroup(cg2['id']) + self._delete_cgsnapshot(cgsnapshot['id'], cg['id']) + self._delete_consistencygroup(cg['id']) + self.admin_volume_types_client.delete_volume_type(volume_type['id']) + + @test.idempotent_id('556121ae-de9c-4342-9897-e54260447a19') + def test_create_consistencygroup_from_consistencygroup(self): + # Create volume type + name = data_utils.rand_name("volume-type") + volume_type = self.admin_volume_types_client.create_volume_type( + name=name)['volume_type'] + + # Create CG + cg_name = data_utils.rand_name('CG') + create_consistencygroup = ( + self.consistencygroups_adm_client.create_consistencygroup) + cg = create_consistencygroup(volume_type['id'], + name=cg_name)['consistencygroup'] + vol_name = data_utils.rand_name("volume") + self.name_field = self.special_fields['name_field'] + params = {self.name_field: vol_name, + 'volume_type': volume_type['id'], + 'consistencygroup_id': cg['id']} + + # Create volume + volume = self.admin_volume_client.create_volume(**params)['volume'] + waiters.wait_for_volume_status(self.admin_volume_client, + volume['id'], 'available') + self.consistencygroups_adm_client.wait_for_consistencygroup_status( + cg['id'], 'available') + self.assertEqual(cg_name, cg['name']) + + # Create CG from CG + cg_name2 = data_utils.rand_name('CG_from_cg') + create_consistencygroup2 = ( + self.consistencygroups_adm_client.create_consistencygroup_from_src) + cg2 = create_consistencygroup2(source_cgid=cg['id'], + name=cg_name2)['consistencygroup'] + vols = self.admin_volume_client.list_volumes( + detail=True)['volumes'] + for vol in vols: + if vol['consistencygroup_id'] == cg2['id']: + waiters.wait_for_volume_status(self.admin_volume_client, + vol['id'], 'available') + self.consistencygroups_adm_client.wait_for_consistencygroup_status( + cg2['id'], 'available') + self.assertEqual(cg_name2, cg2['name']) + + # Clean up + self._delete_consistencygroup(cg2['id']) + self._delete_consistencygroup(cg['id']) + self.admin_volume_types_client.delete_volume_type(volume_type['id']) diff --git a/cinder/tests/tempest/api/volume/test_volume_placeholder.py b/cinder/tests/tempest/api/volume/test_volume_placeholder.py index 4c82675f9..c0d3e32a5 100644 --- a/cinder/tests/tempest/api/volume/test_volume_placeholder.py +++ b/cinder/tests/tempest/api/volume/test_volume_placeholder.py @@ -13,11 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. 
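Each consistency group test above repeats the same volume-type / CG / member-volume setup before exercising snapshots or clones. A condensed sketch of that shared flow; the client attributes and waiters follow the diff above, while the helper itself is illustrative rather than part of the patch:

def _make_cg_with_volume(self):
    # Volume type and CG names are randomized exactly as in the tests.
    vtype = self.admin_volume_types_client.create_volume_type(
        name=data_utils.rand_name('volume-type'))['volume_type']
    cg = self.consistencygroups_adm_client.create_consistencygroup(
        vtype['id'], name=data_utils.rand_name('CG'))['consistencygroup']
    volume = self.admin_volume_client.create_volume(
        volume_type=vtype['id'],
        consistencygroup_id=cg['id'])['volume']
    # Both the member volume and the group must settle before use.
    waiters.wait_for_volume_status(self.admin_volume_client,
                                   volume['id'], 'available')
    self.consistencygroups_adm_client.wait_for_consistencygroup_status(
        cg['id'], 'available')
    return vtype, cg, volume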
-from tempest import config from tempest.tests import base -CONF = config.CONF - class CinderPlaceholderTest(base.TestCase): """Placeholder test for adding in-tree Cinder tempest tests.""" diff --git a/cinder/tests/tempest/api/volume/test_volume_unicode.py b/cinder/tests/tempest/api/volume/test_volume_unicode.py index d646050ce..8436699d6 100644 --- a/cinder/tests/tempest/api/volume/test_volume_unicode.py +++ b/cinder/tests/tempest/api/volume/test_volume_unicode.py @@ -17,9 +17,6 @@ from tempest.api.volume import base as volume_base from tempest.common.utils import data_utils from tempest.common import waiters -from tempest import config - -CONF = config.CONF class CinderUnicodeTest(volume_base.BaseVolumeTest): diff --git a/cinder/tests/tempest/cinder_clients.py b/cinder/tests/tempest/cinder_clients.py new file mode 100644 index 000000000..8f829ef0b --- /dev/null +++ b/cinder/tests/tempest/cinder_clients.py @@ -0,0 +1,37 @@ +# Copyright (c) 2016 Pure Storage, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest import config + +from cinder.tests.tempest.services import consistencygroups_client + +CONF = config.CONF + + +class Manager(object): + def __init__(self, base_manager): + params = { + 'service': CONF.volume.catalog_type, + 'region': CONF.volume.region or CONF.identity.region, + 'endpoint_type': CONF.volume.endpoint_type, + 'build_interval': CONF.volume.build_interval, + 'build_timeout': CONF.volume.build_timeout + } + params.update(base_manager.default_params) + auth_provider = base_manager.auth_provider + + self.consistencygroups_adm_client = ( + consistencygroups_client.ConsistencyGroupsClient(auth_provider, + **params)) diff --git a/cinder/tests/tempest/config.py b/cinder/tests/tempest/config.py index 72fd94158..d1a2db76e 100644 --- a/cinder/tests/tempest/config.py +++ b/cinder/tests/tempest/config.py @@ -24,3 +24,14 @@ ServiceAvailableGroup = [ default=True, help="Whether or not cinder is expected to be available"), ] + +# Use a new config group specific to the cinder in-tree tests to avoid +# any naming confusion with the upstream tempest config options. 
+cinder_group = cfg.OptGroup(name='cinder',
+                            title='Cinder Tempest Config Options')
+
+CinderGroup = [
+    cfg.BoolOpt('consistency_group',
+                default=False,
+                help='Enable Cinder volume consistency group tests'),
+]
diff --git a/cinder/tests/tempest/plugin.py b/cinder/tests/tempest/plugin.py
index 7760fb94f..ed7a912b3 100644
--- a/cinder/tests/tempest/plugin.py
+++ b/cinder/tests/tempest/plugin.py
@@ -17,6 +17,7 @@ import cinder
 import os

 from cinder.tests.tempest import config as project_config
+
 from tempest import config
 from tempest.test_discover import plugins

@@ -33,6 +34,15 @@ class CinderTempestPlugin(plugins.TempestPlugin):
         config.register_opt_group(
             conf, project_config.service_available_group,
             project_config.ServiceAvailableGroup)
+        config.register_opt_group(
+            conf, project_config.cinder_group,
+            project_config.CinderGroup
+        )

     def get_opt_lists(self):
-        pass
+        return [
+            (project_config.service_available_group.name,
+             project_config.ServiceAvailableGroup),
+            (project_config.cinder_group.name,
+             project_config.CinderGroup),
+        ]
diff --git a/cinder/tests/tempest/services/consistencygroups_client.py b/cinder/tests/tempest/services/consistencygroups_client.py
new file mode 100644
index 000000000..28853ecd1
--- /dev/null
+++ b/cinder/tests/tempest/services/consistencygroups_client.py
@@ -0,0 +1,192 @@
+# Copyright (C) 2015 EMC Corporation.
+# Copyright (C) 2016 Pure Storage, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
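The cinder option group registered above follows the standard oslo.config pattern: define a group and its options, register both, then read values off the parsed configuration. A self-contained sketch under that assumption; the local ConfigOpts instance here stands in for tempest's global CONF:

from oslo_config import cfg

cinder_group = cfg.OptGroup(name='cinder',
                            title='Cinder Tempest Config Options')
cinder_opts = [
    cfg.BoolOpt('consistency_group',
                default=False,
                help='Enable Cinder volume consistency group tests'),
]

conf = cfg.ConfigOpts()
conf.register_group(cinder_group)
conf.register_opts(cinder_opts, group=cinder_group)
conf([])  # parse no CLI/config input so the defaults become readable

# Tests gate on this value exactly as skip_checks() does above.
print(conf.cinder.consistency_group)  # -> False unless overridden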
+ +import time + +from oslo_serialization import jsonutils as json +from tempest import exceptions +from tempest.lib.common import rest_client +from tempest.lib import exceptions as lib_exc + + +class ConsistencyGroupsClient(rest_client.RestClient): + """Client class to send CRUD Volume ConsistencyGroup API requests""" + + def __init__(self, auth_provider, service, region, **kwargs): + super(ConsistencyGroupsClient, self).__init__( + auth_provider, service, region, **kwargs) + + def create_consistencygroup(self, volume_types, **kwargs): + """Creates a consistency group.""" + post_body = {'volume_types': volume_types} + if kwargs.get('availability_zone'): + post_body['availability_zone'] = kwargs.get('availability_zone') + if kwargs.get('name'): + post_body['name'] = kwargs.get('name') + if kwargs.get('description'): + post_body['description'] = kwargs.get('description') + post_body = json.dumps({'consistencygroup': post_body}) + resp, body = self.post('consistencygroups', post_body) + body = json.loads(body) + self.expected_success(202, resp.status) + return rest_client.ResponseBody(resp, body) + + def create_consistencygroup_from_src(self, **kwargs): + """Creates a consistency group from source.""" + post_body = {} + if kwargs.get('cgsnapshot_id'): + post_body['cgsnapshot_id'] = kwargs.get('cgsnapshot_id') + if kwargs.get('source_cgid'): + post_body['source_cgid'] = kwargs.get('source_cgid') + if kwargs.get('name'): + post_body['name'] = kwargs.get('name') + if kwargs.get('description'): + post_body['description'] = kwargs.get('description') + post_body = json.dumps({'consistencygroup-from-src': post_body}) + resp, body = self.post('consistencygroups/create_from_src', post_body) + body = json.loads(body) + self.expected_success(202, resp.status) + return rest_client.ResponseBody(resp, body) + + def delete_consistencygroup(self, cg_id): + """Delete a consistency group.""" + post_body = {'force': True} + post_body = json.dumps({'consistencygroup': post_body}) + resp, body = self.post('consistencygroups/%s/delete' % cg_id, + post_body) + self.expected_success(202, resp.status) + return rest_client.ResponseBody(resp, body) + + def show_consistencygroup(self, cg_id): + """Returns the details of a single consistency group.""" + url = "consistencygroups/%s" % str(cg_id) + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return rest_client.ResponseBody(resp, body) + + def list_consistencygroups(self, detail=False): + """Information for all the tenant's consistency groups.""" + url = "consistencygroups" + if detail: + url += "/detail" + resp, body = self.get(url) + body = json.loads(body) + self.expected_success(200, resp.status) + return rest_client.ResponseBody(resp, body) + + def create_cgsnapshot(self, consistencygroup_id, **kwargs): + """Creates a consistency group snapshot.""" + post_body = {'consistencygroup_id': consistencygroup_id} + if kwargs.get('name'): + post_body['name'] = kwargs.get('name') + if kwargs.get('description'): + post_body['description'] = kwargs.get('description') + post_body = json.dumps({'cgsnapshot': post_body}) + resp, body = self.post('cgsnapshots', post_body) + body = json.loads(body) + self.expected_success(202, resp.status) + return rest_client.ResponseBody(resp, body) + + def delete_cgsnapshot(self, cgsnapshot_id): + """Delete a consistency group snapshot.""" + resp, body = self.delete('cgsnapshots/%s' % (str(cgsnapshot_id))) + self.expected_success(202, resp.status) + return rest_client.ResponseBody(resp, body) + 
+    def show_cgsnapshot(self, cgsnapshot_id):
+        """Returns the details of a single consistency group snapshot."""
+        url = "cgsnapshots/%s" % str(cgsnapshot_id)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.expected_success(200, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_cgsnapshots(self, detail=False):
+        """Information for all the tenant's consistency group snapshots."""
+        url = "cgsnapshots"
+        if detail:
+            url += "/detail"
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.expected_success(200, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def wait_for_consistencygroup_status(self, cg_id, status):
+        """Waits for a consistency group to reach a given status."""
+        body = self.show_consistencygroup(cg_id)['consistencygroup']
+        cg_status = body['status']
+        start = int(time.time())
+
+        while cg_status != status:
+            time.sleep(self.build_interval)
+            body = self.show_consistencygroup(cg_id)['consistencygroup']
+            cg_status = body['status']
+            if cg_status == 'error':
+                raise exceptions.ConsistencyGroupException(cg_id=cg_id)
+
+            if int(time.time()) - start >= self.build_timeout:
+                message = ('Consistency group %s failed to reach %s status '
+                           '(current %s) within the required time (%s s).' %
+                           (cg_id, status, cg_status,
+                            self.build_timeout))
+                raise exceptions.TimeoutException(message)
+
+    def wait_for_consistencygroup_deletion(self, cg_id):
+        """Waits for consistency group deletion"""
+        start_time = int(time.time())
+        while True:
+            try:
+                self.show_consistencygroup(cg_id)
+            except lib_exc.NotFound:
+                return
+            if int(time.time()) - start_time >= self.build_timeout:
+                raise exceptions.TimeoutException
+            time.sleep(self.build_interval)
+
+    def wait_for_cgsnapshot_status(self, cgsnapshot_id, status):
+        """Waits for a consistency group snapshot to reach a given status."""
+        body = self.show_cgsnapshot(cgsnapshot_id)['cgsnapshot']
+        cgsnapshot_status = body['status']
+        start = int(time.time())
+
+        while cgsnapshot_status != status:
+            time.sleep(self.build_interval)
+            body = self.show_cgsnapshot(cgsnapshot_id)['cgsnapshot']
+            cgsnapshot_status = body['status']
+            if cgsnapshot_status == 'error':
+                raise exceptions.ConsistencyGroupSnapshotException(
+                    cgsnapshot_id=cgsnapshot_id)
+
+            if int(time.time()) - start >= self.build_timeout:
+                message = ('Consistency group snapshot %s failed to reach '
+                           '%s status (current %s) within the required time '
+                           '(%s s).' 
% + (cgsnapshot_id, status, cgsnapshot_status, + self.build_timeout)) + raise exceptions.TimeoutException(message) + + def wait_for_cgsnapshot_deletion(self, cgsnapshot_id): + """Waits for consistency group snapshot deletion""" + start_time = int(time.time()) + while True: + try: + self.show_cgsnapshot(cgsnapshot_id) + except lib_exc.NotFound: + return + if int(time.time()) - start_time >= self.build_timeout: + raise exceptions.TimeoutException + time.sleep(self.build_interval) diff --git a/cinder/tests/unit/api/contrib/test_admin_actions.py b/cinder/tests/unit/api/contrib/test_admin_actions.py index f01ecf486..6b05baeb1 100644 --- a/cinder/tests/unit/api/contrib/test_admin_actions.py +++ b/cinder/tests/unit/api/contrib/test_admin_actions.py @@ -13,7 +13,6 @@ import fixtures import mock from oslo_concurrency import lockutils -from oslo_config import cfg from oslo_config import fixture as config_fixture import oslo_messaging as messaging from oslo_serialization import jsonutils @@ -22,10 +21,12 @@ import webob from webob import exc from cinder.api.contrib import admin_actions +from cinder.common import constants from cinder import context from cinder import db from cinder import exception from cinder import objects +from cinder.objects import base as obj_base from cinder.objects import fields from cinder import test from cinder.tests.unit.api.contrib import test_backups @@ -35,8 +36,7 @@ from cinder.tests.unit import cast_as_call from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.volume import api as volume_api - -CONF = cfg.CONF +from cinder.volume import rpcapi def app(): @@ -84,6 +84,11 @@ class AdminActionsTest(BaseAdminTest): # start service to handle rpc messages for attach requests self.svc = self.start_service('volume', host='test') + self.patch( + 'cinder.objects.Service.get_minimum_obj_version', + return_value=obj_base.OBJ_VERSIONS.get_current()) + self.patch('cinder.objects.Service.get_minimum_rpc_version', + return_value=rpcapi.VolumeAPI.RPC_API_VERSION) def tearDown(self): self.svc.stop() @@ -468,11 +473,11 @@ class AdminActionsTest(BaseAdminTest): # create volume's current host and the destination host db.service_create(self.ctx, {'host': 'test', - 'topic': CONF.volume_topic, + 'topic': constants.VOLUME_TOPIC, 'created_at': timeutils.utcnow()}) db.service_create(self.ctx, {'host': 'test2', - 'topic': CONF.volume_topic, + 'topic': constants.VOLUME_TOPIC, 'created_at': timeutils.utcnow()}) # current status is available volume = self._create_volume(self.ctx) @@ -659,11 +664,12 @@ class AdminActionsTest(BaseAdminTest): vac.validate_update, {'status': 'creating'}) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.backup.rpcapi.BackupAPI.delete_backup', mock.Mock()) + @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.backup.api.API._check_support_to_force_delete') def _force_delete_backup_util(self, test_status, mock_check_support, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + mock_service_get_all): + mock_service_get_all.return_value = [ {'availability_zone': "az1", 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] # admin context @@ -739,24 +745,22 @@ class AdminActionsAttachDetachTest(BaseAdminTest): attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') # volume is attached - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('in-use', volume['status']) + volume = 
objects.Volume.get_by_id(self.ctx, volume.id) + self.assertEqual('in-use', volume.status) self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) - admin_metadata = volume['volume_admin_metadata'] + admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('False', admin_metadata[0]['value']) - self.assertEqual('attached_mode', admin_metadata[1]['key']) - self.assertEqual('rw', admin_metadata[1]['value']) + self.assertEqual('False', admin_metadata['readonly']) + self.assertEqual('rw', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) # build request to force detach req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) + fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' # request status of 'error' @@ -769,17 +773,16 @@ class AdminActionsAttachDetachTest(BaseAdminTest): resp = req.get_response(app()) # request is accepted self.assertEqual(202, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) + volume.refresh() self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctx, attachment['id']) # status changed to 'available' - self.assertEqual('available', volume['status']) - admin_metadata = volume['volume_admin_metadata'] + self.assertEqual('available', volume.status) + admin_metadata = volume.admin_metadata self.assertEqual(1, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key'], 'readonly') - self.assertEqual('False', admin_metadata[0]['value']) + self.assertEqual('False', admin_metadata['readonly']) def test_force_detach_host_attached_volume(self): # current status is available @@ -793,24 +796,22 @@ class AdminActionsAttachDetachTest(BaseAdminTest): attachment = self.volume_api.attach(self.ctx, volume, None, host_name, mountpoint, 'ro') # volume is attached - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('in-use', volume['status']) + volume.refresh() + self.assertEqual('in-use', volume.status) self.assertIsNone(attachment['instance_uuid']) self.assertEqual(host_name, attachment['attached_host']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) - admin_metadata = volume['volume_admin_metadata'] + admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('False', admin_metadata[0]['value']) - self.assertEqual('attached_mode', admin_metadata[1]['key']) - self.assertEqual('ro', admin_metadata[1]['value']) + self.assertEqual('False', admin_metadata['readonly']) + self.assertEqual('ro', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) # build request to force detach req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) + fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' # request status of 'error' @@ -823,16 +824,15 @@ class AdminActionsAttachDetachTest(BaseAdminTest): resp = req.get_response(app()) # request is accepted 
self.assertEqual(202, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) + volume.refresh() self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctx, attachment['id']) # status changed to 'available' self.assertEqual('available', volume['status']) - admin_metadata = volume['volume_admin_metadata'] + admin_metadata = volume['admin_metadata'] self.assertEqual(1, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('False', admin_metadata[0]['value']) + self.assertEqual('False', admin_metadata['readonly']) def test_volume_force_detach_raises_remote_error(self): # current status is available @@ -845,17 +845,15 @@ class AdminActionsAttachDetachTest(BaseAdminTest): attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') # volume is attached - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('in-use', volume['status']) + volume.refresh() + self.assertEqual('in-use', volume.status) self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) - admin_metadata = volume['volume_admin_metadata'] + admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('False', admin_metadata[0]['value']) - self.assertEqual('attached_mode', admin_metadata[1]['key']) - self.assertEqual('rw', admin_metadata[1]['value']) + self.assertEqual('False', admin_metadata['readonly']) + self.assertEqual('rw', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) @@ -866,7 +864,7 @@ class AdminActionsAttachDetachTest(BaseAdminTest): with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) + fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID}} @@ -883,7 +881,7 @@ class AdminActionsAttachDetachTest(BaseAdminTest): with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) + fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID}} @@ -901,7 +899,7 @@ class AdminActionsAttachDetachTest(BaseAdminTest): with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) + fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID, @@ -927,17 +925,15 @@ class AdminActionsAttachDetachTest(BaseAdminTest): attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') # volume is attached - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('in-use', volume['status']) + volume.refresh() + self.assertEqual('in-use', volume.status) self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) - admin_metadata = 
volume['volume_admin_metadata'] + admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('False', admin_metadata[0]['value']) - self.assertEqual('attached_mode', admin_metadata[1]['key']) - self.assertEqual('rw', admin_metadata[1]['value']) + self.assertEqual('False', admin_metadata['readonly']) + self.assertEqual('rw', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) @@ -947,7 +943,7 @@ class AdminActionsAttachDetachTest(BaseAdminTest): with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, volume['id'])) + (fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID, diff --git a/cinder/tests/unit/api/contrib/test_backups.py b/cinder/tests/unit/api/contrib/test_backups.py index 17fc97d7d..e4a02d82b 100644 --- a/cinder/tests/unit/api/contrib/test_backups.py +++ b/cinder/tests/unit/api/contrib/test_backups.py @@ -57,6 +57,8 @@ class BackupsAPITestCase(test.TestCase): self.user_context = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.controller = backups.BackupsController() + self.patch('cinder.objects.service.Service._get_minimum_version', + return_value=None) @staticmethod def _create_backup(volume_id=fake.VOLUME_ID, @@ -462,12 +464,12 @@ class BackupsAPITestCase(test.TestCase): fake_auth_context=self.user_context)) self.assertEqual(400, res.status_int) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_create_backup_json(self, mock_validate, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] @@ -491,15 +493,17 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(202, res.status_int) self.assertIn('id', res_dict['backup']) - self.assertTrue(_mock_service_get_all_by_topic.called) + _mock_service_get_all.assert_called_once_with(mock.ANY, + disabled=False, + topic='cinder-backup') self.assertTrue(mock_validate.called) db.volume_destroy(context.get_admin_context(), volume_id) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_create_backup_inuse_no_force(self, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] @@ -528,9 +532,9 @@ class BackupsAPITestCase(test.TestCase): db.volume_destroy(context.get_admin_context(), volume_id) - @mock.patch('cinder.db.service_get_all_by_topic') - def test_create_backup_inuse_force(self, _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + @mock.patch('cinder.db.service_get_all') + def test_create_backup_inuse_force(self, _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] @@ -557,17 +561,19 @@ class 
BackupsAPITestCase(test.TestCase): self.assertEqual(202, res.status_int) self.assertIn('id', res_dict['backup']) - self.assertTrue(_mock_service_get_all_by_topic.called) + _mock_service_get_all.assert_called_once_with(mock.ANY, + disabled=False, + topic='cinder-backup') db.backup_destroy(context.get_admin_context(), backup_id) db.volume_destroy(context.get_admin_context(), volume_id) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_create_backup_snapshot_json(self, mock_validate, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] @@ -591,7 +597,9 @@ class BackupsAPITestCase(test.TestCase): res_dict = jsonutils.loads(res.body) self.assertEqual(202, res.status_int) self.assertIn('id', res_dict['backup']) - self.assertTrue(_mock_service_get_all_by_topic.called) + _mock_service_get_all.assert_called_once_with(mock.ANY, + disabled=False, + topic='cinder-backup') self.assertTrue(mock_validate.called) db.volume_destroy(context.get_admin_context(), volume_id) @@ -705,14 +713,14 @@ class BackupsAPITestCase(test.TestCase): req, body) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') @ddt.data(False, True) def test_create_backup_delta(self, backup_from_snapshot, mock_validate, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] @@ -746,7 +754,9 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(202, res.status_int) self.assertIn('id', res_dict['backup']) - self.assertTrue(_mock_service_get_all_by_topic.called) + _mock_service_get_all.assert_called_once_with(mock.ANY, + disabled=False, + topic='cinder-backup') self.assertTrue(mock_validate.called) db.backup_destroy(context.get_admin_context(), backup_id) @@ -754,10 +764,10 @@ class BackupsAPITestCase(test.TestCase): snapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_create_incremental_backup_invalid_status( - self, _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + self, _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] @@ -869,12 +879,12 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_create_backup_WithOUT_enabled_backup_service( self, - _mock_service_get_all_by_topic): + _mock_service_get_all): # need an enabled backup service available - _mock_service_get_all_by_topic.return_value = [] + _mock_service_get_all.return_value = [] volume_id = utils.create_volume(self.context, size=2).id req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) @@ -901,10 +911,10 @@ class BackupsAPITestCase(test.TestCase): volume = 
self.volume_api.get(context.get_admin_context(), volume_id) self.assertEqual('available', volume['status']) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_create_incremental_backup_invalid_no_full( - self, _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + self, _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] @@ -934,8 +944,8 @@ class BackupsAPITestCase(test.TestCase): db.volume_destroy(context.get_admin_context(), volume_id) - @mock.patch('cinder.db.service_get_all_by_topic') - def test_is_backup_service_enabled(self, _mock_service_get_all_by_topic): + @mock.patch('cinder.db.service_get_all') + def test_is_backup_service_enabled(self, _mock_service_get_all): testhost = 'test_host' alt_host = 'strange_host' @@ -960,12 +970,12 @@ class BackupsAPITestCase(test.TestCase): 'disabled': 0, 'updated_at': timeutils.utcnow()}] # Setup mock to run through the following service cases - _mock_service_get_all_by_topic.side_effect = [empty_service, - host_not_match, - az_not_match, - disabled_service, - dead_service, - multi_services] + _mock_service_get_all.side_effect = [empty_service, + host_not_match, + az_not_match, + disabled_service, + dead_service, + multi_services] volume_id = utils.create_volume(self.context, size=2, host=testhost).id @@ -1006,10 +1016,9 @@ class BackupsAPITestCase(test.TestCase): volume['availability_zone'], testhost)) - @mock.patch('cinder.db.service_get_all_by_topic') - def test_get_available_backup_service(self, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + @mock.patch('cinder.db.service_get_all') + def test_get_available_backup_service(self, _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost1', 'disabled': 0, 'updated_at': timeutils.utcnow()}, {'availability_zone': 'az2', 'host': 'testhost2', @@ -1026,10 +1035,10 @@ class BackupsAPITestCase(test.TestCase): 'testhost4', 'az1') self.assertEqual('testhost1', actual_host) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_get_available_backup_service_with_same_host( - self, _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + self, _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost1', 'disabled': 0, 'updated_at': timeutils.utcnow()}, {'availability_zone': 'az2', 'host': 'testhost2', @@ -1045,10 +1054,9 @@ class BackupsAPITestCase(test.TestCase): self.backup_api._get_available_backup_service_host, 'testhost4', 'az1') - @mock.patch('cinder.db.service_get_all_by_topic') - def test_delete_backup_available( - self, _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + @mock.patch('cinder.db.service_get_all') + def test_delete_backup_available(self, _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) @@ -1065,10 +1073,10 @@ class BackupsAPITestCase(test.TestCase): db.backup_destroy(context.get_admin_context(), backup_id) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_delete_delta_backup(self, - 
_mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) @@ -1088,10 +1096,10 @@ class BackupsAPITestCase(test.TestCase): db.backup_destroy(context.get_admin_context(), delta_id) db.backup_destroy(context.get_admin_context(), backup_id) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_delete_backup_error(self, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] backup_id = self._create_backup(status=fields.BackupStatus.ERROR) @@ -1141,10 +1149,10 @@ class BackupsAPITestCase(test.TestCase): db.backup_destroy(context.get_admin_context(), backup_id) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_delete_backup_with_InvalidBackup2(self, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] volume_id = utils.create_volume(self.context, size=5).id @@ -1170,10 +1178,10 @@ class BackupsAPITestCase(test.TestCase): db.backup_destroy(context.get_admin_context(), delta_backup_id) db.backup_destroy(context.get_admin_context(), backup_id) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_delete_backup_service_down(self, - _mock_service_get_all_by_topic): - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all): + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': '1775-04-19 05:00:00'}] backup_id = self._create_backup(status='available') @@ -1256,18 +1264,17 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual("Missing required element 'restore' in request body.", res_dict['badRequest']['message']) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.volume.api.API.create') def test_restore_backup_volume_id_unspecified( - self, _mock_volume_api_create, - _mock_service_get_all_by_topic): + self, _mock_volume_api_create, _mock_service_get_all): # intercept volume creation to ensure created volume # has status of available def fake_volume_api_create(context, size, name, description): volume_id = utils.create_volume(self.context, size=size).id return db.volume_get(context, volume_id) - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] _mock_volume_api_create.side_effect = fake_volume_api_create @@ -1288,11 +1295,11 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(202, res.status_int) self.assertEqual(backup_id, res_dict['restore']['backup_id']) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.volume.api.API.create') def test_restore_backup_name_specified(self, _mock_volume_api_create, - _mock_service_get_all_by_topic): + 
_mock_service_get_all): # Intercept volume creation to ensure created volume # has status of available def fake_volume_api_create(context, size, name, description): @@ -1301,7 +1308,7 @@ class BackupsAPITestCase(test.TestCase): return db.volume_get(context, volume_id) _mock_volume_api_create.side_effect = fake_volume_api_create - _mock_service_get_all_by_topic.return_value = [ + _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] diff --git a/cinder/tests/unit/api/contrib/test_cgsnapshots.py b/cinder/tests/unit/api/contrib/test_cgsnapshots.py index 6d1c98a16..9f30855e8 100644 --- a/cinder/tests/unit/api/contrib/test_cgsnapshots.py +++ b/cinder/tests/unit/api/contrib/test_cgsnapshots.py @@ -214,6 +214,39 @@ class CgsnapshotsAPITestCase(test.TestCase): context.get_admin_context(), res_dict['cgsnapshot']['id']) cgsnapshot.destroy() + @mock.patch( + 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') + def test_create_cgsnapshot_when_volume_in_error_status(self, + mock_validate): + consistencygroup = utils.create_consistencygroup(self.context) + utils.create_volume( + self.context, + status='error', + consistencygroup_id=consistencygroup.id + ) + body = {"cgsnapshot": {"name": "cg1", + "description": + "CG Snapshot 1", + "consistencygroup_id": consistencygroup.id}} + req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = jsonutils.dump_as_bytes(body) + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_ctxt)) + res_dict = jsonutils.loads(res.body) + + self.assertEqual(400, res.status_int) + self.assertEqual(400, res_dict['badRequest']['code']) + self.assertEqual( + "Invalid volume: The snapshot cannot be created when the volume " + "is in error status.", + res_dict['badRequest']['message'] + ) + self.assertTrue(mock_validate.called) + + consistencygroup.destroy() + def test_create_cgsnapshot_with_no_body(self): # omit body from the request req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) diff --git a/cinder/tests/unit/api/contrib/test_consistencygroups.py b/cinder/tests/unit/api/contrib/test_consistencygroups.py index 18af0d0e0..75498a24c 100644 --- a/cinder/tests/unit/api/contrib/test_consistencygroups.py +++ b/cinder/tests/unit/api/contrib/test_consistencygroups.py @@ -1205,7 +1205,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase): cg = objects.ConsistencyGroup.get_by_id( self.ctxt, res_dict['consistencygroup']['id']) - cg.destroy + cg.destroy() db.volume_destroy(self.ctxt.elevated(), volume_id) source_cg.destroy() diff --git a/cinder/tests/unit/api/contrib/test_hosts.py b/cinder/tests/unit/api/contrib/test_hosts.py index f482a172d..042dc43d5 100644 --- a/cinder/tests/unit/api/contrib/test_hosts.py +++ b/cinder/tests/unit/api/contrib/test_hosts.py @@ -21,7 +21,7 @@ import webob.exc from cinder.api.contrib import hosts as os_hosts from cinder import context -from cinder import db +from cinder import exception from cinder import test @@ -69,10 +69,6 @@ def stub_utcnow(with_timezone=False): return datetime.datetime(2013, 7, 3, 0, 0, 2, tzinfo=tzinfo) -def stub_service_get_all(context, filters=None): - return SERVICE_LIST - - class FakeRequest(object): environ = {'cinder.context': context.get_admin_context()} GET = {} @@ -90,9 +86,9 @@ class HostTestCase(test.TestCase): super(HostTestCase, self).setUp() self.controller = os_hosts.HostController() self.req = 
FakeRequest() - self.stubs.Set(db, 'service_get_all', - stub_service_get_all) - self.stubs.Set(timeutils, 'utcnow', stub_utcnow) + self.patch('cinder.db.service_get_all', autospec=True, + return_value=SERVICE_LIST) + self.mock_object(timeutils, 'utcnow', stub_utcnow) def _test_host_update(self, host, key, val, expected_value): body = {key: val} @@ -139,7 +135,7 @@ class HostTestCase(test.TestCase): self.req, 'test.host.1', body=body) def test_bad_host(self): - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.HostNotFound, self.controller.update, self.req, 'bogus_host_name', @@ -157,6 +153,6 @@ class HostTestCase(test.TestCase): """A host given as an argument does not exists.""" self.req.environ['cinder.context'].is_admin = True dest = 'dummydest' - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.ServiceNotFound, self.controller.show, self.req, dest) diff --git a/cinder/tests/unit/api/contrib/test_qos_specs_manage.py b/cinder/tests/unit/api/contrib/test_qos_specs_manage.py index 1b8cd12b3..ca672a058 100644 --- a/cinder/tests/unit/api/contrib/test_qos_specs_manage.py +++ b/cinder/tests/unit/api/contrib/test_qos_specs_manage.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import ddt import mock import webob @@ -21,6 +22,7 @@ from cinder.api.contrib import qos_specs_manage from cinder import context from cinder import db from cinder import exception +from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake @@ -37,7 +39,7 @@ def stub_qos_specs(id): "key4": "value4", "key5": "value5"} res.update(dict(specs=specs)) - return res + return objects.QualityOfServiceSpecs(**res) def stub_qos_associates(id): @@ -97,14 +99,11 @@ def return_qos_specs_create(context, name, specs): raise exception.QoSSpecsCreateFailed(name=id, qos_specs=specs) elif name == 'qos_spec_%s' % fake.INVALID_ID: raise exception.InvalidQoSSpecs(reason=name) - pass - -def return_qos_specs_get_by_name(context, name): - if name == 'qos_spec_%s' % fake.WILL_NOT_BE_FOUND_ID: - raise exception.QoSSpecsNotFound(specs_id=name) - - return stub_qos_specs(name.split("_")[2]) + return objects.QualityOfServiceSpecs(name=name, + specs=specs, + consumer='back-end', + id=fake.QOS_SPEC_ID) def return_get_qos_associations(context, id): @@ -141,6 +140,7 @@ def return_disassociate_all(context, id): type_id=None) +@ddt.ddt class QoSSpecManageApiTest(test.TestCase): def _create_qos_specs(self, name, values=None): @@ -149,8 +149,8 @@ class QoSSpecManageApiTest(test.TestCase): specs = dict(name=name, qos_specs=values) else: specs = {'name': name, - 'qos_specs': { - 'consumer': 'back-end', + 'consumer': 'back-end', + 'specs': { 'key1': 'value1', 'key2': 'value2'}} return db.qos_specs_create(self.ctxt, specs)['id'] @@ -288,8 +288,9 @@ class QoSSpecManageApiTest(test.TestCase): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, - req, fake.WILL_NOT_BE_FOUND_ID) + self.assertRaises(exception.QoSSpecsNotFound, + self.controller.delete, req, + fake.WILL_NOT_BE_FOUND_ID) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', @@ -355,7 +356,7 @@ class QoSSpecManageApiTest(test.TestCase): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): 
- self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.QoSSpecsNotFound, self.controller.delete_keys, req, fake.WILL_NOT_BE_FOUND_ID, body) self.assertEqual(1, notifier.get_notification_count()) @@ -369,7 +370,7 @@ class QoSSpecManageApiTest(test.TestCase): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.assertRaises(webob.exc.HTTPBadRequest, + self.assertRaises(exception.QoSSpecsKeyNotFound, self.controller.delete_keys, req, fake.IN_USE_ID, body) self.assertEqual(1, notifier.get_notification_count()) @@ -389,11 +390,8 @@ class QoSSpecManageApiTest(test.TestCase): @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) - @mock.patch('cinder.volume.qos_specs.get_qos_specs_by_name', - side_effect=return_qos_specs_get_by_name) - @mock.patch('cinder.api.openstack.wsgi.Controller.validate_string_length') - def test_create(self, mock_validate, mock_qos_get_specs, - mock_qos_spec_create): + @mock.patch('cinder.utils.validate_dictionary_string_length') + def test_create(self, mock_validate, mock_qos_spec_create): body = {"qos_specs": {"name": "qos_specs_%s" % fake.QOS_SPEC_ID, "key1": "value1"}} @@ -423,9 +421,7 @@ class QoSSpecManageApiTest(test.TestCase): @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) - @mock.patch('cinder.volume.qos_specs.get_qos_specs_by_name', - side_effect=return_qos_specs_get_by_name) - def test_create_conflict(self, mock_qos_get_specs, mock_qos_spec_create): + def test_create_conflict(self, mock_qos_spec_create): body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ALREADY_EXISTS_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID) @@ -438,9 +434,7 @@ class QoSSpecManageApiTest(test.TestCase): @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) - @mock.patch('cinder.volume.qos_specs.get_qos_specs_by_name', - side_effect=return_qos_specs_get_by_name) - def test_create_failed(self, mock_qos_get_specs, mock_qos_spec_create): + def test_create_failed(self, mock_qos_spec_create): body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ACTION_FAILED_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID) @@ -451,26 +445,39 @@ class QoSSpecManageApiTest(test.TestCase): self.controller.create, req, body) self.assertEqual(1, notifier.get_notification_count()) - def _create_qos_specs_bad_body(self, body): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID) + @ddt.data({'foo': {'a': 'b'}}, + {'qos_specs': {'a': 'b'}}, + {'qos_specs': 'string'}, + None) + def test_create_invalid_body_bad_request(self, body): + req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, + use_admin_context=True) req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) - def test_create_no_body(self): - self._create_qos_specs_bad_body(body=None) + @ddt.data({'name': 'fake_name', 'a' * 256: 'a'}, + {'name': 'fake_name', 'a': 'a' * 256}, + {'name': 'fake_name', '': 'a'}) + def test_create_qos_with_invalid_specs(self, value): + body = {'qos_specs': value} + req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, + use_admin_context=True) + req.method = 'POST' + self.assertRaises(exception.InvalidInput, + self.controller.create, req, body) - def test_create_invalid_body(self): - body = {'foo': {'a': 'b'}} - self._create_qos_specs_bad_body(body=body) - - def 
test_create_missing_specs_name(self): - body = {'qos_specs': {'a': 'b'}} - self._create_qos_specs_bad_body(body=body) - - def test_create_malformed_entity(self): - body = {'qos_specs': 'string'} - self._create_qos_specs_bad_body(body=body) + @ddt.data({'name': None}, + {'name': 'n' * 256}, + {'name': ''}, + {'name': ' '}) + def test_create_qos_with_invalid_spec_name(self, value): + body = {'qos_specs': value} + req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, + use_admin_context=True) + req.method = 'POST' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) @@ -495,7 +502,8 @@ class QoSSpecManageApiTest(test.TestCase): fake.WILL_NOT_BE_FOUND_ID)) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} - self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, + self.assertRaises(exception.QoSSpecsNotFound, + self.controller.update, req, fake.WILL_NOT_BE_FOUND_ID, body) self.assertEqual(1, notifier.get_notification_count()) @@ -508,7 +516,7 @@ class QoSSpecManageApiTest(test.TestCase): (fake.PROJECT_ID, fake.INVALID_ID)) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} - self.assertRaises(webob.exc.HTTPBadRequest, + self.assertRaises(exception.InvalidQoSSpecs, self.controller.update, req, fake.INVALID_ID, body) self.assertEqual(1, notifier.get_notification_count()) @@ -558,7 +566,7 @@ class QoSSpecManageApiTest(test.TestCase): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/associations' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.QoSSpecsNotFound, self.controller.associations, req, fake.WILL_NOT_BE_FOUND_ID) @@ -603,7 +611,7 @@ class QoSSpecManageApiTest(test.TestCase): '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID, fake.VOLUME_TYPE_ID)) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.QoSSpecsNotFound, self.controller.associate, req, fake.WILL_NOT_BE_FOUND_ID) @@ -611,7 +619,7 @@ class QoSSpecManageApiTest(test.TestCase): '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.WILL_NOT_BE_FOUND_ID)) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeTypeNotFound, self.controller.associate, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', @@ -658,14 +666,14 @@ class QoSSpecManageApiTest(test.TestCase): '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID, fake.VOLUME_TYPE_ID)) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.QoSSpecsNotFound, self.controller.disassociate, req, fake.WILL_NOT_BE_FOUND_ID) req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % (fake.PROJECT_ID, fake.VOLUME_TYPE_ID, fake.WILL_NOT_BE_FOUND_ID)) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeTypeNotFound, self.controller.disassociate, req, fake.VOLUME_TYPE_ID) @@ -700,7 +708,7 @@ class QoSSpecManageApiTest(test.TestCase): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate_all' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.QoSSpecsNotFound, self.controller.disassociate_all, req, fake.WILL_NOT_BE_FOUND_ID) diff --git a/cinder/tests/unit/api/contrib/test_quotas.py 
b/cinder/tests/unit/api/contrib/test_quotas.py index b38c4b60f..602938a1e 100644 --- a/cinder/tests/unit/api/contrib/test_quotas.py +++ b/cinder/tests/unit/api/contrib/test_quotas.py @@ -80,11 +80,13 @@ class QuotaSetsControllerTestBase(test.TestCase): class FakeProject(object): - def __init__(self, id=fake.PROJECT_ID, parent_id=None): + def __init__(self, id=fake.PROJECT_ID, parent_id=None, + is_admin_project=False): self.id = id self.parent_id = parent_id self.subtree = None self.parents = None + self.is_admin_project = is_admin_project def setUp(self): super(QuotaSetsControllerTestBase, self).setUp() @@ -148,7 +150,7 @@ class QuotaSetsControllerTestBase(test.TestCase): self.C.id: self.C, self.D.id: self.D} def _get_project(self, context, id, subtree_as_ids=False, - parents_as_ids=False): + parents_as_ids=False, is_admin_project=False): return self.project_by_id.get(id, self.FakeProject()) def _create_fake_quota_usages(self, usage_map): @@ -616,6 +618,15 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase): expected = make_subproject_body(tenant_id=self.D.id) self.assertDictMatch(expected, result) + def test_subproject_show_not_in_hierarchy_admin_context(self): + E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None, + is_admin_project=True) + self.project_by_id[E.id] = E + self.req.environ['cinder.context'].project_id = E.id + result = self.controller.show(self.req, self.B.id) + expected = make_subproject_body(tenant_id=self.B.id) + self.assertDictMatch(expected, result) + def test_subproject_show_target_project_equals_to_context_project( self): self.req.environ['cinder.context'].project_id = self.B.id @@ -653,6 +664,27 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase): self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, self.req, F.id, body) + def test_update_subproject_not_in_hierarchy_admin_context(self): + E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None, + is_admin_project=True) + self.project_by_id[E.id] = E + self.req.environ['cinder.context'].project_id = E.id + body = make_body(gigabytes=2000, snapshots=15, + volumes=5, backups=5, tenant_id=None) + # Update the project A quota, not in the project hierarchy + # of E but it will be allowed because E is the cloud admin. + result = self.controller.update(self.req, self.A.id, body) + self.assertDictMatch(body, result) + # Update the quota of B to be equal to its parent A. + result = self.controller.update(self.req, self.B.id, body) + self.assertDictMatch(body, result) + # Remove the admin role from project E + E.is_admin_project = False + # Now updating the quota of B will fail, because it is not + # a member of E's hierarchy and E is no longer a cloud admin. + self.assertRaises(webob.exc.HTTPForbidden, + self.controller.update, self.req, self.B.id, body) + def test_update_subproject(self): # Update the project A quota. 
self.req.environ['cinder.context'].project_id = self.A.id diff --git a/cinder/tests/unit/api/contrib/test_services.py b/cinder/tests/unit/api/contrib/test_services.py index 9c330060a..96dbf6d10 100644 --- a/cinder/tests/unit/api/contrib/test_services.py +++ b/cinder/tests/unit/api/contrib/test_services.py @@ -17,15 +17,14 @@ import datetime from iso8601 import iso8601 -from oslo_utils import timeutils +import mock import webob.exc from cinder.api.contrib import services from cinder.api import extensions +from cinder.api.openstack import api_version_request as api_version from cinder import context -from cinder import db from cinder import exception -from cinder import policy from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake @@ -34,6 +33,7 @@ from cinder.tests.unit import fake_constants as fake fake_services_list = [ {'binary': 'cinder-scheduler', 'host': 'host1', + 'cluster_name': None, 'availability_zone': 'cinder', 'id': 1, 'disabled': True, @@ -43,6 +43,7 @@ fake_services_list = [ 'modified_at': ''}, {'binary': 'cinder-volume', 'host': 'host1', + 'cluster_name': None, 'availability_zone': 'cinder', 'id': 2, 'disabled': True, @@ -52,6 +53,7 @@ fake_services_list = [ 'modified_at': ''}, {'binary': 'cinder-scheduler', 'host': 'host2', + 'cluster_name': 'cluster1', 'availability_zone': 'cinder', 'id': 3, 'disabled': False, @@ -61,6 +63,7 @@ fake_services_list = [ 'modified_at': ''}, {'binary': 'cinder-volume', 'host': 'host2', + 'cluster_name': 'cluster1', 'availability_zone': 'cinder', 'id': 4, 'disabled': True, @@ -70,6 +73,7 @@ fake_services_list = [ 'modified_at': ''}, {'binary': 'cinder-volume', 'host': 'host2', + 'cluster_name': 'cluster2', 'availability_zone': 'cinder', 'id': 5, 'disabled': True, @@ -79,6 +83,7 @@ fake_services_list = [ 'modified_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', + 'cluster_name': 'cluster2', 'availability_zone': 'cinder', 'id': 6, 'disabled': False, @@ -88,8 +93,9 @@ fake_services_list = [ 'modified_at': datetime.datetime(2012, 9, 18, 8, 1, 38)}, {'binary': 'cinder-scheduler', 'host': 'host2', + 'cluster_name': None, 'availability_zone': 'cinder', - 'id': 6, + 'id': 7, 'disabled': False, 'updated_at': None, 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), @@ -100,53 +106,65 @@ fake_services_list = [ class FakeRequest(object): environ = {"cinder.context": context.get_admin_context()} - GET = {} + + def __init__(self, version='3.0', **kwargs): + self.GET = kwargs + self.headers = {'OpenStack-API-Version': 'volume ' + version} + self.api_version_request = api_version.APIVersionRequest(version) # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compatibility sake. 
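The rewritten FakeRequest above is what lets these fakes carry an API microversion: query parameters land in GET, the constructor fabricates the OpenStack-API-Version header, and the parsed APIVersionRequest rides along on the request. Below is a minimal sketch of how a controller might consume such a request; the index() body and its service list are illustrative stand-ins rather than code from this patch, and it assumes APIVersionRequest objects support ordering comparisons, as cinder.api.openstack.api_version_request defines them.

    from cinder.api.openstack import api_version_request as api_version

    V3_7 = api_version.APIVersionRequest('3.7')

    def index(req):
        # Hypothetical controller body: the 'cluster' field that this
        # patch adds to the services listing is only exposed once the
        # caller has negotiated microversion 3.7 or later.
        services = [{'binary': 'cinder-volume', 'host': 'host2',
                     'cluster_name': 'cluster1'}]
        body = []
        for svc in services:
            entry = {'binary': svc['binary'], 'host': svc['host']}
            if req.api_version_request >= V3_7:
                entry['cluster'] = svc['cluster_name']
            body.append(entry)
        return {'services': body}

With that in place, FakeRequest(version='3.7') exercises the new response shape while the default of '3.0' keeps the pre-cluster behaviour, which is exactly the split that test_services_list_with_cluster_name (further down) relies on.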
-class FakeRequestWithService(object): - environ = {"cinder.context": context.get_admin_context()} - GET = {"service": "cinder-volume"} +class FakeRequestWithService(FakeRequest): + def __init__(self, **kwargs): + kwargs.setdefault('service', 'cinder-volume') + super(FakeRequestWithService, self).__init__(**kwargs) -class FakeRequestWithBinary(object): - environ = {"cinder.context": context.get_admin_context()} - GET = {"binary": "cinder-volume"} +class FakeRequestWithBinary(FakeRequest): + def __init__(self, **kwargs): + kwargs.setdefault('binary', 'cinder-volume') + super(FakeRequestWithBinary, self).__init__(**kwargs) -class FakeRequestWithHost(object): - environ = {"cinder.context": context.get_admin_context()} - GET = {"host": "host1"} +class FakeRequestWithHost(FakeRequest): + def __init__(self, **kwargs): + kwargs.setdefault('host', 'host1') + super(FakeRequestWithHost, self).__init__(**kwargs) # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compatibility sake. -class FakeRequestWithHostService(object): - environ = {"cinder.context": context.get_admin_context()} - GET = {"host": "host1", "service": "cinder-volume"} +class FakeRequestWithHostService(FakeRequestWithService): + def __init__(self, **kwargs): + kwargs.setdefault('host', 'host1') + super(FakeRequestWithHostService, self).__init__(**kwargs) -class FakeRequestWithHostBinary(object): - environ = {"cinder.context": context.get_admin_context()} - GET = {"host": "host1", "binary": "cinder-volume"} +class FakeRequestWithHostBinary(FakeRequestWithBinary): + def __init__(self, **kwargs): + kwargs.setdefault('host', 'host1') + super(FakeRequestWithHostBinary, self).__init__(**kwargs) -def fake_service_get_all(context, filters=None): - filters = filters or {} - host = filters.get('host') - binary = filters.get('binary') - return [s for s in fake_services_list - if (not host or s['host'] == host or - s['host'].startswith(host + '@')) - and (not binary or s['binary'] == binary)] - - -def fake_service_get_by_host_binary(context, host, binary): +def fake_service_get_all(context, **filters): + result = [] + host = filters.pop('host', None) for service in fake_services_list: - if service['host'] == host and service['binary'] == binary: - return service - return None + if (host and service['host'] != host and + not service['host'].startswith(host + '@')): + continue + + if all(v is None or service.get(k) == v for k, v in filters.items()): + result.append(service) + return result + + +def fake_service_get(context, service_id=None, **filters): + result = fake_service_get_all(context, id=service_id, **filters) + if not result: + raise exception.ServiceNotFound(service_id=service_id) + return result[0] def fake_service_get_by_id(value): @@ -174,18 +192,16 @@ def fake_utcnow(with_timezone=False): return datetime.datetime(2012, 10, 29, 13, 42, 11, tzinfo=tzinfo) +@mock.patch('cinder.db.service_get_all', fake_service_get_all) +@mock.patch('cinder.db.service_get', fake_service_get) +@mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow) +@mock.patch('cinder.db.sqlalchemy.api.service_update', fake_service_update) +@mock.patch('cinder.policy.enforce', fake_policy_enforce) class ServicesTest(test.TestCase): def setUp(self): super(ServicesTest, self).setUp() - self.stubs.Set(db, "service_get_all", fake_service_get_all) - self.stubs.Set(timeutils, "utcnow", fake_utcnow) - self.stubs.Set(db, "service_get_by_args", - fake_service_get_by_host_binary) - self.stubs.Set(db, "service_update", 
fake_service_update) - self.stubs.Set(policy, "enforce", fake_policy_enforce) - self.context = context.get_admin_context() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} @@ -237,6 +253,59 @@ class ServicesTest(test.TestCase): ]} self.assertEqual(response, res_dict) + def test_services_list_with_cluster_name(self): + req = FakeRequest(version='3.7') + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-scheduler', + 'cluster': None, + 'host': 'host1', 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime.datetime( + 2012, 10, 29, 13, 42, 2)}, + {'binary': 'cinder-volume', + 'cluster': None, + 'host': 'host1', 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime.datetime( + 2012, 10, 29, 13, 42, 5)}, + {'binary': 'cinder-scheduler', + 'cluster': 'cluster1', + 'host': 'host2', + 'zone': 'cinder', + 'status': 'enabled', 'state': 'down', + 'updated_at': datetime.datetime( + 2012, 9, 19, 6, 55, 34)}, + {'binary': 'cinder-volume', + 'cluster': 'cluster1', + 'host': 'host2', + 'zone': 'cinder', + 'status': 'disabled', 'state': 'down', + 'updated_at': datetime.datetime( + 2012, 9, 18, 8, 3, 38)}, + {'binary': 'cinder-volume', + 'cluster': 'cluster2', + 'host': 'host2', + 'zone': 'cinder', + 'status': 'disabled', 'state': 'down', + 'updated_at': datetime.datetime( + 2012, 10, 29, 13, 42, 5)}, + {'binary': 'cinder-volume', + 'cluster': 'cluster2', + 'host': 'host2', + 'zone': 'cinder', + 'status': 'enabled', 'state': 'down', + 'updated_at': datetime.datetime( + 2012, 9, 18, 8, 3, 38)}, + {'binary': 'cinder-scheduler', + 'cluster': None, + 'host': 'host2', + 'zone': 'cinder', + 'status': 'enabled', 'state': 'down', + 'updated_at': None}, + ]} + self.assertEqual(response, res_dict) + def test_services_detail(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) diff --git a/cinder/tests/unit/api/contrib/test_snapshot_manage.py b/cinder/tests/unit/api/contrib/test_snapshot_manage.py index 5c4592400..8d55f5912 100644 --- a/cinder/tests/unit/api/contrib/test_snapshot_manage.py +++ b/cinder/tests/unit/api/contrib/test_snapshot_manage.py @@ -24,6 +24,7 @@ import webob from cinder import context from cinder import exception +from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake @@ -42,8 +43,9 @@ def app(): def volume_get(self, context, volume_id, viewable_admin_meta=False): if volume_id == fake.VOLUME_ID: - return {'id': fake.VOLUME_ID, 'name': 'fake_volume_name', - 'host': 'fake_host'} + return objects.Volume(context, id=fake.VOLUME_ID, + _name_id=fake.VOLUME2_ID, + host='fake_host') raise exception.VolumeNotFound(volume_id=volume_id) @@ -107,10 +109,10 @@ class SnapshotManageTest(test.TestCase): @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') @mock.patch('cinder.volume.api.API.create_snapshot_in_db') - @mock.patch('cinder.db.service_get_by_args') + @mock.patch('cinder.db.service_get') def test_manage_snapshot_ok(self, mock_db, mock_create_snapshot, mock_rpcapi): - """Test successful manage volume execution. + """Test successful manage snapshot execution. Tests for correct operation when valid arguments are passed in the request body. 
We ensure that cinder.volume.api.API.manage_existing got @@ -124,11 +126,9 @@ class SnapshotManageTest(test.TestCase): res = self._get_resp_post(body) self.assertEqual(202, res.status_int, res) - # Check the db.service_get_by_host_and_topic was called with correct - # arguments. - self.assertEqual(1, mock_db.call_count) - args = mock_db.call_args[0] - self.assertEqual('fake_host', args[1]) + # Check the db.service_get was called with correct arguments. + mock_db.assert_called_once_with( + mock.ANY, host='fake_host', binary='cinder-volume') # Check the create_snapshot_in_db was called with correct arguments. self.assertEqual(1, mock_create_snapshot.call_count) @@ -144,6 +144,41 @@ class SnapshotManageTest(test.TestCase): args = mock_rpcapi.call_args[0] self.assertEqual('fake_ref', args[2]) + @mock.patch('cinder.utils.service_is_up', return_value=True) + @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') + @mock.patch('cinder.volume.api.API.create_snapshot_in_db') + @mock.patch('cinder.db.service_get') + def test_manage_snapshot_disabled(self, mock_db, mock_create_snapshot, + mock_rpcapi, mock_is_up): + """Test manage snapshot failure due to disabled service.""" + mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt, + disabled=True) + body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}} + res = self._get_resp_post(body) + self.assertEqual(400, res.status_int, res) + self.assertEqual(exception.ServiceUnavailable.message, + res.json['badRequest']['message']) + mock_create_snapshot.assert_not_called() + mock_rpcapi.assert_not_called() + mock_is_up.assert_not_called() + + @mock.patch('cinder.utils.service_is_up', return_value=False) + @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') + @mock.patch('cinder.volume.api.API.create_snapshot_in_db') + @mock.patch('cinder.db.service_get') + def test_manage_snapshot_is_down(self, mock_db, mock_create_snapshot, + mock_rpcapi, mock_is_up): + """Test manage snapshot failure due to down service.""" + mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt) + body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}} + res = self._get_resp_post(body) + self.assertEqual(400, res.status_int, res) + self.assertEqual(exception.ServiceUnavailable.message, + res.json['badRequest']['message']) + mock_create_snapshot.assert_not_called() + mock_rpcapi.assert_not_called() + self.assertTrue(mock_is_up.called) + def test_manage_snapshot_missing_volume_id(self): """Test correct failure when volume_id is not specified.""" body = {'snapshot': {'ref': 'fake_ref'}} @@ -240,3 +275,24 @@ class SnapshotManageTest(test.TestCase): mock_api_manageable.assert_called_once_with( self._admin_ctxt, 'fakehost', limit=10, marker='1234', offset=4, sort_dirs=['asc'], sort_keys=['reference']) + + @mock.patch('cinder.utils.service_is_up', return_value=True) + @mock.patch('cinder.db.service_get') + def test_get_manageable_snapshots_disabled(self, mock_db, mock_is_up): + mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt, + disabled=True) + res = self._get_resp_get('host_ok', False, True) + self.assertEqual(400, res.status_int, res) + self.assertEqual(exception.ServiceUnavailable.message, + res.json['badRequest']['message']) + mock_is_up.assert_not_called() + + @mock.patch('cinder.utils.service_is_up', return_value=False) + @mock.patch('cinder.db.service_get') + def test_get_manageable_snapshots_is_down(self, mock_db, mock_is_up): + mock_db.return_value = 
fake_service.fake_service_obj(self._admin_ctxt) + res = self._get_resp_get('host_ok', False, True) + self.assertEqual(400, res.status_int, res) + self.assertEqual(exception.ServiceUnavailable.message, + res.json['badRequest']['message']) + self.assertTrue(mock_is_up.called) diff --git a/cinder/tests/unit/api/contrib/test_types_extra_specs.py b/cinder/tests/unit/api/contrib/test_types_extra_specs.py index d6f9b9a67..b76b76a68 100644 --- a/cinder/tests/unit/api/contrib/test_types_extra_specs.py +++ b/cinder/tests/unit/api/contrib/test_types_extra_specs.py @@ -104,8 +104,8 @@ class VolumeTypesExtraSpecsTest(test.TestCase): return_empty_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key6') - self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, - req, fake.VOLUME_ID, 'key6') + self.assertRaises(exception.VolumeTypeExtraSpecsNotFound, + self.controller.show, req, fake.VOLUME_ID, 'key6') def test_delete(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete', @@ -121,8 +121,8 @@ class VolumeTypesExtraSpecsTest(test.TestCase): delete_volume_type_extra_specs_not_found) req = fakes.HTTPRequest.blank(self.api_path + '/key6') - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, - req, fake.VOLUME_ID, 'key6') + self.assertRaises(exception.VolumeTypeExtraSpecsNotFound, + self.controller.delete, req, fake.VOLUME_ID, 'key6') @mock.patch('cinder.utils.check_string_length') def test_create(self, mock_check): diff --git a/cinder/tests/unit/api/contrib/test_types_manage.py b/cinder/tests/unit/api/contrib/test_types_manage.py index 95bbb38c9..824d58a3f 100644 --- a/cinder/tests/unit/api/contrib/test_types_manage.py +++ b/cinder/tests/unit/api/contrib/test_types_manage.py @@ -168,8 +168,8 @@ class VolumeTypesManageApiTest(test.TestCase): self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( fake.PROJECT_ID, NOT_FOUND_VOLUME_TYPE)) - self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete, - req, NOT_FOUND_VOLUME_TYPE) + self.assertRaises(exception.VolumeTypeNotFound, + self.controller._delete, req, NOT_FOUND_VOLUME_TYPE) self.assertEqual(1, len(self.notifier.notifications)) def test_volume_types_with_volumes_destroy(self): @@ -429,7 +429,7 @@ class VolumeTypesManageApiTest(test.TestCase): req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeTypeNotFound, self.controller._update, req, NOT_FOUND_VOLUME_TYPE, body) self.assertEqual(1, len(self.notifier.notifications)) diff --git a/cinder/tests/unit/api/contrib/test_volume_actions.py b/cinder/tests/unit/api/contrib/test_volume_actions.py index b78d47877..caf056252 100644 --- a/cinder/tests/unit/api/contrib/test_volume_actions.py +++ b/cinder/tests/unit/api/contrib/test_volume_actions.py @@ -573,13 +573,13 @@ class VolumeRetypeActionsTest(test.TestCase): self.retype_mocks['reserve'].side_effect = exc self._retype_volume_exec(413, vol_type_new.id, vol.id) - def _retype_volume_qos(self, vol_status, consumer, expected_status, + def _retype_volume_qos(self, vol_status, consumer_pass, expected_status, same_qos=False, has_qos=True, has_type=True): admin_ctxt = context.get_admin_context() if has_qos: qos_old = utils.create_qos(admin_ctxt, self, name='old', - qos_specs={'consumer': consumer})['id'] + consumer=consumer_pass)['id'] else: qos_old = None @@ -588,7 +588,7 @@ class VolumeRetypeActionsTest(test.TestCase): else: qos_new = 
utils.create_qos(admin_ctxt, self, name='new', - qos_specs={'consumer': consumer})['id'] + consumer=consumer_pass)['id'] if has_type: vol_type_old = utils.create_volume_type(admin_ctxt, self, @@ -739,16 +739,16 @@ class VolumeRetypeActionsTest(test.TestCase): self._retype_volume_encryption('available', 202, False, False, False) def test_retype_volume_orig_no_type_dest_enc(self): - self._retype_volume_encryption('available', 400, False, False) + self._retype_volume_encryption('available', 202, False, False) def test_retype_volume_orig_type_no_enc_dest_no_enc(self): self._retype_volume_encryption('available', 202, True, False, False) def test_retype_volume_orig_type_no_enc_dest_enc(self): - self._retype_volume_encryption('available', 400, True, False) + self._retype_volume_encryption('available', 202, True, False) def test_retype_volume_orig_type_enc_dest_enc(self): - self._retype_volume_encryption('available', 400) + self._retype_volume_encryption('available', 202) def stub_volume_get(self, context, volume_id): @@ -878,7 +878,7 @@ class VolumeImageActionsTest(test.TestCase): body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller._volume_upload_image, req, id, @@ -1304,3 +1304,49 @@ class VolumeImageActionsTest(test.TestCase): expected['os-volume_upload_image'].update(visibility='public', protected=True) self.assertDictMatch(expected, res_dict) + + @mock.patch.object(volume_api.API, "get_volume_image_metadata") + @mock.patch.object(glance.GlanceImageService, "create") + @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") + def test_copy_volume_to_image_vhd( + self, mock_copy_to_image, mock_create, mock_get_image_metadata): + """Test create image from volume with vhd disk format""" + volume, expected = self._create_volume_with_type() + mock_get_image_metadata.return_value = {} + mock_create.side_effect = self.fake_image_service_create + req = fakes.HTTPRequest.blank( + '/v2/fakeproject/volumes/%s/action' % volume.id) + body = self._get_os_volume_upload_image() + body['os-volume_upload_image']['force'] = True + body['os-volume_upload_image']['container_format'] = 'bare' + body['os-volume_upload_image']['disk_format'] = 'vhd' + + res_dict = self.controller._volume_upload_image(req, volume.id, body) + + self.assertDictMatch(expected, res_dict) + vol_db = objects.Volume.get_by_id(self.context, volume.id) + self.assertEqual('uploading', vol_db.status) + self.assertEqual('available', vol_db.previous_status) + + @mock.patch.object(volume_api.API, "get_volume_image_metadata") + @mock.patch.object(glance.GlanceImageService, "create") + @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") + def test_copy_volume_to_image_vhdx( + self, mock_copy_to_image, mock_create, mock_get_image_metadata): + """Test create image from volume with vhdx disk format""" + volume, expected = self._create_volume_with_type() + mock_get_image_metadata.return_value = {} + mock_create.side_effect = self.fake_image_service_create + req = fakes.HTTPRequest.blank( + '/v2/fakeproject/volumes/%s/action' % volume.id) + body = self._get_os_volume_upload_image() + body['os-volume_upload_image']['force'] = True + body['os-volume_upload_image']['container_format'] = 'bare' + body['os-volume_upload_image']['disk_format'] = 'vhdx' + + res_dict = self.controller._volume_upload_image(req, volume.id, body) + + self.assertDictMatch(expected, 
res_dict) + vol_db = objects.Volume.get_by_id(self.context, volume.id) + self.assertEqual('uploading', vol_db.status) + self.assertEqual('available', vol_db.previous_status) diff --git a/cinder/tests/unit/api/contrib/test_volume_image_metadata.py b/cinder/tests/unit/api/contrib/test_volume_image_metadata.py index 8555cce16..7cdb60234 100644 --- a/cinder/tests/unit/api/contrib/test_volume_image_metadata.py +++ b/cinder/tests/unit/api/contrib/test_volume_image_metadata.py @@ -255,7 +255,7 @@ class VolumeImageMetadataTest(test.TestCase): "metadata": {"image_name": "fake"}} } req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller.create, req, fake.VOLUME_ID, body) def test_invalid_metadata_items_on_create(self): @@ -318,7 +318,7 @@ class VolumeImageMetadataTest(test.TestCase): req.body = jsonutils.dump_as_bytes(data) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.GlanceMetadataNotFound, self.controller.delete, req, fake.VOLUME_ID, data) def test_delete_nonexistent_volume(self): @@ -334,7 +334,7 @@ class VolumeImageMetadataTest(test.TestCase): req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.GlanceMetadataNotFound, self.controller.delete, req, fake.VOLUME_ID, body) def test_show_image_metadata(self): diff --git a/cinder/tests/unit/api/contrib/test_volume_manage.py b/cinder/tests/unit/api/contrib/test_volume_manage.py index 69e34d406..f77fe29be 100644 --- a/cinder/tests/unit/api/contrib/test_volume_manage.py +++ b/cinder/tests/unit/api/contrib/test_volume_manage.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +import ddt import mock from oslo_config import cfg from oslo_serialization import jsonutils @@ -40,16 +41,18 @@ def app(): return mapper -def db_service_get_by_host_and_topic(context, host, topic): - """Replacement for db.service_get_by_host_and_topic. +def service_get(context, host, binary): + """Replacement for db.service_get. - We stub the db.service_get_by_host_and_topic method to return something - for a specific host, and raise an exception for anything else. We don't - use the returned data (the code under test just use the call to check for - existence of a host, so the content returned doesn't matter. + We mock the db.service_get method to return + something for a specific host, and raise an exception for anything else. + We don't use the returned data (the code under test just uses the call to + check for the existence of a host), so the content returned doesn't matter.
""" if host == 'host_ok': - return {} + return {'disabled': False} + if host == 'host_disabled': + return {'disabled': True} raise exception.ServiceNotFound(service_id=host) # Some of the tests check that volume types are correctly validated during a @@ -126,8 +129,8 @@ def api_get_manageable_volumes(*args, **kwargs): return vols -@mock.patch('cinder.db.service_get_by_host_and_topic', - db_service_get_by_host_and_topic) +@ddt.ddt +@mock.patch('cinder.db.service_get', service_get) @mock.patch('cinder.volume.volume_types.get_volume_type_by_name', vt_get_volume_type_by_name) @mock.patch('cinder.volume.volume_types.get_volume_type', @@ -179,13 +182,13 @@ class VolumeManageTest(test.TestCase): body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} res = self._get_resp_post(body) - self.assertEqual(202, res.status_int, res) + self.assertEqual(202, res.status_int) # Check that the manage API was called with the correct arguments. self.assertEqual(1, mock_api_manage.call_count) args = mock_api_manage.call_args[0] - self.assertEqual(args[1], body['volume']['host']) - self.assertEqual(args[2], body['volume']['ref']) + self.assertEqual(body['volume']['host'], args[1]) + self.assertEqual(body['volume']['ref'], args[2]) self.assertTrue(mock_validate.called) def test_manage_volume_missing_host(self): @@ -199,7 +202,6 @@ class VolumeManageTest(test.TestCase): body = {'volume': {'host': 'host_ok'}} res = self._get_resp_post(body) self.assertEqual(400, res.status_int) - pass def test_manage_volume_with_invalid_bootable(self): """Test correct failure when invalid bool value is specified.""" @@ -209,6 +211,26 @@ class VolumeManageTest(test.TestCase): res = self._get_resp_post(body) self.assertEqual(400, res.status_int) + @mock.patch('cinder.utils.service_is_up', return_value=True) + def test_manage_volume_disabled(self, mock_is_up): + """Test manage volume failure due to disabled service.""" + body = {'volume': {'host': 'host_disabled', 'ref': 'fake_ref'}} + res = self._get_resp_post(body) + self.assertEqual(400, res.status_int, res) + self.assertEqual(exception.ServiceUnavailable.message, + res.json['badRequest']['message']) + mock_is_up.assert_not_called() + + @mock.patch('cinder.utils.service_is_up', return_value=False) + def test_manage_volume_is_down(self, mock_is_up): + """Test manage volume failure due to down service.""" + body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} + res = self._get_resp_post(body) + self.assertEqual(400, res.status_int, res) + self.assertEqual(exception.ServiceUnavailable.message, + res.json['badRequest']['message']) + self.assertTrue(mock_is_up.called) + @mock.patch('cinder.volume.api.API.manage_existing', api_manage) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') @@ -223,9 +245,8 @@ class VolumeManageTest(test.TestCase): 'volume_type': fake.VOLUME_TYPE_ID, 'bootable': True}} res = self._get_resp_post(body) - self.assertEqual(202, res.status_int, res) + self.assertEqual(202, res.status_int) self.assertTrue(mock_validate.called) - pass @mock.patch('cinder.volume.api.API.manage_existing', api_manage) @mock.patch( @@ -240,9 +261,8 @@ class VolumeManageTest(test.TestCase): 'ref': 'fake_ref', 'volume_type': 'good_fakevt'}} res = self._get_resp_post(body) - self.assertEqual(202, res.status_int, res) + self.assertEqual(202, res.status_int) self.assertTrue(mock_validate.called) - pass def test_manage_volume_bad_volume_type_by_uuid(self): """Test failure on nonexistent volume type specified by ID.""" @@ -250,8 +270,7 @@ class 
VolumeManageTest(test.TestCase): 'ref': 'fake_ref', 'volume_type': fake.WILL_NOT_BE_FOUND_ID}} res = self._get_resp_post(body) - self.assertEqual(404, res.status_int, res) - pass + self.assertEqual(404, res.status_int) def test_manage_volume_bad_volume_type_by_name(self): """Test failure on nonexistent volume type specified by name.""" @@ -259,8 +278,7 @@ class VolumeManageTest(test.TestCase): 'ref': 'fake_ref', 'volume_type': 'bad_fakevt'}} res = self._get_resp_post(body) - self.assertEqual(404, res.status_int, res) - pass + self.assertEqual(404, res.status_int) def _get_resp_get(self, host, detailed, paging, admin=True): """Helper to execute a GET os-volume-manage API call.""" @@ -287,10 +305,10 @@ class VolumeManageTest(test.TestCase): def test_get_manageable_volumes_non_admin(self, mock_api_manageable): res = self._get_resp_get('fakehost', False, False, admin=False) self.assertEqual(403, res.status_int) - self.assertEqual(False, mock_api_manageable.called) + mock_api_manageable.assert_not_called() res = self._get_resp_get('fakehost', True, False, admin=False) self.assertEqual(403, res.status_int) - self.assertEqual(False, mock_api_manageable.called) + mock_api_manageable.assert_not_called() @mock.patch('cinder.volume.api.API.get_manageable_volumes', wraps=api_get_manageable_volumes) @@ -304,7 +322,7 @@ class VolumeManageTest(test.TestCase): {'reference': {'source-name': 'myvol'}, 'size': 5, 'safe_to_manage': True}]} self.assertEqual(200, res.status_int) - self.assertEqual(jsonutils.loads(res.body), exp) + self.assertEqual(exp, jsonutils.loads(res.body)) mock_api_manageable.assert_called_once_with( self._admin_ctxt, 'fakehost', limit=10, marker='1234', offset=4, sort_dirs=['asc'], sort_keys=['reference']) @@ -322,8 +340,35 @@ class VolumeManageTest(test.TestCase): 'size': 5, 'reason_not_safe': None, 'safe_to_manage': True, 'extra_info': 'qos_setting:low'}]} self.assertEqual(200, res.status_int) - self.assertEqual(jsonutils.loads(res.body), exp) + self.assertEqual(exp, jsonutils.loads(res.body)) mock_api_manageable.assert_called_once_with( self._admin_ctxt, 'fakehost', limit=CONF.osapi_max_limit, marker=None, offset=0, sort_dirs=['desc'], sort_keys=['reference']) + + @ddt.data({'a' * 256: 'a'}, + {'a': 'a' * 256}, + {'': 'a'}, + ) + def test_manage_volume_with_invalid_metadata(self, value): + body = {'volume': {'host': 'host_ok', + 'ref': 'fake_ref', + "metadata": value}} + res = self._get_resp_post(body) + self.assertEqual(400, res.status_int) + + @mock.patch('cinder.utils.service_is_up', return_value=True) + def test_get_manageable_volumes_disabled(self, mock_is_up): + res = self._get_resp_get('host_disabled', False, True) + self.assertEqual(400, res.status_int, res) + self.assertEqual(exception.ServiceUnavailable.message, + res.json['badRequest']['message']) + mock_is_up.assert_not_called() + + @mock.patch('cinder.utils.service_is_up', return_value=False) + def test_get_manageable_volumes_is_down(self, mock_is_up): + res = self._get_resp_get('host_ok', False, True) + self.assertEqual(400, res.status_int, res) + self.assertEqual(exception.ServiceUnavailable.message, + res.json['badRequest']['message']) + self.assertTrue(mock_is_up.called) diff --git a/cinder/tests/unit/api/contrib/test_volume_transfer.py b/cinder/tests/unit/api/contrib/test_volume_transfer.py index fac3a956b..d9c98d185 100644 --- a/cinder/tests/unit/api/contrib/test_volume_transfer.py +++ b/cinder/tests/unit/api/contrib/test_volume_transfer.py @@ -40,7 +40,7 @@ class VolumeTransferAPITestCase(test.TestCase): 
self.volume_transfer_api = cinder.transfer.API() self.controller = volume_transfer.VolumeTransferController() self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True, admin=True) + fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) def _create_transfer(self, volume_id=fake.VOLUME_ID, display_name='test_transfer'): diff --git a/cinder/tests/unit/api/contrib/test_volume_type_access.py b/cinder/tests/unit/api/contrib/test_volume_type_access.py index 5a0b41bed..65e62c032 100644 --- a/cinder/tests/unit/api/contrib/test_volume_type_access.py +++ b/cinder/tests/unit/api/contrib/test_volume_type_access.py @@ -132,7 +132,7 @@ class VolumeTypeAccessTest(test.TestCase): req = fakes.HTTPRequest.blank('/v2/%s/types/os-volume-type-access' % fake.PROJECT_ID, use_admin_context=True) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeTypeAccessNotFound, self.type_access_controller.index, req, fake.VOLUME_TYPE2_ID) @@ -330,7 +330,7 @@ class VolumeTypeAccessTest(test.TestCase): body = {'removeProjectAccess': {'project': PROJ2_UUID}} req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeTypeAccessNotFound, self.type_action_controller._removeProjectAccess, req, fake.VOLUME_TYPE4_ID, body) diff --git a/cinder/tests/unit/api/contrib/test_volume_type_encryption.py b/cinder/tests/unit/api/contrib/test_volume_type_encryption.py index 2779166b9..24bf14d81 100644 --- a/cinder/tests/unit/api/contrib/test_volume_type_encryption.py +++ b/cinder/tests/unit/api/contrib/test_volume_type_encryption.py @@ -147,7 +147,8 @@ class VolumeTypeEncryptionTest(test.TestCase): expected = { 'itemNotFound': { 'code': 404, - 'message': ('The resource could not be found.') + 'message': ('Volume type encryption for type %s does not ' + 'exist.' 
% volume_type['id']) } } self.assertEqual(expected, res_dict) diff --git a/cinder/tests/unit/api/openstack/test_wsgi.py b/cinder/tests/unit/api/openstack/test_wsgi.py index 6e6f6c58c..ed5d24099 100644 --- a/cinder/tests/unit/api/openstack/test_wsgi.py +++ b/cinder/tests/unit/api/openstack/test_wsgi.py @@ -76,12 +76,8 @@ class RequestTest(test.TestCase): accepted = 'unknown-lang' request.headers = {'Accept-Language': accepted} - def fake_best_match(self, offers, default_match=None): - # Match would return None, if requested lang is not found - return None - - self.stubs.SmartSet(request.accept_language, - 'best_match', fake_best_match) + self.mock_object(request.accept_language, + 'best_match', return_value=None) self.assertIsNone(request.best_match_language()) # If accept-language is not included or empty, match should be None diff --git a/cinder/tests/unit/api/test_versions.py b/cinder/tests/unit/api/test_versions.py index 1d3dc9c84..d254de1db 100644 --- a/cinder/tests/unit/api/test_versions.py +++ b/cinder/tests/unit/api/test_versions.py @@ -262,7 +262,8 @@ class VersionsControllerTestCase(test.TestCase): ('3.2', 'index', 'child 3.2', 'ControllerChild'), ('3.2', 'show', 404, 'ControllerChild'), ('3.3', 'index', 'child 3.3', 'ControllerChild'), - ('3.3', 'show', 'show', 'ControllerChild')) + ('3.3', 'show', 'show', 'ControllerChild'), + ('3.4', 'index', 'child 3.4', 'ControllerChild')) @ddt.unpack def test_versions_inheritance_of_non_base_controller(self, version, call, expected, controller): @@ -287,12 +288,14 @@ class VersionsControllerTestCase(test.TestCase): def index(self, req): return 'child 3.2' - # TODO(geguileo): Figure out a way to make microversions work in a - # way that doesn't raise complaints from duplicated method. - @wsgi.Controller.api_version('3.3') # noqa + @index.api_version('3.3') def index(self, req): return 'child 3.3' + @index.api_version('3.4') + def index(self, req): + return 'child 3.4' + @wsgi.Controller.api_version('3.3') def show(self, req, *args, **kwargs): return 'show' diff --git a/cinder/tests/unit/api/v1/stubs.py b/cinder/tests/unit/api/v1/stubs.py index 80a4db480..628d3f136 100644 --- a/cinder/tests/unit/api/v1/stubs.py +++ b/cinder/tests/unit/api/v1/stubs.py @@ -190,7 +190,7 @@ def stub_snapshot_update(self, context, *args, **param): pass -def stub_service_get_all_by_topic(context, topic, disabled=None): +def stub_service_get_all(context, **filters): return [{'availability_zone': "zone1:host1", "disabled": 0}] diff --git a/cinder/tests/unit/api/v1/test_snapshot_metadata.py b/cinder/tests/unit/api/v1/test_snapshot_metadata.py index 646da16d2..a0318558d 100644 --- a/cinder/tests/unit/api/v1/test_snapshot_metadata.py +++ b/cinder/tests/unit/api/v1/test_snapshot_metadata.py @@ -171,7 +171,7 @@ class SnapshotMetaDataTest(test.TestCase): exc.SnapshotNotFound(snapshot_id=fake.WILL_NOT_BE_FOUND_ID) req = fakes.HTTPRequest.blank(self.url) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.SnapshotNotFound, self.controller.index, req, self.url) @mock.patch('cinder.objects.Snapshot.get_by_id') @@ -211,7 +211,7 @@ class SnapshotMetaDataTest(test.TestCase): exc.SnapshotNotFound(snapshot_id=fake.WILL_NOT_BE_FOUND_ID) req = fakes.HTTPRequest.blank(self.url + '/key2') - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.SnapshotNotFound, self.controller.show, req, fake.SNAPSHOT_ID, 'key2') @mock.patch('cinder.objects.Snapshot.get_by_id') @@ -225,7 +225,7 @@ class SnapshotMetaDataTest(test.TestCase): snapshot_get_by_id.return_value = 
snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key6') - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.SnapshotMetadataNotFound, self.controller.show, req, fake.SNAPSHOT_ID, 'key6') @mock.patch('cinder.db.snapshot_metadata_delete') @@ -249,7 +249,7 @@ class SnapshotMetaDataTest(test.TestCase): def test_delete_nonexistent_snapshot(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.SnapshotNotFound, self.controller.delete, req, fake.WILL_NOT_BE_FOUND_ID, 'key1') @@ -265,7 +265,7 @@ class SnapshotMetaDataTest(test.TestCase): req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.SnapshotMetadataNotFound, self.controller.delete, req, fake.SNAPSHOT_ID, 'key6') @@ -375,7 +375,7 @@ class SnapshotMetaDataTest(test.TestCase): req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.SnapshotNotFound, self.controller.create, req, fake.WILL_NOT_BE_FOUND_ID, body) @@ -504,7 +504,7 @@ class SnapshotMetaDataTest(test.TestCase): body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.SnapshotNotFound, self.controller.update_all, req, fake.WILL_NOT_BE_FOUND_ID, body) @@ -538,7 +538,7 @@ class SnapshotMetaDataTest(test.TestCase): req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.SnapshotNotFound, self.controller.update, req, fake.WILL_NOT_BE_FOUND_ID, 'key1', body) diff --git a/cinder/tests/unit/api/v1/test_snapshots.py b/cinder/tests/unit/api/v1/test_snapshots.py index fdfa2b7ad..88ef9f031 100644 --- a/cinder/tests/unit/api/v1/test_snapshots.py +++ b/cinder/tests/unit/api/v1/test_snapshots.py @@ -202,7 +202,7 @@ class SnapshotApiTest(test.TestCase): } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v1/snapshots/not-the-uuid') - self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, + self.assertRaises(exception.NotFound, self.controller.update, req, 'not-the-uuid', body) @mock.patch.object(volume.api.API, "delete_snapshot", @@ -236,7 +236,7 @@ class SnapshotApiTest(test.TestCase): self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) snapshot_id = INVALID_UUID req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.delete, req, snapshot_id) @@ -270,7 +270,7 @@ class SnapshotApiTest(test.TestCase): def test_snapshot_show_invalid_id(self): snapshot_id = INVALID_UUID req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.show, req, snapshot_id) diff --git a/cinder/tests/unit/api/v1/test_types.py b/cinder/tests/unit/api/v1/test_types.py index a3c34aec8..428167401 100644 --- a/cinder/tests/unit/api/v1/test_types.py +++ b/cinder/tests/unit/api/v1/test_types.py @@ -16,7 +16,6 @@ import uuid from oslo_utils import timeutils -import webob from cinder.api.v1 import types from cinder.api.views import types as views_types @@ -108,7 +107,7 @@ class VolumeTypesApiTest(test.TestCase): req = 
fakes.HTTPRequest.blank('/v1/%s/types/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + self.assertRaises(exception.VolumeTypeNotFound, self.controller.show, req, fake.WILL_NOT_BE_FOUND_ID) def test_view_builder_show(self): diff --git a/cinder/tests/unit/api/v1/test_volume_metadata.py b/cinder/tests/unit/api/v1/test_volume_metadata.py index 31fcc32a4..4ed756672 100644 --- a/cinder/tests/unit/api/v1/test_volume_metadata.py +++ b/cinder/tests/unit/api/v1/test_volume_metadata.py @@ -145,8 +145,10 @@ class volumeMetaDataTest(test.TestCase): self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) self.stubs.Set(cinder.db, 'volume_metadata_get', return_volume_metadata) - self.stubs.Set(cinder.db, 'service_get_all_by_topic', - stubs.stub_service_get_all_by_topic) + self.patch( + 'cinder.db.service_get_all', autospec=True, + return_value=stubs.stub_service_get_all(None)) + self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.volume_controller = volumes.VolumeController(self.ext_mgr) @@ -178,7 +180,7 @@ class volumeMetaDataTest(test.TestCase): def test_index_nonexistent_volume(self): req = fakes.HTTPRequest.blank(self.url) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeNotFound, self.controller.index, req, fake.WILL_NOT_BE_FOUND_ID) @@ -198,7 +200,7 @@ class volumeMetaDataTest(test.TestCase): def test_show_nonexistent_volume(self): req = fakes.HTTPRequest.blank(self.url + '/key2') - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeNotFound, self.controller.show, req, fake.WILL_NOT_BE_FOUND_ID, 'key2') @@ -206,7 +208,7 @@ class volumeMetaDataTest(test.TestCase): self.stubs.Set(cinder.db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeMetadataNotFound, self.controller.show, req, fake.VOLUME_ID, 'key6') @mock.patch.object(cinder.db, 'volume_metadata_delete') @@ -235,7 +237,7 @@ class volumeMetaDataTest(test.TestCase): req.method = 'DELETE' req.environ['cinder.context'] = fake_context - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + self.assertRaises(exc.VolumeNotFound, self.controller.delete, req, fake.WILL_NOT_BE_FOUND_ID, 'key1') def test_delete_meta_not_found(self): @@ -243,7 +245,7 @@ class volumeMetaDataTest(test.TestCase): return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeMetadataNotFound, self.controller.delete, req, fake.VOLUME_ID, 'key6') @mock.patch.object(cinder.db, 'volume_metadata_update') @@ -347,7 +349,7 @@ class volumeMetaDataTest(test.TestCase): req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeNotFound, self.controller.create, req, fake.WILL_NOT_BE_FOUND_ID, body) @@ -494,7 +496,7 @@ class volumeMetaDataTest(test.TestCase): body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeNotFound, self.controller.update_all, req, fake.WILL_NOT_BE_FOUND_ID, body) @@ -528,7 +530,7 @@ class volumeMetaDataTest(test.TestCase): req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" - 
self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeNotFound, self.controller.update, req, fake.WILL_NOT_BE_FOUND_ID, 'key1', body) diff --git a/cinder/tests/unit/api/v1/test_volumes.py b/cinder/tests/unit/api/v1/test_volumes.py index f941759d7..9ff3d83bd 100644 --- a/cinder/tests/unit/api/v1/test_volumes.py +++ b/cinder/tests/unit/api/v1/test_volumes.py @@ -44,12 +44,13 @@ class VolumeApiTest(test.TestCase): super(VolumeApiTest, self).setUp() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} - fake_image.stub_out_image_service(self.stubs) + fake_image.mock_image_service(self) self.controller = volumes.VolumeController(self.ext_mgr) self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all) - self.stubs.Set(db, 'service_get_all_by_topic', - stubs.stub_service_get_all_by_topic) + self.patch( + 'cinder.db.service_get_all', autospec=True, + return_value=stubs.stub_service_get_all(None)) self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete) def test_volume_create(self): @@ -97,8 +98,8 @@ class VolumeApiTest(test.TestCase): body = {"volume": vol} req = fakes.HTTPRequest.blank('/v1/volumes') # Raise 404 when type name isn't valid - self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, - req, body) + self.assertRaises(exc.VolumeTypeNotFoundByName, + self.controller.create, req, body) # Use correct volume type name vol.update(dict(volume_type=CONF.default_volume_type)) body.update(dict(volume=vol)) @@ -384,7 +385,7 @@ class VolumeApiTest(test.TestCase): req = fakes.HTTPRequest.blank( '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeNotFound, self.controller.update, req, fake.WILL_NOT_BE_FOUND_ID, body) @@ -649,7 +650,7 @@ class VolumeApiTest(test.TestCase): req = fakes.HTTPRequest.blank( '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeNotFound, self.controller.show, req, fake.WILL_NOT_BE_FOUND_ID) @@ -767,7 +768,7 @@ class VolumeApiTest(test.TestCase): def test_volume_delete_no_volume(self): req = fakes.HTTPRequest.blank( '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exc.VolumeNotFound, self.controller.delete, req, fake.WILL_NOT_BE_FOUND_ID) diff --git a/cinder/tests/unit/api/v2/stubs.py b/cinder/tests/unit/api/v2/stubs.py index daa90ab25..e49cde5da 100644 --- a/cinder/tests/unit/api/v2/stubs.py +++ b/cinder/tests/unit/api/v2/stubs.py @@ -21,6 +21,7 @@ from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume +from cinder import utils DEFAULT_VOL_NAME = "displayname" @@ -89,6 +90,7 @@ def stub_volume_create(self, context, size, name, description, snapshot=None, vol['source_volid'] = source_volume.get('id') vol['bootable'] = False vol['volume_attachment'] = [] + vol['multiattach'] = utils.get_bool_param('multiattach', param) try: vol['snapshot_id'] = snapshot['id'] except (KeyError, TypeError): @@ -220,6 +222,10 @@ def stub_snapshot_update(self, context, *args, **param): pass +def stub_service_get_all(*args, **kwargs): + return [{'availability_zone': "zone1:host1", "disabled": 0}] + + def stub_service_get_all_by_topic(context, topic, disabled=None): return [{'availability_zone': "zone1:host1", "disabled": 0}] diff --git a/cinder/tests/unit/api/v2/test_snapshot_metadata.py b/cinder/tests/unit/api/v2/test_snapshot_metadata.py 
index 482303cb6..9051d5433 100644 --- a/cinder/tests/unit/api/v2/test_snapshot_metadata.py +++ b/cinder/tests/unit/api/v2/test_snapshot_metadata.py @@ -169,7 +169,7 @@ class SnapshotMetaDataTest(test.TestCase): exception.SnapshotNotFound(snapshot_id=self.req_id) req = fakes.HTTPRequest.blank(self.url) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.index, req, self.url) @mock.patch('cinder.objects.Snapshot.get_by_id') @@ -209,7 +209,7 @@ class SnapshotMetaDataTest(test.TestCase): exception.SnapshotNotFound(snapshot_id=self.req_id) req = fakes.HTTPRequest.blank(self.url + '/key2') - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.show, req, self.req_id, 'key2') @mock.patch('cinder.objects.Snapshot.get_by_id') @@ -223,7 +223,7 @@ class SnapshotMetaDataTest(test.TestCase): snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key6') - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotMetadataNotFound, self.controller.show, req, self.req_id, 'key6') @mock.patch('cinder.db.snapshot_metadata_delete') @@ -249,7 +249,7 @@ class SnapshotMetaDataTest(test.TestCase): return_snapshot_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.delete, req, self.req_id, 'key1') @mock.patch('cinder.objects.Snapshot.get_by_id') @@ -264,7 +264,7 @@ class SnapshotMetaDataTest(test.TestCase): req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotMetadataNotFound, self.controller.delete, req, self.req_id, 'key6') @mock.patch('cinder.db.snapshot_update') @@ -375,7 +375,7 @@ class SnapshotMetaDataTest(test.TestCase): req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.create, req, self.req_id, body) @mock.patch('cinder.db.snapshot_update') @@ -503,7 +503,7 @@ class SnapshotMetaDataTest(test.TestCase): body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.update_all, req, '100', body) @mock.patch('cinder.db.snapshot_metadata_update', return_value=dict()) @@ -538,7 +538,7 @@ class SnapshotMetaDataTest(test.TestCase): req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.update, req, self.req_id, 'key1', body) diff --git a/cinder/tests/unit/api/v2/test_snapshots.py b/cinder/tests/unit/api/v2/test_snapshots.py index 52f3a14c5..79af73c15 100644 --- a/cinder/tests/unit/api/v2/test_snapshots.py +++ b/cinder/tests/unit/api/v2/test_snapshots.py @@ -230,8 +230,8 @@ class SnapshotApiTest(test.TestCase): } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v2/snapshots/not-the-uuid') - self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, - 'not-the-uuid', body) + self.assertRaises(exception.SnapshotNotFound, self.controller.update, + req, 'not-the-uuid', body) @mock.patch.object(volume.api.API, "delete_snapshot", 
side_effect=stubs.stub_snapshot_update) @@ -264,7 +264,7 @@ class SnapshotApiTest(test.TestCase): self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) snapshot_id = INVALID_UUID req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + self.assertRaises(exception.SnapshotNotFound, self.controller.delete, req, snapshot_id) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @@ -296,7 +296,7 @@ class SnapshotApiTest(test.TestCase): def test_snapshot_show_invalid_id(self): snapshot_id = INVALID_UUID req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.SnapshotNotFound, self.controller.show, req, snapshot_id) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) diff --git a/cinder/tests/unit/api/v2/test_types.py b/cinder/tests/unit/api/v2/test_types.py index 001b9d8b5..4940a13bc 100644 --- a/cinder/tests/unit/api/v2/test_types.py +++ b/cinder/tests/unit/api/v2/test_types.py @@ -254,7 +254,7 @@ class VolumeTypesApiTest(test.TestCase): req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + self.assertRaises(exception.VolumeTypeNotFound, self.controller.show, req, fake.WILL_NOT_BE_FOUND_ID) def test_get_default(self): @@ -274,7 +274,7 @@ class VolumeTypesApiTest(test.TestCase): req = fakes.HTTPRequest.blank('/v2/%s/types/default' % fake.PROJECT_ID) req.method = 'GET' - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeTypeNotFound, self.controller.show, req, 'default') def test_view_builder_show(self): diff --git a/cinder/tests/unit/api/v2/test_volume_metadata.py b/cinder/tests/unit/api/v2/test_volume_metadata.py index 80e9c85d3..f413ee250 100644 --- a/cinder/tests/unit/api/v2/test_volume_metadata.py +++ b/cinder/tests/unit/api/v2/test_volume_metadata.py @@ -132,8 +132,9 @@ class volumeMetaDataTest(test.TestCase): self.stubs.Set(volume.api.API, 'get', get_volume) self.stubs.Set(db, 'volume_metadata_get', return_volume_metadata) - self.stubs.Set(db, 'service_get_all_by_topic', - stubs.stub_service_get_all_by_topic) + self.patch( + 'cinder.db.service_get_all', autospec=True, + return_value=stubs.stub_service_get_all_by_topic(None, None)) self.stubs.Set(self.volume_api, 'update_volume_metadata', fake_update_volume_metadata) @@ -172,7 +173,7 @@ class volumeMetaDataTest(test.TestCase): self.stubs.Set(db, 'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller.index, req, self.url) def test_index_no_data(self): @@ -193,14 +194,14 @@ class volumeMetaDataTest(test.TestCase): self.stubs.Set(db, 'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key2') - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller.show, req, self.req_id, 'key2') def test_show_meta_not_found(self): self.stubs.Set(db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeMetadataNotFound, self.controller.show, req, self.req_id, 'key6') @mock.patch.object(db, 'volume_metadata_delete') @@ -254,7 +255,7 @@ class 
volumeMetaDataTest(test.TestCase): with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller.delete, req, self.req_id, 'key1') get_volume.assert_called_once_with(fake_context, self.req_id) @@ -264,7 +265,7 @@ class volumeMetaDataTest(test.TestCase): return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeMetadataNotFound, self.controller.delete, req, self.req_id, 'key6') @mock.patch.object(db, 'volume_metadata_update') @@ -392,7 +393,7 @@ class volumeMetaDataTest(test.TestCase): req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller.create, req, self.req_id, body) @mock.patch.object(db, 'volume_metadata_update') @@ -537,7 +538,7 @@ class volumeMetaDataTest(test.TestCase): body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller.update_all, req, '100', body) @mock.patch.object(db, 'volume_metadata_update') @@ -591,7 +592,7 @@ class volumeMetaDataTest(test.TestCase): req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller.update, req, self.req_id, 'key1', body) diff --git a/cinder/tests/unit/api/v2/test_volumes.py b/cinder/tests/unit/api/v2/test_volumes.py index 903c16ed8..d97acfb16 100644 --- a/cinder/tests/unit/api/v2/test_volumes.py +++ b/cinder/tests/unit/api/v2/test_volumes.py @@ -17,13 +17,13 @@ import datetime import iso8601 +import ddt import mock from oslo_config import cfg import six from six.moves import range from six.moves import urllib import webob -from webob import exc from cinder.api import common from cinder.api import extensions @@ -49,18 +49,20 @@ NS = '{http://docs.openstack.org/api/openstack-block-storage/2.0/content}' DEFAULT_AZ = "zone1:host1" +@ddt.ddt class VolumeApiTest(test.TestCase): def setUp(self): super(VolumeApiTest, self).setUp() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} - fake_image.stub_out_image_service(self.stubs) + fake_image.mock_image_service(self) self.controller = volumes.VolumeController(self.ext_mgr) self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all) self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete) - self.stubs.Set(db, 'service_get_all_by_topic', - stubs.stub_service_get_all_by_topic) + self.patch( + 'cinder.db.service_get_all', autospec=True, + return_value=stubs.stub_service_get_all_by_topic(None, None)) self.maxDiff = None self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) @@ -95,8 +97,8 @@ class VolumeApiTest(test.TestCase): body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when type name isn't valid - self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, - req, body) + self.assertRaises(exception.VolumeTypeNotFoundByName, + self.controller.create, req, body) # Use correct volume type name vol.update(dict(volume_type=CONF.default_volume_type)) @@ -127,7 +129,8 @@ class VolumeApiTest(test.TestCase): res_dict 
= self.controller.detail(req) self.assertTrue(mock_validate.called) - def _vol_in_request_body(self, + @classmethod + def _vol_in_request_body(cls, size=stubs.DEFAULT_VOL_SIZE, name=stubs.DEFAULT_VOL_NAME, description=stubs.DEFAULT_VOL_DESCRIPTION, @@ -138,7 +141,8 @@ class VolumeApiTest(test.TestCase): consistencygroup_id=None, volume_type=None, image_ref=None, - image_id=None): + image_id=None, + multiattach=False): vol = {"size": size, "name": name, "description": description, @@ -148,6 +152,7 @@ class VolumeApiTest(test.TestCase): "source_replica": source_replica, "consistencygroup_id": consistencygroup_id, "volume_type": volume_type, + "multiattach": multiattach, } if image_id is not None: @@ -170,7 +175,8 @@ class VolumeApiTest(test.TestCase): attachments=None, volume_type=stubs.DEFAULT_VOL_TYPE, status=stubs.DEFAULT_VOL_STATUS, - with_migration_status=False): + with_migration_status=False, + multiattach=False): metadata = metadata or {} attachments = attachments or [] volume = {'volume': @@ -194,7 +200,7 @@ class VolumeApiTest(test.TestCase): 'metadata': metadata, 'name': name, 'replication_status': 'disabled', - 'multiattach': False, + 'multiattach': multiattach, 'size': size, 'snapshot_id': snapshot_id, 'source_volid': source_volid, @@ -260,7 +266,7 @@ class VolumeApiTest(test.TestCase): body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when snapshot cannot be found. - self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, + self.assertRaises(exception.SnapshotNotFound, self.controller.create, req, body) context = req.environ['cinder.context'] get_snapshot.assert_called_once_with(self.controller.volume_api, @@ -308,7 +314,7 @@ class VolumeApiTest(test.TestCase): body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when source volume cannot be found. - self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, + self.assertRaises(exception.VolumeNotFound, self.controller.create, req, body) context = req.environ['cinder.context'] @@ -326,7 +332,7 @@ class VolumeApiTest(test.TestCase): body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when source replica cannot be found. - self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, + self.assertRaises(exception.VolumeNotFound, self.controller.create, req, body) context = req.environ['cinder.context'] @@ -363,8 +369,8 @@ class VolumeApiTest(test.TestCase): body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when consistency group is not found. 
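+ # (As throughout this patch, the controller is now expected to surface the + # typed cinder exception; the WSGI fault-handling layer, rather than the + # controller, is presumably what converts it into the HTTP 404 the client sees.)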
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, - req, body) + self.assertRaises(exception.ConsistencyGroupNotFound, + self.controller.create, req, body) context = req.environ['cinder.context'] get_cg.assert_called_once_with(self.controller.consistencygroup_api, @@ -564,6 +570,55 @@ class VolumeApiTest(test.TestCase): req, body) + def test_volume_create_with_invalid_multiattach(self): + vol = self._vol_in_request_body(multiattach="InvalidBool") + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + + self.assertRaises(exception.InvalidParameterValue, + self.controller.create, + req, + body) + + @mock.patch.object(volume_api.API, 'create', autospec=True) + @mock.patch.object(volume_api.API, 'get', autospec=True) + @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', + autospec=True) + def test_volume_create_with_valid_multiattach(self, + volume_type_get, + get, create): + create.side_effect = stubs.stub_volume_api_create + get.side_effect = stubs.stub_volume_get + volume_type_get.side_effect = stubs.stub_volume_type_get + + vol = self._vol_in_request_body(multiattach=True) + body = {"volume": vol} + + ex = self._expected_vol_from_controller(multiattach=True) + + req = fakes.HTTPRequest.blank('/v2/volumes') + res_dict = self.controller.create(req, body) + + self.assertEqual(ex, res_dict) + + @ddt.data({'a' * 256: 'a'}, + {'a': 'a' * 256}, + {'': 'a'}) + def test_volume_create_with_invalid_metadata(self, value): + vol = self._vol_in_request_body() + vol['metadata'] = value + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + + if len(list(value.keys())[0]) == 0: + exc = exception.InvalidVolumeMetadata + else: + exc = exception.InvalidVolumeMetadataSize + self.assertRaises(exc, + self.controller.create, + req, + body) + @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update(self, mock_validate): @@ -646,7 +701,7 @@ class VolumeApiTest(test.TestCase): stubs.stub_volume_type_get) updates = { - "metadata": {"qos_max_iops": 2000} + "metadata": {"qos_max_iops": '2000'} } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) @@ -660,48 +715,6 @@ class VolumeApiTest(test.TestCase): self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_update_metadata_value_too_long(self, mock_validate): - self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) - - updates = { - "metadata": {"key1": ("a" * 260)} - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - self.assertRaises(exc.HTTPRequestEntityTooLarge, - self.controller.update, req, fake.VOLUME_ID, body) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_update_metadata_key_too_long(self, mock_validate): - self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) - - updates = { - "metadata": {("a" * 260): "value1"} - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - self.assertRaises(exc.HTTPRequestEntityTooLarge, - self.controller.update, req, fake.VOLUME_ID, body) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def 
test_volume_update_metadata_empty_key(self, mock_validate): - self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) - - updates = { - "metadata": {"": "value1"} - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - self.assertRaises(exc.HTTPBadRequest, - self.controller.update, req, fake.VOLUME_ID, body) - @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_with_admin_metadata(self, mock_validate): @@ -755,6 +768,26 @@ class VolumeApiTest(test.TestCase): self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) + @ddt.data({'a' * 256: 'a'}, + {'a': 'a' * 256}, + {'': 'a'}) + @mock.patch.object(volume_api.API, 'get', + side_effect=stubs.stub_volume_api_get, autospec=True) + def test_volume_update_with_invalid_metadata(self, value, get): + updates = { + "metadata": value + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) + + if len(list(value.keys())[0]) == 0: + exc = exception.InvalidVolumeMetadata + else: + exc = webob.exc.HTTPRequestEntityTooLarge + self.assertRaises(exc, + self.controller.update, + req, fake.VOLUME_ID, body) + def test_update_empty_body(self): body = {} req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) @@ -778,7 +811,7 @@ class VolumeApiTest(test.TestCase): } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertRaises(webob.exc.HTTPNotFound, + self.assertRaises(exception.VolumeNotFound, self.controller.update, req, fake.VOLUME_ID, body) @@ -1278,7 +1311,7 @@ class VolumeApiTest(test.TestCase): self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + self.assertRaises(exception.VolumeNotFound, self.controller.show, req, 1) # Finally test that nothing was cached self.assertIsNone(req.cached_resource_by_id(fake.VOLUME_ID)) @@ -1371,7 +1404,7 @@ class VolumeApiTest(test.TestCase): self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + self.assertRaises(exception.VolumeNotFound, self.controller.delete, req, 1) def test_admin_list_volumes_limited_to_project(self): diff --git a/cinder/tests/unit/api/v3/stubs.py b/cinder/tests/unit/api/v3/stubs.py index f7e10318b..aef67a39a 100644 --- a/cinder/tests/unit/api/v3/stubs.py +++ b/cinder/tests/unit/api/v3/stubs.py @@ -15,9 +15,17 @@ import iso8601 from cinder.message import defined_messages from cinder.tests.unit import fake_constants as fake - +from cinder.tests.unit import fake_volume +from cinder import utils FAKE_UUID = fake.OBJECT_ID +DEFAULT_VOL_NAME = "displayname" +DEFAULT_VOL_DESCRIPTION = "displaydesc" +DEFAULT_VOL_SIZE = 1 +DEFAULT_VOL_TYPE = "vol_type_name" +DEFAULT_VOL_STATUS = "fakestatus" +DEFAULT_VOL_ID = fake.VOLUME_ID +DEFAULT_AZ = "fakeaz" def stub_message(id, **kwargs): @@ -40,3 +48,68 @@ def stub_message(id, **kwargs): def stub_message_get(self, context, message_id): return stub_message(message_id) + + +def stub_volume(id, **kwargs): + volume = { + 'id': id, + 'user_id': fake.USER_ID, + 'project_id': fake.PROJECT_ID, + 'host': 'fakehost', + 'size': DEFAULT_VOL_SIZE, + 'availability_zone': DEFAULT_AZ, + 
'status': DEFAULT_VOL_STATUS, + 'migration_status': None, + 'attach_status': 'attached', + 'name': 'vol name', + 'display_name': DEFAULT_VOL_NAME, + 'display_description': DEFAULT_VOL_DESCRIPTION, + 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, + tzinfo=iso8601.iso8601.Utc()), + 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, + tzinfo=iso8601.iso8601.Utc()), + 'snapshot_id': None, + 'source_volid': None, + 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', + 'encryption_key_id': None, + 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'}, + {'key': 'readonly', 'value': 'False'}], + 'bootable': False, + 'launched_at': datetime.datetime(1900, 1, 1, 1, 1, 1, + tzinfo=iso8601.iso8601.Utc()), + 'volume_type': fake_volume.fake_db_volume_type(name=DEFAULT_VOL_TYPE), + 'replication_status': 'disabled', + 'replication_extended_status': None, + 'replication_driver_data': None, + 'volume_attachment': [], + 'multiattach': False, + 'group_id': fake.GROUP_ID, + } + + volume.update(kwargs) + if kwargs.get('volume_glance_metadata', None): + volume['bootable'] = True + if kwargs.get('attach_status') == 'detached': + del volume['volume_admin_metadata'][0] + return volume + + +def stub_volume_create(self, context, size, name, description, snapshot=None, + group_id=None, **param): + vol = stub_volume(DEFAULT_VOL_ID) + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + source_volume = param.get('source_volume') or {} + vol['source_volid'] = source_volume.get('id') + vol['bootable'] = False + vol['volume_attachment'] = [] + vol['multiattach'] = utils.get_bool_param('multiattach', param) + try: + vol['snapshot_id'] = snapshot['id'] + except (KeyError, TypeError): + vol['snapshot_id'] = None + vol['availability_zone'] = param.get('availability_zone', 'fakeaz') + if group_id: + vol['group_id'] = group_id + return vol diff --git a/cinder/tests/unit/api/v3/test_backups.py b/cinder/tests/unit/api/v3/test_backups.py new file mode 100644 index 000000000..37feebdfe --- /dev/null +++ b/cinder/tests/unit/api/v3/test_backups.py @@ -0,0 +1,104 @@ +# Copyright (c) 2016 Intel, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The backups V3 api.""" + +import webob + +from cinder.api.openstack import api_version_request as api_version +from cinder.api.v3 import backups +import cinder.backup +from cinder import context +from cinder import exception +from cinder.objects import fields +from cinder import test +from cinder.tests.unit.api import fakes +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import utils as test_utils + + +class BackupsControllerAPITestCase(test.TestCase): + """Test cases for backups API.""" + + def setUp(self): + super(BackupsControllerAPITestCase, self).setUp() + self.backup_api = cinder.backup.API() + self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, + auth_token=True, + is_admin=True) + self.controller = backups.BackupsController() + + def _fake_update_request(self, backup_id, version='3.9'): + req = fakes.HTTPRequest.blank('/v3/%s/backups/%s/update' % + (fake.PROJECT_ID, backup_id)) + req.environ['cinder.context'].is_admin = True + req.headers['Content-Type'] = 'application/json' + req.headers['OpenStack-API-Version'] = 'volume ' + version + req.api_version_request = api_version.APIVersionRequest(version) + return req + + def test_update_wrong_version(self): + req = self._fake_update_request(fake.BACKUP_ID, version='3.6') + body = {"backup": {"name": "Updated Test Name", }} + self.assertRaises(exception.VersionNotFoundForAPIMethod, + self.controller.update, req, fake.BACKUP_ID, + body) + + def test_backup_update_with_no_body(self): + # omit body from the request + req = self._fake_update_request(fake.BACKUP_ID) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + req, fake.BACKUP_ID, None) + + def test_backup_update_with_unsupported_field(self): + req = self._fake_update_request(fake.BACKUP_ID) + body = {"backup": {"id": fake.BACKUP2_ID, + "description": "", }} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + req, fake.BACKUP_ID, body) + + def test_backup_update_with_backup_not_found(self): + req = self._fake_update_request(fake.BACKUP_ID) + updates = { + "name": "Updated Test Name", + "description": "Updated Test description.", + } + body = {"backup": updates} + self.assertRaises(exception.NotFound, + self.controller.update, + req, fake.BACKUP_ID, body) + + def test_backup_update(self): + backup = test_utils.create_backup( + self.ctxt, + status=fields.BackupStatus.AVAILABLE) + req = self._fake_update_request(fake.BACKUP_ID) + new_name = "updated_test_name" + new_description = "Updated Test description." + updates = { + "name": new_name, + "description": new_description, + } + body = {"backup": updates} + self.controller.update(req, + backup.id, + body) + + backup.refresh() + self.assertEqual(new_name, backup.display_name) + self.assertEqual(new_description, + backup.display_description) diff --git a/cinder/tests/unit/api/v3/test_cluster.py b/cinder/tests/unit/api/v3/test_cluster.py new file mode 100644 index 000000000..07a4f020d --- /dev/null +++ b/cinder/tests/unit/api/v3/test_cluster.py @@ -0,0 +1,251 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +import ddt +from iso8601 import iso8601 +import mock + +from cinder.api import extensions +from cinder.api.openstack import api_version_request as api_version +from cinder.api.v3 import clusters +from cinder import context +from cinder import exception +from cinder import test +from cinder.tests.unit import fake_cluster + + +CLUSTERS = [ + fake_cluster.fake_db_cluster( + id=1, + last_heartbeat=datetime.datetime(2016, 6, 1, 2, 46, 28), + updated_at=datetime.datetime(2016, 6, 1, 2, 46, 28), + created_at=datetime.datetime(2016, 6, 1, 2, 46, 28)), + fake_cluster.fake_db_cluster( + id=2, name='cluster2', num_hosts=2, num_down_hosts=1, disabled=True, + updated_at=datetime.datetime(2016, 6, 1, 1, 46, 28), + created_at=datetime.datetime(2016, 6, 1, 1, 46, 28)) +] + +CLUSTERS_ORM = [fake_cluster.fake_cluster_orm(**kwargs) for kwargs in CLUSTERS] + +EXPECTED = [{'created_at': datetime.datetime(2016, 6, 1, 2, 46, 28), + 'disabled_reason': None, + 'last_heartbeat': datetime.datetime(2016, 6, 1, 2, 46, 28), + 'name': 'cluster_name', + 'binary': 'cinder-volume', + 'num_down_hosts': 0, + 'num_hosts': 0, + 'state': 'up', + 'status': 'enabled', + 'updated_at': datetime.datetime(2016, 6, 1, 2, 46, 28)}, + {'created_at': datetime.datetime(2016, 6, 1, 1, 46, 28), + 'disabled_reason': None, + 'last_heartbeat': '', + 'name': 'cluster2', + 'binary': 'cinder-volume', + 'num_down_hosts': 1, + 'num_hosts': 2, + 'state': 'down', + 'status': 'disabled', + 'updated_at': datetime.datetime(2016, 6, 1, 1, 46, 28)}] + + +class FakeRequest(object): + def __init__(self, is_admin=True, version='3.7', **kwargs): + self.GET = kwargs + self.headers = {'OpenStack-API-Version': 'volume ' + version} + self.api_version_request = api_version.APIVersionRequest(version) + self.environ = { + 'cinder.context': context.RequestContext(user_id=None, + project_id=None, + is_admin=is_admin, + read_deleted='no', + overwrite=False) + } + + +def fake_utcnow(with_timezone=False): + tzinfo = iso8601.Utc() if with_timezone else None + return datetime.datetime(2016, 6, 1, 2, 46, 30, tzinfo=tzinfo) + + +@ddt.ddt +@mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow) +class ClustersTestCase(test.TestCase): + """Test Case for Clusters.""" + LIST_FILTERS = ({}, {'is_up': True}, {'disabled': False}, {'num_hosts': 2}, + {'num_down_hosts': 1}, {'binary': 'cinder-volume'}, + {'is_up': True, 'disabled': False, 'num_hosts': 2, + 'num_down_hosts': 1, 'binary': 'cinder-volume'}) + + def setUp(self): + super(ClustersTestCase, self).setUp() + + self.context = context.get_admin_context() + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.controller = clusters.ClusterController(self.ext_mgr) + + @mock.patch('cinder.db.cluster_get_all', return_value=CLUSTERS_ORM) + def _test_list(self, get_all_mock, detailed, filters, expected=None): + req = FakeRequest(**filters) + method = getattr(self.controller, 'detail' if detailed else 'index') + clusters = method(req) + + filters = filters.copy() + filters.setdefault('is_up', None) +
filters.setdefault('read_deleted', 'no') + self.assertEqual(expected, clusters) + get_all_mock.assert_called_once_with( + req.environ['cinder.context'], + get_services=False, + services_summary=detailed, + **filters) + + @ddt.data(*LIST_FILTERS) + def test_index_detail(self, filters): + """Verify that we get all clusters with detailed data.""" + expected = {'clusters': EXPECTED} + self._test_list(detailed=True, filters=filters, expected=expected) + + @ddt.data(*LIST_FILTERS) + def test_index_summary(self, filters): + """Verify that we get all clusters with summary data.""" + expected = {'clusters': [{'name': 'cluster_name', + 'binary': 'cinder-volume', + 'state': 'up', + 'status': 'enabled'}, + {'name': 'cluster2', + 'binary': 'cinder-volume', + 'state': 'down', + 'status': 'disabled'}]} + self._test_list(detailed=False, filters=filters, expected=expected) + + @ddt.data(True, False) + def test_index_unauthorized(self, detailed): + """Verify that unauthorized user can't list clusters.""" + self.assertRaises(exception.PolicyNotAuthorized, + self._test_list, detailed=detailed, + filters={'is_admin': False}) + + @ddt.data(True, False) + def test_index_wrong_version(self, detailed): + """Verify that unsupported microversions can't list clusters.""" + self.assertRaises(exception.VersionNotFoundForAPIMethod, + self._test_list, detailed=detailed, + filters={'version': '3.5'}) + + @mock.patch('cinder.db.sqlalchemy.api.cluster_get', + return_value=CLUSTERS_ORM[0]) + def test_show(self, get_mock): + req = FakeRequest() + expected = {'cluster': EXPECTED[0]} + cluster = self.controller.show(req, mock.sentinel.name, + mock.sentinel.binary) + self.assertEqual(expected, cluster) + get_mock.assert_called_once_with( + req.environ['cinder.context'], + None, + services_summary=True, + name=mock.sentinel.name, + binary=mock.sentinel.binary) + + def test_show_unauthorized(self): + req = FakeRequest(is_admin=False) + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.show, req, 'name') + + def test_show_wrong_version(self): + req = FakeRequest(version='3.5') + self.assertRaises(exception.VersionNotFoundForAPIMethod, + self.controller.show, req, 'name') + + @mock.patch('cinder.db.sqlalchemy.api.cluster_update') + @mock.patch('cinder.db.sqlalchemy.api.cluster_get', + return_value=CLUSTERS_ORM[1]) + def test_update_enable(self, get_mock, update_mock): + req = FakeRequest() + expected = {'cluster': {'name': u'cluster2', + 'binary': 'cinder-volume', + 'state': 'down', + 'status': 'enabled', + 'disabled_reason': None}} + res = self.controller.update(req, 'enable', + {'name': mock.sentinel.name, + 'binary': mock.sentinel.binary}) + self.assertEqual(expected, res) + ctxt = req.environ['cinder.context'] + get_mock.assert_called_once_with(ctxt, + None, binary=mock.sentinel.binary, + name=mock.sentinel.name) + update_mock.assert_called_once_with(ctxt, get_mock.return_value.id, + {'disabled': False, + 'disabled_reason': None}) + + @mock.patch('cinder.db.sqlalchemy.api.cluster_update') + @mock.patch('cinder.db.sqlalchemy.api.cluster_get', + return_value=CLUSTERS_ORM[0]) + def test_update_disable(self, get_mock, update_mock): + req = FakeRequest() + disabled_reason = 'For testing' + expected = {'cluster': {'name': u'cluster_name', + 'state': 'up', + 'binary': 'cinder-volume', + 'status': 'disabled', + 'disabled_reason': disabled_reason}} + res = self.controller.update(req, 'disable', + {'name': mock.sentinel.name, + 'binary': mock.sentinel.binary, + 'disabled_reason': disabled_reason}) + self.assertEqual(expected,
res) + ctxt = req.environ['cinder.context'] + get_mock.assert_called_once_with(ctxt, + None, binary=mock.sentinel.binary, + name=mock.sentinel.name) + update_mock.assert_called_once_with( + ctxt, get_mock.return_value.id, + {'disabled': True, 'disabled_reason': disabled_reason}) + + def test_update_wrong_action(self): + req = FakeRequest() + self.assertRaises(exception.NotFound, self.controller.update, req, + 'action', {}) + + @ddt.data('enable', 'disable') + def test_update_missing_name(self, action): + req = FakeRequest() + self.assertRaises(exception.MissingRequired, self.controller.update, + req, action, {'binary': mock.sentinel.binary}) + + def test_update_wrong_disabled_reason(self): + req = FakeRequest() + self.assertRaises(exception.InvalidInput, self.controller.update, req, + 'disable', {'name': mock.sentinel.name, + 'disabled_reason': ' '}) + + @ddt.data('enable', 'disable') + def test_update_unauthorized(self, action): + req = FakeRequest(is_admin=False) + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.update, req, action, {}) + + @ddt.data('enable', 'disable') + def test_update_wrong_version(self, action): + req = FakeRequest(version='3.5') + self.assertRaises(exception.VersionNotFoundForAPIMethod, + self.controller.update, req, action, {}) diff --git a/cinder/tests/unit/api/v3/test_group_snapshots.py b/cinder/tests/unit/api/v3/test_group_snapshots.py new file mode 100644 index 000000000..93f520b1b --- /dev/null +++ b/cinder/tests/unit/api/v3/test_group_snapshots.py @@ -0,0 +1,420 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for group_snapshot code. 
+""" + +import mock +import webob + +from cinder.api.v3 import group_snapshots as v3_group_snapshots +from cinder import context +from cinder import db +from cinder import exception +from cinder.group import api as group_api +from cinder import objects +from cinder import test +from cinder.tests.unit.api import fakes +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import utils +import cinder.volume + +GROUP_MICRO_VERSION = '3.14' + + +class GroupSnapshotsAPITestCase(test.TestCase): + """Test Case for group_snapshots API.""" + + def setUp(self): + super(GroupSnapshotsAPITestCase, self).setUp() + self.controller = v3_group_snapshots.GroupSnapshotsController() + self.volume_api = cinder.volume.API() + self.context = context.get_admin_context() + self.context.project_id = fake.PROJECT_ID + self.context.user_id = fake.USER_ID + self.user_ctxt = context.RequestContext( + fake.USER_ID, fake.PROJECT_ID, auth_token=True) + + def test_show_group_snapshot(self): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + group_snapshot = utils.create_group_snapshot( + self.context, group_id=group.id) + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % + (fake.PROJECT_ID, group_snapshot.id), + version=GROUP_MICRO_VERSION) + res_dict = self.controller.show(req, group_snapshot.id) + + self.assertEqual(1, len(res_dict)) + self.assertEqual('this is a test group snapshot', + res_dict['group_snapshot']['description']) + self.assertEqual('test_group_snapshot', + res_dict['group_snapshot']['name']) + self.assertEqual('creating', res_dict['group_snapshot']['status']) + + group_snapshot.destroy() + db.volume_destroy(context.get_admin_context(), + volume_id) + group.destroy() + + def test_show_group_snapshot_with_group_snapshot_NotFound(self): + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % + (fake.PROJECT_ID, + fake.WILL_NOT_BE_FOUND_ID), + version=GROUP_MICRO_VERSION) + self.assertRaises(exception.GroupSnapshotNotFound, + self.controller.show, + req, fake.WILL_NOT_BE_FOUND_ID) + + def test_list_group_snapshots_json(self): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + group_snapshot1 = utils.create_group_snapshot( + self.context, group_id=group.id) + group_snapshot2 = utils.create_group_snapshot( + self.context, group_id=group.id) + group_snapshot3 = utils.create_group_snapshot( + self.context, group_id=group.id) + + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + res_dict = self.controller.index(req) + + self.assertEqual(1, len(res_dict)) + self.assertEqual(group_snapshot1.id, + res_dict['group_snapshots'][0]['id']) + self.assertEqual('test_group_snapshot', + res_dict['group_snapshots'][0]['name']) + self.assertEqual(group_snapshot2.id, + res_dict['group_snapshots'][1]['id']) + self.assertEqual('test_group_snapshot', + res_dict['group_snapshots'][1]['name']) + self.assertEqual(group_snapshot3.id, + res_dict['group_snapshots'][2]['id']) + self.assertEqual('test_group_snapshot', + res_dict['group_snapshots'][2]['name']) + + group_snapshot3.destroy() + group_snapshot2.destroy() + group_snapshot1.destroy() + 
db.volume_destroy(context.get_admin_context(), + volume_id) + group.destroy() + + def test_list_group_snapshots_detail_json(self): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + group_snapshot1 = utils.create_group_snapshot( + self.context, group_id=group.id) + group_snapshot2 = utils.create_group_snapshot( + self.context, group_id=group.id) + group_snapshot3 = utils.create_group_snapshot( + self.context, group_id=group.id) + + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/detail' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + res_dict = self.controller.detail(req) + + self.assertEqual(1, len(res_dict)) + self.assertEqual(3, len(res_dict['group_snapshots'])) + self.assertEqual('this is a test group snapshot', + res_dict['group_snapshots'][0]['description']) + self.assertEqual('test_group_snapshot', + res_dict['group_snapshots'][0]['name']) + self.assertEqual(group_snapshot1.id, + res_dict['group_snapshots'][0]['id']) + self.assertEqual('creating', + res_dict['group_snapshots'][0]['status']) + + self.assertEqual('this is a test group snapshot', + res_dict['group_snapshots'][1]['description']) + self.assertEqual('test_group_snapshot', + res_dict['group_snapshots'][1]['name']) + self.assertEqual(group_snapshot2.id, + res_dict['group_snapshots'][1]['id']) + self.assertEqual('creating', + res_dict['group_snapshots'][1]['status']) + + self.assertEqual('this is a test group snapshot', + res_dict['group_snapshots'][2]['description']) + self.assertEqual('test_group_snapshot', + res_dict['group_snapshots'][2]['name']) + self.assertEqual(group_snapshot3.id, + res_dict['group_snapshots'][2]['id']) + self.assertEqual('creating', + res_dict['group_snapshots'][2]['status']) + + group_snapshot3.destroy() + group_snapshot2.destroy() + group_snapshot1.destroy() + db.volume_destroy(context.get_admin_context(), + volume_id) + group.destroy() + + @mock.patch( + 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') + @mock.patch('cinder.db.volume_type_get') + @mock.patch('cinder.quota.VolumeTypeQuotaEngine.reserve') + def test_create_group_snapshot_json(self, mock_quota, mock_vol_type, + mock_validate): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + body = {"group_snapshot": {"name": "group_snapshot1", + "description": + "Group Snapshot 1", + "group_id": group.id}} + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + res_dict = self.controller.create(req, body) + + self.assertEqual(1, len(res_dict)) + self.assertIn('id', res_dict['group_snapshot']) + self.assertTrue(mock_validate.called) + + group.destroy() + group_snapshot = objects.GroupSnapshot.get_by_id( + context.get_admin_context(), res_dict['group_snapshot']['id']) + db.volume_destroy(context.get_admin_context(), + volume_id) + group_snapshot.destroy() + + @mock.patch( + 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') + @mock.patch('cinder.db.volume_type_get') + def test_create_group_snapshot_when_volume_in_error_status( + self, mock_vol_type, mock_validate): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + 
volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + status='error', + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + body = {"group_snapshot": {"name": "group_snapshot1", + "description": + "Group Snapshot 1", + "group_id": group.id}} + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, + req, body) + self.assertTrue(mock_validate.called) + + group.destroy() + db.volume_destroy(context.get_admin_context(), + volume_id) + + def test_create_group_snapshot_with_no_body(self): + # omit body from the request + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, + req, None) + + @mock.patch.object(group_api.API, 'create_group_snapshot', + side_effect=exception.InvalidGroupSnapshot( + reason='Invalid group snapshot')) + def test_create_with_invalid_group_snapshot(self, mock_create_group_snap): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + status='error', + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + body = {"group_snapshot": {"name": "group_snapshot1", + "description": + "Group Snapshot 1", + "group_id": group.id}} + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, + req, body) + + group.destroy() + db.volume_destroy(context.get_admin_context(), + volume_id) + + @mock.patch.object(group_api.API, 'create_group_snapshot', + side_effect=exception.GroupSnapshotNotFound( + group_snapshot_id='invalid_id')) + def test_create_with_group_snapshot_not_found(self, mock_create_grp_snap): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + status='error', + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + body = {"group_snapshot": {"name": "group_snapshot1", + "description": + "Group Snapshot 1", + "group_id": group.id}} + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + self.assertRaises(exception.GroupSnapshotNotFound, + self.controller.create, + req, body) + + group.destroy() + db.volume_destroy(context.get_admin_context(), + volume_id) + + def test_create_group_snapshot_from_empty_group(self): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + body = {"group_snapshot": {"name": "group_snapshot1", + "description": + "Group Snapshot 1", + "group_id": group.id}} + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, + req, body) + + group.destroy() + + def test_delete_group_snapshot_available(self): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + group_snapshot = utils.create_group_snapshot( + self.context, + group_id=group.id, + 
status='available') + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % + (fake.PROJECT_ID, group_snapshot.id), + version=GROUP_MICRO_VERSION) + res_dict = self.controller.delete(req, group_snapshot.id) + + group_snapshot = objects.GroupSnapshot.get_by_id(self.context, + group_snapshot.id) + self.assertEqual(202, res_dict.status_int) + self.assertEqual('deleting', group_snapshot.status) + + group_snapshot.destroy() + db.volume_destroy(context.get_admin_context(), + volume_id) + group.destroy() + + def test_delete_group_snapshot_available_used_as_source(self): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + group_snapshot = utils.create_group_snapshot( + self.context, + group_id=group.id, + status='available') + + group2 = utils.create_group( + self.context, status='creating', + group_snapshot_id=group_snapshot.id, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % + (fake.PROJECT_ID, group_snapshot.id), + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, + req, group_snapshot.id) + + group_snapshot.destroy() + db.volume_destroy(context.get_admin_context(), + volume_id) + group.destroy() + group2.destroy() + + def test_delete_group_snapshot_with_group_snapshot_NotFound(self): + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % + (fake.PROJECT_ID, + fake.WILL_NOT_BE_FOUND_ID), + version=GROUP_MICRO_VERSION) + self.assertRaises(exception.GroupSnapshotNotFound, + self.controller.delete, + req, fake.WILL_NOT_BE_FOUND_ID) + + def test_delete_group_snapshot_with_Invalid_group_snapshot(self): + group = utils.create_group( + self.context, + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID],) + volume_id = utils.create_volume( + self.context, + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID)['id'] + group_snapshot = utils.create_group_snapshot( + self.context, + group_id=group.id, + status='invalid') + req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % + (fake.PROJECT_ID, group_snapshot.id), + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, + req, group_snapshot.id) + + group_snapshot.destroy() + db.volume_destroy(context.get_admin_context(), + volume_id) + group.destroy() diff --git a/cinder/tests/unit/api/v3/test_group_types.py b/cinder/tests/unit/api/v3/test_group_types.py new file mode 100644 index 000000000..c49e4f149 --- /dev/null +++ b/cinder/tests/unit/api/v3/test_group_types.py @@ -0,0 +1,553 @@ +# Copyright 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +import mock +from oslo_utils import timeutils +import six +import webob + +import cinder.api.common as common +from cinder.api.v3 import group_specs as v3_group_specs +from cinder.api.v3 import group_types as v3_group_types +from cinder.api.v3.views import group_types as views_types +from cinder import context +from cinder import exception +from cinder import test +from cinder.tests.unit.api import fakes +from cinder.tests.unit import fake_constants as fake +from cinder.volume import group_types + +GROUP_TYPE_MICRO_VERSION = '3.11' + + +def stub_group_type(id): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5" + } + return dict( + id=id, + name='group_type_%s' % six.text_type(id), + description='group_type_desc_%s' % six.text_type(id), + group_specs=specs, + ) + + +def return_group_types_get_all_types(context, filters=None, marker=None, + limit=None, sort_keys=None, + sort_dirs=None, offset=None, + list_result=False): + result = dict(group_type_1=stub_group_type(1), + group_type_2=stub_group_type(2), + group_type_3=stub_group_type(3) + ) + if list_result: + return list(result.values()) + return result + + +def return_empty_group_types_get_all_types(context, filters=None, marker=None, + limit=None, sort_keys=None, + sort_dirs=None, offset=None, + list_result=False): + if list_result: + return [] + return {} + + +def return_group_types_get_group_type(context, id): + if id == fake.WILL_NOT_BE_FOUND_ID: + raise exception.GroupTypeNotFound(group_type_id=id) + return stub_group_type(id) + + +def return_group_types_get_default(): + return stub_group_type(1) + + +def return_group_types_get_default_not_found(): + return {} + + +class GroupTypesApiTest(test.TestCase): + + def _create_group_type(self, group_type_name, group_specs=None, + is_public=True, projects=None): + return group_types.create(self.ctxt, group_type_name, group_specs, + is_public, projects).get('id') + + def setUp(self): + super(GroupTypesApiTest, self).setUp() + self.controller = v3_group_types.GroupTypesController() + self.specs_controller = v3_group_specs.GroupTypeSpecsController() + self.ctxt = context.RequestContext(user_id=fake.USER_ID, + project_id=fake.PROJECT_ID, + is_admin=True) + self.user_ctxt = context.RequestContext(user_id=fake.USER2_ID, + project_id=fake.PROJECT2_ID, + is_admin=False) + self.type_id1 = self._create_group_type('group_type1', + {'key1': 'value1'}) + self.type_id2 = self._create_group_type('group_type2', + {'key2': 'value2'}) + self.type_id3 = self._create_group_type('group_type3', + {'key3': 'value3'}, False, + [fake.PROJECT_ID]) + + def test_group_types_index(self): + self.stubs.Set(group_types, 'get_all_group_types', + return_group_types_get_all_types) + + req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID, + use_admin_context=True, + version=GROUP_TYPE_MICRO_VERSION) + res_dict = self.controller.index(req) + + self.assertEqual(3, len(res_dict['group_types'])) + + expected_names = ['group_type_1', 'group_type_2', 'group_type_3'] + actual_names = map(lambda e: e['name'], res_dict['group_types']) + self.assertEqual(set(expected_names), set(actual_names)) + for entry in res_dict['group_types']: + self.assertEqual('value1', entry['group_specs']['key1']) + + def test_group_types_index_no_data(self): + self.stubs.Set(group_types, 'get_all_group_types', + return_empty_group_types_get_all_types) + + req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + 
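# An empty result from the backend should still yield a well-formed, empty listing. +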
res_dict = self.controller.index(req) + + self.assertEqual(0, len(res_dict['group_types'])) + + def test_group_types_index_with_limit(self): + req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' % + fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + + self.assertEqual(1, len(res['group_types'])) + self.assertEqual(self.type_id3, res['group_types'][0]['id']) + + expect_next_link = ('http://localhost/v3/%s/group_types?limit=1' + '&marker=%s' % + (fake.PROJECT_ID, res['group_types'][0]['id'])) + self.assertEqual(expect_next_link, res['group_type_links'][0]['href']) + + def test_group_types_index_with_offset(self): + req = fakes.HTTPRequest.blank( + '/v3/%s/group_types?offset=1' % fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + + self.assertEqual(2, len(res['group_types'])) + + def test_group_types_index_with_offset_out_of_range(self): + url = '/v3/%s/group_types?offset=424366766556787' % fake.PROJECT_ID + req = fakes.HTTPRequest.blank(url, version=GROUP_TYPE_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.index, req) + + def test_group_types_index_with_limit_and_offset(self): + req = fakes.HTTPRequest.blank( + '/v3/%s/group_types?limit=2&offset=1' % fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + + self.assertEqual(2, len(res['group_types'])) + self.assertEqual(self.type_id2, res['group_types'][0]['id']) + self.assertEqual(self.type_id1, res['group_types'][1]['id']) + + def test_group_types_index_with_limit_and_marker(self): + req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' + '&marker=%s' % + (fake.PROJECT_ID, + self.type_id2), + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + + self.assertEqual(1, len(res['group_types'])) + self.assertEqual(self.type_id1, res['group_types'][0]['id']) + + def test_group_types_index_with_valid_filter(self): + req = fakes.HTTPRequest.blank( + '/v3/%s/group_types?is_public=True' % fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + + self.assertEqual(3, len(res['group_types'])) + self.assertEqual(self.type_id3, res['group_types'][0]['id']) + self.assertEqual(self.type_id2, res['group_types'][1]['id']) + self.assertEqual(self.type_id1, res['group_types'][2]['id']) + + def test_group_types_index_with_invalid_filter(self): + req = fakes.HTTPRequest.blank( + '/v3/%s/group_types?id=%s' % (fake.PROJECT_ID, self.type_id1), + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + + self.assertEqual(3, len(res['group_types'])) + + def test_group_types_index_with_sort_keys(self): + req = fakes.HTTPRequest.blank('/v3/%s/group_types?sort=id' % + fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + expect_result = [self.type_id1, self.type_id2, self.type_id3] + expect_result.sort(reverse=True) + + self.assertEqual(3, len(res['group_types'])) + self.assertEqual(expect_result[0], res['group_types'][0]['id']) + self.assertEqual(expect_result[1], res['group_types'][1]['id']) + self.assertEqual(expect_result[2], res['group_types'][2]['id']) + + def test_group_types_index_with_sort_and_limit(self): 
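+ # With no explicit direction, 'sort=id' returns ids in descending order + # (as the reversed expect_result below assumes); 'limit=2' then caps the page.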
+ req = fakes.HTTPRequest.blank( + '/v3/%s/group_types?sort=id&limit=2' % fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + expect_result = [self.type_id1, self.type_id2, self.type_id3] + expect_result.sort(reverse=True) + + self.assertEqual(2, len(res['group_types'])) + self.assertEqual(expect_result[0], res['group_types'][0]['id']) + self.assertEqual(expect_result[1], res['group_types'][1]['id']) + + def test_group_types_index_with_sort_keys_and_sort_dirs(self): + req = fakes.HTTPRequest.blank( + '/v3/%s/group_types?sort=id:asc' % fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.environ['cinder.context'] = self.ctxt + res = self.controller.index(req) + expect_result = [self.type_id1, self.type_id2, self.type_id3] + expect_result.sort() + + self.assertEqual(3, len(res['group_types'])) + self.assertEqual(expect_result[0], res['group_types'][0]['id']) + self.assertEqual(expect_result[1], res['group_types'][1]['id']) + self.assertEqual(expect_result[2], res['group_types'][2]['id']) + + def test_group_types_show(self): + self.stubs.Set(group_types, 'get_group_type', + return_group_types_get_group_type) + + type_id = six.text_type(uuid.uuid4()) + req = fakes.HTTPRequest.blank('/v3/%s/group_types/' % fake.PROJECT_ID + + type_id, + version=GROUP_TYPE_MICRO_VERSION) + res_dict = self.controller.show(req, type_id) + + self.assertEqual(1, len(res_dict)) + self.assertEqual(type_id, res_dict['group_type']['id']) + type_name = 'group_type_' + type_id + self.assertEqual(type_name, res_dict['group_type']['name']) + + def test_group_types_show_pre_microversion(self): + self.stubs.Set(group_types, 'get_group_type', + return_group_types_get_group_type) + + type_id = six.text_type(uuid.uuid4()) + req = fakes.HTTPRequest.blank('/v3/%s/group_types/' % fake.PROJECT_ID + + type_id, + version='3.5') + + self.assertRaises(exception.VersionNotFoundForAPIMethod, + self.controller.show, req, type_id) + + def test_group_types_show_not_found(self): + self.stubs.Set(group_types, 'get_group_type', + return_group_types_get_group_type) + + req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' % + (fake.PROJECT_ID, + fake.WILL_NOT_BE_FOUND_ID), + version=GROUP_TYPE_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + req, fake.WILL_NOT_BE_FOUND_ID) + + def test_get_default(self): + self.stubs.Set(group_types, 'get_default_group_type', + return_group_types_get_default) + req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' % + fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.method = 'GET' + res_dict = self.controller.show(req, 'default') + self.assertEqual(1, len(res_dict)) + self.assertEqual('group_type_1', res_dict['group_type']['name']) + self.assertEqual('group_type_desc_1', + res_dict['group_type']['description']) + + def test_get_default_not_found(self): + self.stubs.Set(group_types, 'get_default_group_type', + return_group_types_get_default_not_found) + req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' % + fake.PROJECT_ID, + version=GROUP_TYPE_MICRO_VERSION) + req.method = 'GET' + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, 'default') + + def test_view_builder_show(self): + view_builder = views_types.ViewBuilder() + + now = timeutils.utcnow().isoformat() + raw_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + deleted=False, + created_at=now, + updated_at=now, + group_specs={}, + deleted_at=None, + 
id=42, + ) + + request = fakes.HTTPRequest.blank("/v3", + version=GROUP_TYPE_MICRO_VERSION) + output = view_builder.show(request, raw_group_type) + + self.assertIn('group_type', output) + expected_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + id=42, + ) + self.assertDictMatch(expected_group_type, output['group_type']) + + def test_view_builder_show_admin(self): + view_builder = views_types.ViewBuilder() + + now = timeutils.utcnow().isoformat() + raw_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + deleted=False, + created_at=now, + updated_at=now, + group_specs={}, + deleted_at=None, + id=42, + ) + + request = fakes.HTTPRequest.blank("/v3", use_admin_context=True, + version=GROUP_TYPE_MICRO_VERSION) + output = view_builder.show(request, raw_group_type) + + self.assertIn('group_type', output) + expected_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + group_specs={}, + id=42, + ) + self.assertDictMatch(expected_group_type, output['group_type']) + + def __test_view_builder_show_qos_specs_id_policy(self): + with mock.patch.object(common, + 'validate_policy', + side_effect=[False, True]): + view_builder = views_types.ViewBuilder() + now = timeutils.utcnow().isoformat() + raw_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + deleted=False, + created_at=now, + updated_at=now, + deleted_at=None, + id=42, + ) + + request = fakes.HTTPRequest.blank("/v3", + version=GROUP_TYPE_MICRO_VERSION) + output = view_builder.show(request, raw_group_type) + + self.assertIn('group_type', output) + expected_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + id=42, + ) + self.assertDictMatch(expected_group_type, output['group_type']) + + def test_view_builder_show_group_specs_policy(self): + with mock.patch.object(common, + 'validate_policy', + side_effect=[True, False]): + view_builder = views_types.ViewBuilder() + now = timeutils.utcnow().isoformat() + raw_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + deleted=False, + created_at=now, + updated_at=now, + group_specs={}, + deleted_at=None, + id=42, + ) + + request = fakes.HTTPRequest.blank("/v3", + version=GROUP_TYPE_MICRO_VERSION) + output = view_builder.show(request, raw_group_type) + + self.assertIn('group_type', output) + expected_group_type = dict( + name='new_type', + description='new_type_desc', + group_specs={}, + is_public=True, + id=42, + ) + self.assertDictMatch(expected_group_type, output['group_type']) + + def test_view_builder_show_pass_all_policy(self): + with mock.patch.object(common, + 'validate_policy', + side_effect=[True, True]): + view_builder = views_types.ViewBuilder() + now = timeutils.utcnow().isoformat() + raw_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + deleted=False, + created_at=now, + updated_at=now, + group_specs={}, + deleted_at=None, + id=42, + ) + + request = fakes.HTTPRequest.blank("/v3", + version=GROUP_TYPE_MICRO_VERSION) + output = view_builder.show(request, raw_group_type) + + self.assertIn('group_type', output) + expected_group_type = dict( + name='new_type', + description='new_type_desc', + group_specs={}, + is_public=True, + id=42, + ) + self.assertDictMatch(expected_group_type, output['group_type']) + + def test_view_builder_list(self): + view_builder = views_types.ViewBuilder() + + now = timeutils.utcnow().isoformat() + raw_group_types = 
[] + for i in range(0, 10): + raw_group_types.append( + dict( + name='new_type', + description='new_type_desc', + is_public=True, + deleted=False, + created_at=now, + updated_at=now, + group_specs={}, + deleted_at=None, + id=42 + i + ) + ) + + request = fakes.HTTPRequest.blank("/v3", + version=GROUP_TYPE_MICRO_VERSION) + output = view_builder.index(request, raw_group_types) + + self.assertIn('group_types', output) + for i in range(0, 10): + expected_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + id=42 + i + ) + self.assertDictMatch(expected_group_type, + output['group_types'][i]) + + def test_view_builder_list_admin(self): + view_builder = views_types.ViewBuilder() + + now = timeutils.utcnow().isoformat() + raw_group_types = [] + for i in range(0, 10): + raw_group_types.append( + dict( + name='new_type', + description='new_type_desc', + is_public=True, + deleted=False, + created_at=now, + updated_at=now, + group_specs={}, + deleted_at=None, + id=42 + i + ) + ) + + request = fakes.HTTPRequest.blank("/v3", use_admin_context=True, + version=GROUP_TYPE_MICRO_VERSION) + output = view_builder.index(request, raw_group_types) + + self.assertIn('group_types', output) + for i in range(0, 10): + expected_group_type = dict( + name='new_type', + description='new_type_desc', + is_public=True, + group_specs={}, + id=42 + i + ) + self.assertDictMatch(expected_group_type, + output['group_types'][i]) + + def test_check_policy(self): + self.controller._check_policy(self.ctxt) + + self.assertRaises(exception.PolicyNotAuthorized, + self.controller._check_policy, + self.user_ctxt) + + self.specs_controller._check_policy(self.ctxt) + + self.assertRaises(exception.PolicyNotAuthorized, + self.specs_controller._check_policy, + self.user_ctxt) diff --git a/cinder/tests/unit/api/v3/test_groups.py b/cinder/tests/unit/api/v3/test_groups.py new file mode 100644 index 000000000..8619fcbe7 --- /dev/null +++ b/cinder/tests/unit/api/v3/test_groups.py @@ -0,0 +1,876 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for group code. 
+""" + +import ddt +import mock +import webob + +from cinder.api.v3 import groups as v3_groups +from cinder import context +from cinder import db +from cinder import exception +import cinder.group +from cinder import objects +from cinder.objects import fields +from cinder import test +from cinder.tests.unit.api import fakes +from cinder.tests.unit.api.v3 import stubs +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import utils +from cinder.volume import api as volume_api + +GROUP_MICRO_VERSION = '3.13' +GROUP_FROM_SRC_MICRO_VERSION = '3.14' + + +@ddt.ddt +class GroupsAPITestCase(test.TestCase): + """Test Case for groups API.""" + + def setUp(self): + super(GroupsAPITestCase, self).setUp() + self.controller = v3_groups.GroupsController() + self.group_api = cinder.group.API() + self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, + auth_token=True, + is_admin=True) + self.user_ctxt = context.RequestContext( + fake.USER_ID, fake.PROJECT_ID, auth_token=True) + self.volume_type1 = self._create_volume_type(id=fake.VOLUME_TYPE_ID) + self.group1 = self._create_group() + self.group2 = self._create_group() + self.group3 = self._create_group(ctxt=self.user_ctxt) + self.addCleanup(self._cleanup) + + def _cleanup(self): + self.group1.destroy() + self.group2.destroy() + self.group3.destroy() + db.volume_type_destroy(self.ctxt, self.volume_type1.id) + + def _create_group( + self, + ctxt=None, + name='test_group', + description='this is a test group', + group_type_id=fake.GROUP_TYPE_ID, + volume_type_ids=[fake.VOLUME_TYPE_ID], + availability_zone='az1', + host='fakehost', + status=fields.GroupStatus.CREATING, + **kwargs): + """Create a group object.""" + ctxt = ctxt or self.ctxt + group = objects.Group(ctxt) + group.user_id = fake.USER_ID + group.project_id = fake.PROJECT_ID + group.availability_zone = availability_zone + group.name = name + group.description = description + group.group_type_id = group_type_id + group.volume_type_ids = volume_type_ids + group.host = host + group.status = status + group.update(kwargs) + group.create() + return group + + def _create_volume_type( + self, + ctxt=None, + id=fake.VOLUME_TYPE_ID, + name='test_volume_type', + description='this is a test volume type', + extra_specs={"test_key": "test_val"}, + testcase_instance=None, + **kwargs): + """Create a volume type.""" + ctxt = ctxt or self.ctxt + vol_type = utils.create_volume_type( + ctxt, + testcase_instance=testcase_instance, + id=id, + name=name, + description=description, + extra_specs=extra_specs, + **kwargs) + return vol_type + + @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') + @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') + def test_show_group(self, mock_vol_get_all_by_group, + mock_vol_type_get_all_by_group): + volume_objs = [objects.Volume(context=self.ctxt, id=i) + for i in [fake.VOLUME_ID]] + volumes = objects.VolumeList(context=self.ctxt, objects=volume_objs) + mock_vol_get_all_by_group.return_value = volumes + + vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) + for i in [fake.VOLUME_TYPE_ID]] + vol_types = objects.VolumeTypeList(context=self.ctxt, + objects=vol_type_objs) + mock_vol_type_get_all_by_group.return_value = vol_types + + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + res_dict = self.controller.show(req, self.group1.id) + + self.assertEqual(1, len(res_dict)) + self.assertEqual('az1', + 
res_dict['group']['availability_zone']) + self.assertEqual('this is a test group', + res_dict['group']['description']) + self.assertEqual('test_group', + res_dict['group']['name']) + self.assertEqual('creating', + res_dict['group']['status']) + self.assertEqual([fake.VOLUME_TYPE_ID], + res_dict['group']['volume_types']) + + def test_show_group_with_group_NotFound(self): + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' % + (fake.PROJECT_ID, + fake.WILL_NOT_BE_FOUND_ID), + version=GROUP_MICRO_VERSION) + self.assertRaises(exception.GroupNotFound, self.controller.show, + req, fake.WILL_NOT_BE_FOUND_ID) + + def test_list_groups_json(self): + self.group2.group_type_id = fake.GROUP_TYPE2_ID + self.group2.volume_type_ids = [fake.VOLUME_TYPE2_ID] + self.group2.save() + + self.group3.group_type_id = fake.GROUP_TYPE3_ID + self.group3.volume_type_ids = [fake.VOLUME_TYPE3_ID] + self.group3.save() + + req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + res_dict = self.controller.index(req) + + self.assertEqual(1, len(res_dict)) + self.assertEqual(self.group3.id, + res_dict['groups'][0]['id']) + self.assertEqual('test_group', + res_dict['groups'][0]['name']) + self.assertEqual(self.group2.id, + res_dict['groups'][1]['id']) + self.assertEqual('test_group', + res_dict['groups'][1]['name']) + self.assertEqual(self.group1.id, + res_dict['groups'][2]['id']) + self.assertEqual('test_group', + res_dict['groups'][2]['name']) + + @ddt.data(False, True) + def test_list_groups_with_limit(self, is_detail): + url = '/v3/%s/groups?limit=1' % fake.PROJECT_ID + if is_detail: + url = '/v3/%s/groups/detail?limit=1' % fake.PROJECT_ID + req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) + res_dict = self.controller.index(req) + + self.assertEqual(2, len(res_dict)) + self.assertEqual(1, len(res_dict['groups'])) + self.assertEqual(self.group3.id, + res_dict['groups'][0]['id']) + next_link = ( + 'http://localhost/v3/%s/groups?limit=' + '1&marker=%s' % + (fake.PROJECT_ID, res_dict['groups'][0]['id'])) + self.assertEqual(next_link, + res_dict['group_links'][0]['href']) + + @ddt.data(False, True) + def test_list_groups_with_offset(self, is_detail): + url = '/v3/%s/groups?offset=1' % fake.PROJECT_ID + if is_detail: + url = '/v3/%s/groups/detail?offset=1' % fake.PROJECT_ID + req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) + res_dict = self.controller.index(req) + + self.assertEqual(1, len(res_dict)) + self.assertEqual(2, len(res_dict['groups'])) + self.assertEqual(self.group2.id, + res_dict['groups'][0]['id']) + self.assertEqual(self.group1.id, + res_dict['groups'][1]['id']) + + @ddt.data(False, True) + def test_list_groups_with_offset_out_of_range(self, is_detail): + url = ('/v3/%s/groups?offset=234523423455454' % + fake.PROJECT_ID) + if is_detail: + url = ('/v3/%s/groups/detail?offset=234523423455454' % + fake.PROJECT_ID) + req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, + req) + + @ddt.data(False, True) + def test_list_groups_with_limit_and_offset(self, is_detail): + url = '/v3/%s/groups?limit=2&offset=1' % fake.PROJECT_ID + if is_detail: + url = ('/v3/%s/groups/detail?limit=2&offset=1' % + fake.PROJECT_ID) + req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) + res_dict = self.controller.index(req) + + self.assertEqual(2, len(res_dict)) + self.assertEqual(2, len(res_dict['groups'])) + self.assertEqual(self.group2.id, + res_dict['groups'][0]['id']) + 
self.assertEqual(self.group1.id, + res_dict['groups'][1]['id']) + + @ddt.data(False, True) + def test_list_groups_with_filter(self, is_detail): + # Create a group with user context + url = ('/v3/%s/groups?' + 'all_tenants=True&id=%s') % (fake.PROJECT_ID, + self.group3.id) + if is_detail: + url = ('/v3/%s/groups/detail?' + 'all_tenants=True&id=%s') % (fake.PROJECT_ID, + self.group3.id) + req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION, + use_admin_context=True) + res_dict = self.controller.index(req) + + self.assertEqual(1, len(res_dict)) + self.assertEqual(1, len(res_dict['groups'])) + self.assertEqual(self.group3.id, + res_dict['groups'][0]['id']) + + @ddt.data(False, True) + def test_list_groups_with_sort(self, is_detail): + url = '/v3/%s/groups?sort=id:asc' % fake.PROJECT_ID + if is_detail: + url = ('/v3/%s/groups/detail?sort=id:asc' % + fake.PROJECT_ID) + req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) + expect_result = [self.group1.id, self.group2.id, + self.group3.id] + expect_result.sort() + res_dict = self.controller.index(req) + + self.assertEqual(1, len(res_dict)) + self.assertEqual(3, len(res_dict['groups'])) + self.assertEqual(expect_result[0], + res_dict['groups'][0]['id']) + self.assertEqual(expect_result[1], + res_dict['groups'][1]['id']) + self.assertEqual(expect_result[2], + res_dict['groups'][2]['id']) + + @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') + def test_list_groups_detail_json(self, mock_vol_type_get_all_by_group): + volume_type_ids = [fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID] + vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) + for i in volume_type_ids] + vol_types = objects.VolumeTypeList(context=self.ctxt, + objects=vol_type_objs) + mock_vol_type_get_all_by_group.return_value = vol_types + + self.group1.volume_type_ids = volume_type_ids + self.group1.save() + self.group2.volume_type_ids = volume_type_ids + self.group2.save() + self.group3.volume_type_ids = volume_type_ids + self.group3.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/detail' % + fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + res_dict = self.controller.detail(req) + + self.assertEqual(1, len(res_dict)) + self.assertEqual('az1', + res_dict['groups'][0]['availability_zone']) + self.assertEqual('this is a test group', + res_dict['groups'][0]['description']) + self.assertEqual('test_group', + res_dict['groups'][0]['name']) + self.assertEqual(self.group3.id, + res_dict['groups'][0]['id']) + self.assertEqual('creating', + res_dict['groups'][0]['status']) + self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID], + res_dict['groups'][0]['volume_types']) + + self.assertEqual('az1', + res_dict['groups'][1]['availability_zone']) + self.assertEqual('this is a test group', + res_dict['groups'][1]['description']) + self.assertEqual('test_group', + res_dict['groups'][1]['name']) + self.assertEqual(self.group2.id, + res_dict['groups'][1]['id']) + self.assertEqual('creating', + res_dict['groups'][1]['status']) + self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID], + res_dict['groups'][1]['volume_types']) + + self.assertEqual('az1', + res_dict['groups'][2]['availability_zone']) + self.assertEqual('this is a test group', + res_dict['groups'][2]['description']) + self.assertEqual('test_group', + res_dict['groups'][2]['name']) + self.assertEqual(self.group1.id, + res_dict['groups'][2]['id']) + self.assertEqual('creating', + res_dict['groups'][2]['status']) + self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID], + 
res_dict['groups'][2]['volume_types']) + + @mock.patch( + 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') + def test_create_group_json(self, mock_validate): + # Create volume types and group type + vol_type = 'test' + vol_type_id = db.volume_type_create( + self.ctxt, + {'name': vol_type, 'extra_specs': {}}).get('id') + grp_type = 'grp_type' + grp_type_id = db.group_type_create( + self.ctxt, + {'name': grp_type, 'group_specs': {}}).get('id') + body = {"group": {"name": "group1", + "volume_types": [vol_type_id], + "group_type": grp_type_id, + "description": + "Group 1", }} + req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + res_dict = self.controller.create(req, body) + + self.assertEqual(1, len(res_dict)) + self.assertIn('id', res_dict['group']) + self.assertTrue(mock_validate.called) + + group_id = res_dict['group']['id'] + objects.Group.get_by_id(self.ctxt, group_id) + + def test_create_group_with_no_body(self): + # omit body from the request + req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, + req, None) + + def test_delete_group_available(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": False}} + res_dict = self.controller.delete_group( + req, self.group1.id, body) + + group = objects.Group.get_by_id( + self.ctxt, self.group1.id) + self.assertEqual(202, res_dict.status_int) + self.assertEqual('deleting', group.status) + + def test_delete_group_available_no_delete_volumes(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": False}} + res_dict = self.controller.delete_group( + req, self.group1.id, body) + + group = objects.Group.get_by_id( + self.ctxt, self.group1.id) + self.assertEqual(202, res_dict.status_int) + self.assertEqual(fields.GroupStatus.DELETING, + group.status) + + def test_delete_group_with_group_NotFound(self): + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, + fake.WILL_NOT_BE_FOUND_ID), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": False}} + self.assertRaises(exception.GroupNotFound, + self.controller.delete_group, + req, fake.WILL_NOT_BE_FOUND_ID, body) + + def test_delete_group_with_invalid_group(self): + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, + self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": False}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.delete_group, + req, self.group1.id, body) + + def test_delete_group_invalid_delete_volumes(self): + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, + self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": True}} + res_dict = self.controller.delete_group( + req, self.group1.id, body) + + group = objects.Group.get_by_id( + self.ctxt, self.group1.id) + self.assertEqual(202, res_dict.status_int) + self.assertEqual('deleting', group.status) + + def test_delete_group_no_host(self): + self.group1.host = None + self.group1.status = fields.GroupStatus.ERROR + 
self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, + self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": True}} + res_dict = self.controller.delete_group( + req, self.group1.id, body) + + self.assertEqual(202, res_dict.status_int) + group = objects.Group.get_by_id( + context.get_admin_context(read_deleted='yes'), + self.group1.id) + self.assertEqual(fields.GroupStatus.DELETED, group.status) + self.assertIsNone(group.host) + + def test_create_delete_group_update_quota(self): + name = 'mygroup' + description = 'group 1' + grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'group_type'} + fake_type = {'id': fake.VOLUME_TYPE_ID, 'name': 'fake_type'} + self.mock_object(db, 'volume_types_get_by_name_or_id', + mock.Mock(return_value=[fake_type])) + self.mock_object(db, 'group_type_get', + mock.Mock(return_value=grp_type)) + self.mock_object(self.group_api, + '_cast_create_group', + mock.Mock()) + self.mock_object(self.group_api, 'update_quota', + mock.Mock()) + group = self.group_api.create(self.ctxt, name, description, + grp_type['id'], [fake_type['id']]) + self.group_api.update_quota.assert_called_once_with( + self.ctxt, group, 1) + + self.assertEqual(fields.GroupStatus.CREATING, group.status) + self.assertIsNone(group.host) + self.group_api.update_quota.reset_mock() + group.status = fields.GroupStatus.ERROR + self.group_api.delete(self.ctxt, group) + + self.group_api.update_quota.assert_called_once_with( + self.ctxt, group, -1, self.ctxt.project_id) + group = objects.Group.get_by_id( + context.get_admin_context(read_deleted='yes'), + group.id) + self.assertEqual(fields.GroupStatus.DELETED, group.status) + + def test_delete_group_with_invalid_body(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"invalid_request_element": {"delete-volumes": False}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.delete_group, + req, self.group1.id, body) + + def test_delete_group_with_invalid_delete_volumes_value_in_body(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": "abcd"}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.delete_group, + req, self.group1.id, body) + + def test_delete_group_with_empty_delete_volumes_value_in_body(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": ""}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.delete_group, + req, self.group1.id, body) + + def test_delete_group_delete_volumes(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + vol = utils.create_volume(self.ctxt, group_id=self.group1.id) + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": True}} + res_dict = self.controller.delete_group( + req, self.group1.id, body) + + group = objects.Group.get_by_id( + self.ctxt, self.group1.id) + self.assertEqual(202, res_dict.status_int) + 
self.assertEqual('deleting', group.status) + + vol.destroy() + + def test_delete_group_delete_volumes_with_attached_volumes(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + vol = utils.create_volume(self.ctxt, group_id=self.group1.id, + attach_status='attached') + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": True}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.delete_group, + req, self.group1.id, body) + + vol.destroy() + + def test_delete_group_delete_volumes_with_snapshots(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + vol = utils.create_volume(self.ctxt, group_id=self.group1.id) + utils.create_snapshot(self.ctxt, vol.id) + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": True}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.delete_group, + req, self.group1.id, body) + + vol.destroy() + + def test_delete_group_delete_volumes_with_deleted_snapshots(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + vol = utils.create_volume(self.ctxt, group_id=self.group1.id) + utils.create_snapshot(self.ctxt, vol.id, status='deleted', + deleted=True) + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"delete": {"delete-volumes": True}} + res_dict = self.controller.delete_group( + req, self.group1.id, body) + + group = objects.Group.get_by_id( + self.ctxt, self.group1.id) + self.assertEqual(202, res_dict.status_int) + self.assertEqual('deleting', group.status) + + vol.destroy() + + def test_create_group_failed_no_group_type(self): + name = 'group1' + body = {"group": {"volume_types": [fake.VOLUME_TYPE_ID], + "name": name, + "description": + "Group 1", }} + req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, body) + + def test_create_group_failed_no_volume_types(self): + name = 'group1' + body = {"group": {"group_type": fake.GROUP_TYPE_ID, + "name": name, + "description": + "Group 1", }} + req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, + version=GROUP_MICRO_VERSION) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, body) + + @mock.patch( + 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') + def test_update_group_success(self, mock_validate): + volume_type_id = fake.VOLUME_TYPE_ID + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.host = 'test_host' + self.group1.volume_type_ids = [volume_type_id] + self.group1.save() + + remove_volume = utils.create_volume( + self.ctxt, + volume_type_id=volume_type_id, + group_id=self.group1.id) + remove_volume2 = utils.create_volume( + self.ctxt, + volume_type_id=volume_type_id, + group_id=self.group1.id, + status='error') + remove_volume3 = utils.create_volume( + self.ctxt, + volume_type_id=volume_type_id, + group_id=self.group1.id, + status='error_deleting') + + self.assertEqual(fields.GroupStatus.AVAILABLE, + self.group1.status) + + group_volumes = db.volume_get_all_by_generic_group( + self.ctxt.elevated(), + self.group1.id) + group_vol_ids = [group_vol['id'] for group_vol in group_volumes] + 
self.assertIn(remove_volume.id, group_vol_ids) + self.assertIn(remove_volume2.id, group_vol_ids) + self.assertIn(remove_volume3.id, group_vol_ids) + + add_volume = utils.create_volume( + self.ctxt, + volume_type_id=volume_type_id) + add_volume2 = utils.create_volume( + self.ctxt, + volume_type_id=volume_type_id) + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + name = 'newgroup' + description = 'New Group Description' + add_volumes = add_volume.id + "," + add_volume2.id + remove_volumes = ','.join( + [remove_volume.id, remove_volume2.id, remove_volume3.id]) + body = {"group": {"name": name, + "description": description, + "add_volumes": add_volumes, + "remove_volumes": remove_volumes, }} + res_dict = self.controller.update( + req, self.group1.id, body) + + group = objects.Group.get_by_id( + self.ctxt, self.group1.id) + self.assertEqual(202, res_dict.status_int) + self.assertTrue(mock_validate.called) + self.assertEqual(fields.GroupStatus.UPDATING, + group.status) + + remove_volume.destroy() + remove_volume2.destroy() + remove_volume3.destroy() + add_volume.destroy() + add_volume2.destroy() + + def test_update_group_add_volume_not_found(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"group": {"name": None, + "description": None, + "add_volumes": "fake-volume-uuid", + "remove_volumes": None, }} + + self.assertRaises(exception.InvalidVolume, + self.controller.update, + req, self.group1.id, body) + + def test_update_group_remove_volume_not_found(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"group": {"name": None, + "description": "new description", + "add_volumes": None, + "remove_volumes": "fake-volume-uuid", }} + + self.assertRaises(exception.InvalidVolume, + self.controller.update, + req, self.group1.id, body) + + def test_update_group_empty_parameters(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"group": {"name": None, + "description": None, + "add_volumes": None, + "remove_volumes": None, }} + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + req, self.group1.id, body) + + def test_update_group_add_volume_invalid_state(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + add_volume = utils.create_volume( + self.ctxt, + volume_type_id=fake.VOLUME_TYPE_ID, + status='wrong_status') + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + add_volumes = add_volume.id + body = {"group": {"name": "group1", + "description": "", + "add_volumes": add_volumes, + "remove_volumes": None, }} + + self.assertRaises(exception.InvalidVolume, + self.controller.update, + req, self.group1.id, body) + + add_volume.destroy() + + def test_update_group_add_volume_invalid_volume_type(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + wrong_type = fake.VOLUME_TYPE2_ID + add_volume = utils.create_volume( + self.ctxt, + volume_type_id=wrong_type) + req = 
fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + add_volumes = add_volume.id + body = {"group": {"name": "group1", + "description": "", + "add_volumes": add_volumes, + "remove_volumes": None, }} + + self.assertRaises(exception.InvalidVolume, + self.controller.update, + req, self.group1.id, body) + + add_volume.destroy() + + def test_update_group_add_volume_already_in_group(self): + self.group1.status = fields.GroupStatus.AVAILABLE + self.group1.save() + add_volume = utils.create_volume( + self.ctxt, + group_id=fake.GROUP2_ID) + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + add_volumes = add_volume.id + body = {"group": {"name": "group1", + "description": "", + "add_volumes": add_volumes, + "remove_volumes": None, }} + + self.assertRaises(exception.InvalidVolume, + self.controller.update, + req, self.group1.id, body) + + add_volume.destroy() + + def test_update_group_invalid_state(self): + req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % + (fake.PROJECT_ID, self.group1.id), + version=GROUP_MICRO_VERSION) + body = {"group": {"name": "new name", + "description": None, + "add_volumes": None, + "remove_volumes": None, }} + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + req, self.group1.id, body) + + @mock.patch( + 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') + def test_create_group_from_src_snap(self, mock_validate): + self.mock_object(volume_api.API, "create", stubs.stub_volume_create) + + group = utils.create_group(self.ctxt, + group_type_id=fake.GROUP_TYPE_ID) + volume = utils.create_volume( + self.ctxt, + group_id=group.id) + group_snapshot = utils.create_group_snapshot( + self.ctxt, group_id=group.id) + snapshot = utils.create_snapshot( + self.ctxt, + volume.id, + group_snapshot_id=group_snapshot.id, + status=fields.SnapshotStatus.AVAILABLE) + + test_grp_name = 'test grp' + body = {"create-from-src": {"name": test_grp_name, + "description": "Group 1", + "group_snapshot_id": group_snapshot.id}} + req = fakes.HTTPRequest.blank('/v3/%s/groups/action' % + fake.PROJECT_ID, + version=GROUP_FROM_SRC_MICRO_VERSION) + res_dict = self.controller.create_from_src(req, body) + + self.assertIn('id', res_dict['group']) + self.assertEqual(test_grp_name, res_dict['group']['name']) + self.assertTrue(mock_validate.called) + + grp_ref = objects.Group.get_by_id( + self.ctxt.elevated(), res_dict['group']['id']) + + grp_ref.destroy() + snapshot.destroy() + volume.destroy() + group.destroy() + group_snapshot.destroy() + + def test_create_group_from_src_grp(self): + self.mock_object(volume_api.API, "create", stubs.stub_volume_create) + + source_grp = utils.create_group(self.ctxt, + group_type_id=fake.GROUP_TYPE_ID) + volume = utils.create_volume( + self.ctxt, + group_id=source_grp.id) + + test_grp_name = 'test cg' + body = {"create-from-src": {"name": test_grp_name, + "description": "Consistency Group 1", + "source_group_id": source_grp.id}} + req = fakes.HTTPRequest.blank('/v3/%s/groups/action' % + fake.PROJECT_ID, + version=GROUP_FROM_SRC_MICRO_VERSION) + res_dict = self.controller.create_from_src(req, body) + + self.assertIn('id', res_dict['group']) + self.assertEqual(test_grp_name, res_dict['group']['name']) + + grp = objects.Group.get_by_id( + self.ctxt, res_dict['group']['id']) + grp.destroy() + volume.destroy() + source_grp.destroy() diff --git a/cinder/tests/unit/api/v3/test_messages.py 
b/cinder/tests/unit/api/v3/test_messages.py index a722ae3c0..d3944d686 100644 --- a/cinder/tests/unit/api/v3/test_messages.py +++ b/cinder/tests/unit/api/v3/test_messages.py @@ -11,8 +11,6 @@ # under the License. import mock -from oslo_config import cfg -import webob from cinder.api import extensions from cinder.api.v3 import messages @@ -24,7 +22,6 @@ from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import stubs -CONF = cfg.CONF NS = '{http://docs.openstack.org/api/openstack-block-storage/3.0/content}' @@ -84,7 +81,7 @@ class MessageApiTest(test.TestCase): version=messages.MESSAGES_BASE_MICRO_VERSION) req.environ['cinder.context'] = self.ctxt - self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + self.assertRaises(exception.MessageNotFound, self.controller.show, req, fakes.FAKE_UUID) def test_show_pre_microversion(self): @@ -120,7 +117,7 @@ class MessageApiTest(test.TestCase): '/v3/messages/%s' % fakes.FAKE_UUID, version=messages.MESSAGES_BASE_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + self.assertRaises(exception.MessageNotFound, self.controller.delete, req, fakes.FAKE_UUID) def test_index(self): diff --git a/cinder/tests/unit/api/v3/test_snapshot_manage.py b/cinder/tests/unit/api/v3/test_snapshot_manage.py new file mode 100644 index 000000000..58d2ee0af --- /dev/null +++ b/cinder/tests/unit/api/v3/test_snapshot_manage.py @@ -0,0 +1,134 @@ +# Copyright (c) 2016 Stratoscale, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
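+# The try/except import below keeps urlencode available on both
+# Python 2 (urllib.urlencode) and Python 3 (urllib.parse.urlencode).
+# A minimal equivalent sketch, assuming the six library these unit
+# tests already use elsewhere, would be:
+#
+#     from six.moves.urllib.parse import urlencode
+#     urlencode({'host': 'fakehost', 'limit': 10})
+#     # -> 'host=fakehost&limit=10' (pair order may vary on Python 2)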
+
+import mock
+from oslo_serialization import jsonutils
+try:
+    from urllib import urlencode
+except ImportError:
+    from urllib.parse import urlencode
+import webob
+
+from cinder.api.v3 import router as router_v3
+from cinder import context
+from cinder import test
+from cinder.tests.unit.api.contrib import test_snapshot_manage as test_contrib
+from cinder.tests.unit.api import fakes
+from cinder.tests.unit import fake_constants as fake
+from cinder.tests.unit import fake_service
+
+
+def app():
+    # no auth, just let environ['cinder.context'] pass through
+    api = router_v3.APIRouter()
+    mapper = fakes.urlmap.URLMap()
+    mapper['/v3'] = api
+    return mapper
+
+
+@mock.patch('cinder.volume.api.API.get', test_contrib.volume_get)
+class SnapshotManageTest(test.TestCase):
+    """Test cases for cinder/api/v3/snapshot_manage.py"""
+    def setUp(self):
+        super(SnapshotManageTest, self).setUp()
+        self._admin_ctxt = context.RequestContext(fake.USER_ID,
+                                                  fake.PROJECT_ID,
+                                                  True)
+
+    def _get_resp_post(self, body, version="3.8"):
+        """Helper to execute a POST manageable_snapshots API call."""
+        req = webob.Request.blank('/v3/%s/manageable_snapshots' %
+                                  fake.PROJECT_ID)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['OpenStack-API-Version'] = 'volume ' + version
+        req.environ['cinder.context'] = self._admin_ctxt
+        req.body = jsonutils.dump_as_bytes(body)
+        res = req.get_response(app())
+        return res
+
+    @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
+    @mock.patch('cinder.volume.api.API.create_snapshot_in_db')
+    @mock.patch('cinder.objects.service.Service.get_by_args')
+    def test_manage_snapshot_route(self, mock_service_get,
+                                   mock_create_snapshot, mock_rpcapi):
+        """Test call to manage snapshot.
+
+        There is currently no change between the API in contrib and the API in
+        v3, so here we simply check that the call is routed properly, rather
+        than copying all the tests.
+        """
+        mock_service_get.return_value = fake_service.fake_service_obj(
+            self._admin_ctxt,
+            binary='cinder-volume')
+
+        body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
+        res = self._get_resp_post(body)
+        self.assertEqual(202, res.status_int, res)
+
+    def test_manage_snapshot_previous_version(self):
+        body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
+        res = self._get_resp_post(body, version="3.7")
+        self.assertEqual(404, res.status_int, res)
+
+    def _get_resp_get(self, host, detailed, paging, version="3.8"):
+        """Helper to execute a GET os-snapshot-manage API call."""
+        params = {'host': host}
+        if paging:
+            params.update({'marker': '1234', 'limit': 10,
+                           'offset': 4, 'sort': 'reference:asc'})
+        query_string = "?%s" % urlencode(params)
+        detail = ""
+        if detailed:
+            detail = "/detail"
+        req = webob.Request.blank('/v3/%s/manageable_snapshots%s%s' %
+                                  (fake.PROJECT_ID, detail, query_string))
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['OpenStack-API-Version'] = 'volume ' + version
+        req.environ['cinder.context'] = self._admin_ctxt
+        res = req.get_response(app())
+        return res
+
+    @mock.patch('cinder.volume.api.API.get_manageable_snapshots',
+                wraps=test_contrib.api_get_manageable_snapshots)
+    def test_get_manageable_snapshots_route(self, mock_api_manageable):
+        """Test call to get manageable snapshots.
+
+        There is currently no change between the API in contrib and the API in
+        v3, so here we simply check that the call is routed properly, rather
+        than copying all the tests.
+ """ + res = self._get_resp_get('fakehost', False, False) + self.assertEqual(200, res.status_int) + + def test_get_manageable_snapshots_previous_version(self): + res = self._get_resp_get('fakehost', False, False, version="3.7") + self.assertEqual(404, res.status_int) + + @mock.patch('cinder.volume.api.API.get_manageable_snapshots', + wraps=test_contrib.api_get_manageable_snapshots) + def test_get_manageable_snapshots_detail_route(self, mock_api_manageable): + """Test call to get manageable volumes (detailed). + + There is currently no change between the API in contrib and the API in + v3, so here we simply check that the call is routed properly, rather + than copying all the tests. + """ + res = self._get_resp_get('fakehost', True, True) + self.assertEqual(200, res.status_int) + + def test_get_manageable_snapshots_detail_previous_version(self): + res = self._get_resp_get('fakehost', True, True, version="3.7") + self.assertEqual(404, res.status_int) diff --git a/cinder/tests/unit/api/v3/test_snapshots.py b/cinder/tests/unit/api/v3/test_snapshots.py new file mode 100644 index 000000000..3e035ce76 --- /dev/null +++ b/cinder/tests/unit/api/v3/test_snapshots.py @@ -0,0 +1,79 @@ +# Copyright 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import ddt + +import mock + +from cinder.api.openstack import api_version_request as api_version +from cinder.api.v3 import snapshots +from cinder import context +from cinder import exception +from cinder.objects import fields +from cinder import test +from cinder.tests.unit.api import fakes +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume + +UUID = '00000000-0000-0000-0000-000000000001' +INVALID_UUID = '00000000-0000-0000-0000-000000000002' + + +@ddt.ddt +class SnapshotApiTest(test.TestCase): + def setUp(self): + super(SnapshotApiTest, self).setUp() + self.controller = snapshots.SnapshotsController() + self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) + + @ddt.data('3.14', '3.13') + @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) + @mock.patch('cinder.objects.Volume.get_by_id') + @mock.patch('cinder.objects.Snapshot.get_by_id') + def test_snapshot_show(self, max_ver, snapshot_get_by_id, volume_get_by_id, + snapshot_metadata_get): + snapshot = { + 'id': UUID, + 'volume_id': fake.VOLUME_ID, + 'status': fields.SnapshotStatus.AVAILABLE, + 'volume_size': 100, + 'display_name': 'Default name', + 'display_description': 'Default description', + 'expected_attrs': ['metadata'], + 'group_snapshot_id': None, + } + ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) + snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) + fake_volume_obj = fake_volume.fake_volume_obj(ctx) + snapshot_get_by_id.return_value = snapshot_obj + volume_get_by_id.return_value = fake_volume_obj + req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) + req.api_version_request = api_version.APIVersionRequest(max_ver) + resp_dict = self.controller.show(req, UUID) + + self.assertIn('snapshot', resp_dict) + self.assertEqual(UUID, resp_dict['snapshot']['id']) + self.assertIn('updated_at', resp_dict['snapshot']) + if max_ver == '3.14': + self.assertIn('group_snapshot_id', resp_dict['snapshot']) + elif max_ver == '3.13': + self.assertNotIn('group_snapshot_id', resp_dict['snapshot']) + + def test_snapshot_show_invalid_id(self): + snapshot_id = INVALID_UUID + req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id) + self.assertRaises(exception.SnapshotNotFound, + self.controller.show, req, snapshot_id) diff --git a/cinder/tests/unit/api/v3/test_volume_manage.py b/cinder/tests/unit/api/v3/test_volume_manage.py new file mode 100644 index 000000000..8874b667b --- /dev/null +++ b/cinder/tests/unit/api/v3/test_volume_manage.py @@ -0,0 +1,136 @@ +# Copyright (c) 2016 Stratoscale, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
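+# Like the snapshot variant above, the tests below drive the full v3 WSGI
+# stack instead of calling the controller directly: app() maps
+# router_v3.APIRouter under /v3, and each helper sends a raw webob request
+# whose microversion travels in the OpenStack-API-Version header. A minimal
+# request sketch using the names defined below:
+#
+#     req = webob.Request.blank('/v3/%s/manageable_volumes' % fake.PROJECT_ID)
+#     req.method = 'POST'
+#     req.headers['OpenStack-API-Version'] = 'volume 3.8'
+#     res = req.get_response(app())
+#     # the route exists only from 3.8 on; 'volume 3.7' would yield a 404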
+
+import mock
+from oslo_serialization import jsonutils
+try:
+    from urllib import urlencode
+except ImportError:
+    from urllib.parse import urlencode
+import webob
+
+from cinder.api.v3 import router as router_v3
+from cinder import context
+from cinder import test
+from cinder.tests.unit.api.contrib import test_volume_manage as test_contrib
+from cinder.tests.unit.api import fakes
+from cinder.tests.unit import fake_constants as fake
+
+
+def app():
+    # no auth, just let environ['cinder.context'] pass through
+    api = router_v3.APIRouter()
+    mapper = fakes.urlmap.URLMap()
+    mapper['/v3'] = api
+    return mapper
+
+
+@mock.patch('cinder.objects.service.Service.get_by_host_and_topic',
+            test_contrib.service_get)
+@mock.patch('cinder.volume.volume_types.get_volume_type_by_name',
+            test_contrib.vt_get_volume_type_by_name)
+@mock.patch('cinder.volume.volume_types.get_volume_type',
+            test_contrib.vt_get_volume_type)
+class VolumeManageTest(test.TestCase):
+    """Test cases for cinder/api/v3/volume_manage.py"""
+
+    def setUp(self):
+        super(VolumeManageTest, self).setUp()
+        self._admin_ctxt = context.RequestContext(fake.USER_ID,
+                                                  fake.PROJECT_ID,
+                                                  True)
+
+    def _get_resp_post(self, body, version="3.8"):
+        """Helper to execute a POST manageable_volumes API call."""
+        req = webob.Request.blank('/v3/%s/manageable_volumes' %
+                                  fake.PROJECT_ID)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['OpenStack-API-Version'] = 'volume ' + version
+        req.environ['cinder.context'] = self._admin_ctxt
+        req.body = jsonutils.dump_as_bytes(body)
+        res = req.get_response(app())
+        return res
+
+    @mock.patch('cinder.volume.api.API.manage_existing',
+                wraps=test_contrib.api_manage)
+    @mock.patch(
+        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
+    def test_manage_volume_route(self, mock_validate, mock_api_manage):
+        """Test call to manage volume.
+
+        There is currently no change between the API in contrib and the API in
+        v3, so here we simply check that the call is routed properly, rather
+        than copying all the tests.
+        """
+        body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}}
+        res = self._get_resp_post(body)
+        self.assertEqual(202, res.status_int, res)
+
+    def test_manage_volume_previous_version(self):
+        body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}}
+        # An explicit pre-3.8 microversion is required here; the helper
+        # defaults to 3.8, under which the route exists and returns 202.
+        res = self._get_resp_post(body, version="3.7")
+        self.assertEqual(404, res.status_int, res)
+
+    def _get_resp_get(self, host, detailed, paging, version="3.8"):
+        """Helper to execute a GET os-volume-manage API call."""
+        params = {'host': host}
+        if paging:
+            params.update({'marker': '1234', 'limit': 10,
+                           'offset': 4, 'sort': 'reference:asc'})
+        query_string = "?%s" % urlencode(params)
+        detail = ""
+        if detailed:
+            detail = "/detail"
+
+        req = webob.Request.blank('/v3/%s/manageable_volumes%s%s' %
+                                  (fake.PROJECT_ID, detail, query_string))
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['OpenStack-API-Version'] = 'volume ' + version
+        req.environ['cinder.context'] = self._admin_ctxt
+        res = req.get_response(app())
+        return res
+
+    @mock.patch('cinder.volume.api.API.get_manageable_volumes',
+                wraps=test_contrib.api_get_manageable_volumes)
+    def test_get_manageable_volumes_route(self, mock_api_manageable):
+        """Test call to get manageable volumes.
+
+        There is currently no change between the API in contrib and the API in
+        v3, so here we simply check that the call is routed properly, rather
+        than copying all the tests.
+ """ + res = self._get_resp_get('fakehost', False, True) + self.assertEqual(200, res.status_int) + + def test_get_manageable_volumes_previous_version(self): + res = self._get_resp_get('fakehost', False, True, version="3.7") + self.assertEqual(404, res.status_int) + + @mock.patch('cinder.volume.api.API.get_manageable_volumes', + wraps=test_contrib.api_get_manageable_volumes) + def test_get_manageable_volumes_detail_route(self, mock_api_manageable): + """Test call to get manageable volumes (detailed). + + There is currently no change between the API in contrib and the API in + v3, so here we simply check that the call is routed properly, rather + than copying all the tests. + """ + res = self._get_resp_get('fakehost', True, False) + self.assertEqual(200, res.status_int) + + def test_get_manageable_volumes_detail_previous_version(self): + res = self._get_resp_get('fakehost', True, False, version="3.7") + self.assertEqual(404, res.status_int) diff --git a/cinder/tests/unit/api/v3/test_volume_metadata.py b/cinder/tests/unit/api/v3/test_volume_metadata.py new file mode 100644 index 000000000..4c6603198 --- /dev/null +++ b/cinder/tests/unit/api/v3/test_volume_metadata.py @@ -0,0 +1,240 @@ +# Copyright 2016 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +import mock +from oslo_config import cfg +from oslo_serialization import jsonutils +import six + +from cinder.api import extensions +from cinder.api.v3 import volume_metadata +from cinder.api.v3 import volumes +from cinder import db +from cinder import exception +from cinder import test +from cinder.tests.unit.api import fakes +from cinder.tests.unit.api.v2 import stubs +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_volume +from cinder import volume +from cinder.volume import api as volume_api + + +CONF = cfg.CONF + + +def return_create_volume_metadata_max(context, volume_id, metadata, delete): + return stub_max_volume_metadata() + + +def return_create_volume_metadata(context, volume_id, metadata, + delete, meta_type): + return stub_volume_metadata() + + +def return_new_volume_metadata(context, volume_id, metadata, + delete, meta_type): + return stub_new_volume_metadata() + + +def return_create_volume_metadata_insensitive(context, snapshot_id, + metadata, delete, + meta_type): + return stub_volume_metadata_insensitive() + + +def return_volume_metadata(context, volume_id): + return stub_volume_metadata() + + +def return_empty_volume_metadata(context, volume_id): + return {} + + +def return_empty_container_metadata(context, volume_id, metadata, + delete, meta_type): + return {} + + +def stub_volume_metadata(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + return metadata + + +def stub_new_volume_metadata(): + metadata = { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + } + return metadata + + +def stub_volume_metadata_insensitive(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "KEY4": "value4", + } + return metadata + + +def stub_max_volume_metadata(): + metadata = {"metadata": {}} + for num in range(CONF.quota_metadata_items): + metadata['metadata']['key%i' % num] = "blah" + return metadata + + +def get_volume(*args, **kwargs): + vol = {'name': 'fake', + 'metadata': {}} + return fake_volume.fake_volume_obj(args[0], **vol) + + +def return_volume_nonexistent(*args, **kwargs): + raise exception.VolumeNotFound('bogus test message') + + +def fake_update_volume_metadata(self, context, volume, diff): + pass + + +class volumeMetaDataTest(test.TestCase): + + def setUp(self): + super(volumeMetaDataTest, self).setUp() + self.volume_api = volume_api.API() + self.mock_object(volume.api.API, 'get', get_volume) + self.mock_object(db, 'volume_metadata_get', + return_volume_metadata) + self.patch( + 'cinder.db.service_get_all', autospec=True, + return_value=stubs.stub_service_get_all_by_topic(None, None)) + + self.mock_object(self.volume_api, 'update_volume_metadata', + fake_update_volume_metadata) + + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.volume_controller = volumes.VolumeController(self.ext_mgr) + self.controller = volume_metadata.Controller() + self.req_id = str(uuid.uuid4()) + self.url = '/v2/%s/volumes/%s/metadata' % ( + fake.PROJECT_ID, self.req_id) + + vol = {"size": 100, + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "metadata": {}} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.volume_controller.create(req, body) + + def test_index(self): + req = fakes.HTTPRequest.blank(self.url, version="3.15") + data = self.controller.index(req, self.req_id) + + expected = { + 'metadata': { + 'key1': 'value1', + 
'key2': 'value2', + 'key3': 'value3', + }, + } + expected = jsonutils.dumps(expected) + if six.PY3: + expected = expected.encode('utf-8') + self.assertEqual(expected, data.body) + + def test_index_nonexistent_volume(self): + self.mock_object(db, 'volume_metadata_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url, version="3.15") + self.assertRaises(exception.VolumeNotFound, + self.controller.index, req, self.url) + + def test_index_no_data(self): + self.mock_object(db, 'volume_metadata_get', + return_empty_volume_metadata) + req = fakes.HTTPRequest.blank(self.url, version="3.15") + data = self.controller.index(req, self.req_id) + expected = {'metadata': {}} + expected = jsonutils.dumps(expected) + if six.PY3: + expected = expected.encode('utf-8') + self.assertEqual(expected, data.body) + + def test_validate_etag_true(self): + self.mock_object(db, 'volume_metadata_get', + mock.Mock(return_value={'key1': 'vanue1', + 'key2': 'value2'})) + req = fakes.HTTPRequest.blank(self.url, version="3.15") + req.environ['cinder.context'] = mock.Mock() + req.if_match.etags = ['d5103bf7b26ff0310200d110da3ed186'] + self.assertTrue(self.controller._validate_etag(req, self.req_id)) + + @mock.patch.object(db, 'volume_metadata_update') + def test_update_all(self, metadata_update): + fake_volume = {'id': self.req_id, 'status': 'available'} + fake_context = mock.Mock() + metadata_update.side_effect = return_new_volume_metadata + req = fakes.HTTPRequest.blank(self.url, version="3.15") + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dump_as_bytes(expected) + req.environ['cinder.context'] = fake_context + + with mock.patch.object(self.controller.volume_api, + 'get') as get_volume: + get_volume.return_value = fake_volume + res_dict = self.controller.update_all(req, self.req_id, expected) + self.assertEqual(expected, res_dict) + get_volume.assert_called_once_with(fake_context, self.req_id) + + @mock.patch.object(db, 'volume_metadata_update') + def test_update_item(self, metadata_update): + fake_volume = {'id': self.req_id, 'status': 'available'} + fake_context = mock.Mock() + metadata_update.side_effect = return_create_volume_metadata + req = fakes.HTTPRequest.blank(self.url + '/key1', version="3.15") + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dump_as_bytes(body) + req.headers["content-type"] = "application/json" + req.environ['cinder.context'] = fake_context + + with mock.patch.object(self.controller.volume_api, + 'get') as get_volume: + get_volume.return_value = fake_volume + res_dict = self.controller.update(req, self.req_id, 'key1', body) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + get_volume.assert_called_once_with(fake_context, self.req_id) diff --git a/cinder/tests/unit/api/v3/test_volumes.py b/cinder/tests/unit/api/v3/test_volumes.py index a0a069296..483651ce6 100644 --- a/cinder/tests/unit/api/v3/test_volumes.py +++ b/cinder/tests/unit/api/v3/test_volumes.py @@ -11,25 +11,33 @@ # License for the specific language governing permissions and limitations # under the License. 
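+# Several tests below set the OpenStack-API-Version header and
+# req.api_version_request together; presumably the header alone is not
+# enough because fakes.HTTPRequest.blank() builds the request without the
+# middleware that would normally parse it. The pattern used throughout:
+#
+#     req = fakes.HTTPRequest.blank('/v3/volumes?group_id=%s' % group_id)
+#     req.headers['OpenStack-API-Version'] = 'volume 3.10'
+#     req.api_version_request = api_version.APIVersionRequest('3.10')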
+import datetime +import ddt +import iso8601 import mock -from oslo_config import cfg from cinder.api import extensions from cinder.api.openstack import api_version_request as api_version from cinder.api.v3 import volumes from cinder import context from cinder import db +from cinder import exception +from cinder.group import api as group_api from cinder import test from cinder.tests.unit.api import fakes +from cinder.tests.unit.api.v2 import stubs +from cinder.tests.unit.api.v2 import test_volumes as v2_test_volumes from cinder.tests.unit import fake_constants as fake -from cinder.volume.api import API as vol_get +from cinder.volume import api as volume_api +from cinder.volume import api as vol_get version_header_name = 'OpenStack-API-Version' -CONF = cfg.CONF +DEFAULT_AZ = "zone1:host1" +@ddt.ddt class VolumeApiTest(test.TestCase): def setUp(self): super(VolumeApiTest, self).setUp() @@ -41,7 +49,7 @@ class VolumeApiTest(test.TestCase): self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) def test_check_volume_filters_called(self): - with mock.patch.object(vol_get, + with mock.patch.object(vol_get.API, 'check_volume_filters') as volume_get: req = fakes.HTTPRequest.blank('/v3/volumes?bootable=True') req.method = 'GET' @@ -57,7 +65,7 @@ class VolumeApiTest(test.TestCase): def test_check_volume_filters_strict_called(self): - with mock.patch.object(vol_get, + with mock.patch.object(vol_get.API, 'check_volume_filters') as volume_get: req = fakes.HTTPRequest.blank('/v3/volumes?bootable=True') req.method = 'GET' @@ -87,6 +95,19 @@ class VolumeApiTest(test.TestCase): 'qcow2') return [vol1, vol2] + def _create_volume_with_consistency_group(self): + vol1 = db.volume_create(self.ctxt, {'display_name': 'test1', + 'project_id': + self.ctxt.project_id, + 'consistencygroup_id': + fake.CONSISTENCY_GROUP_ID}) + vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', + 'project_id': + self.ctxt.project_id, + 'consistencygroup_id': + fake.CONSISTENCY_GROUP2_ID}) + return [vol1, vol2] + def test_volume_index_filter_by_glance_metadata(self): vols = self._create_volume_with_glance_metadata() req = fakes.HTTPRequest.blank("/v3/volumes?glance_metadata=" @@ -109,3 +130,231 @@ class VolumeApiTest(test.TestCase): res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(2, len(volumes)) + + def test_volume_index_filter_by_group_id(self): + vols = self._create_volume_with_consistency_group() + req = fakes.HTTPRequest.blank(("/v3/volumes?group_id=%s") % + fake.CONSISTENCY_GROUP_ID) + req.headers["OpenStack-API-Version"] = "volume 3.10" + req.api_version_request = api_version.APIVersionRequest('3.10') + req.environ['cinder.context'] = self.ctxt + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(1, len(volumes)) + self.assertEqual(vols[0].id, volumes[0]['id']) + + def test_volume_index_filter_by_group_id_in_unsupport_version(self): + self._create_volume_with_consistency_group() + req = fakes.HTTPRequest.blank(("/v3/volumes?group_id=%s") % + fake.CONSISTENCY_GROUP2_ID) + req.headers["OpenStack-API-Version"] = "volume 3.9" + req.api_version_request = api_version.APIVersionRequest('3.9') + req.environ['cinder.context'] = self.ctxt + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(2, len(volumes)) + + def _fake_volumes_summary_request(self, version='3.12'): + req = fakes.HTTPRequest.blank('/v3/volumes/summary') + req.headers = {'OpenStack-API-Version': 'volume ' + version} + 
req.api_version_request = api_version.APIVersionRequest(version) + return req + + def test_volumes_summary_in_unsupport_version(self): + """Function call to test summary volumes API in unsupported version""" + req = self._fake_volumes_summary_request(version='3.7') + self.assertRaises(exception.VersionNotFoundForAPIMethod, + self.controller.summary, req) + + def test_volumes_summary_in_supported_version(self): + """Function call to test the summary volumes API for version v3.""" + req = self._fake_volumes_summary_request() + res_dict = self.controller.summary(req) + expected = {'volume-summary': {'total_size': 0.0, 'total_count': 0}} + self.assertEqual(expected, res_dict) + + vol = v2_test_volumes.VolumeApiTest._vol_in_request_body( + availability_zone="nova") + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v3/volumes') + res_dict = self.controller.create(req, body) + + req = self._fake_volumes_summary_request() + res_dict = self.controller.summary(req) + expected = {'volume-summary': {'total_size': 1.0, 'total_count': 1}} + self.assertEqual(expected, res_dict) + + def _vol_in_request_body(self, + size=stubs.DEFAULT_VOL_SIZE, + name=stubs.DEFAULT_VOL_NAME, + description=stubs.DEFAULT_VOL_DESCRIPTION, + availability_zone=DEFAULT_AZ, + snapshot_id=None, + source_volid=None, + source_replica=None, + consistencygroup_id=None, + volume_type=None, + image_ref=None, + image_id=None, + group_id=None): + vol = {"size": size, + "name": name, + "description": description, + "availability_zone": availability_zone, + "snapshot_id": snapshot_id, + "source_volid": source_volid, + "source_replica": source_replica, + "consistencygroup_id": consistencygroup_id, + "volume_type": volume_type, + "group_id": group_id, + } + + if image_id is not None: + vol['image_id'] = image_id + elif image_ref is not None: + vol['imageRef'] = image_ref + + return vol + + def _expected_vol_from_controller( + self, + size=stubs.DEFAULT_VOL_SIZE, + availability_zone=DEFAULT_AZ, + description=stubs.DEFAULT_VOL_DESCRIPTION, + name=stubs.DEFAULT_VOL_NAME, + consistencygroup_id=None, + source_volid=None, + snapshot_id=None, + metadata=None, + attachments=None, + volume_type=stubs.DEFAULT_VOL_TYPE, + status=stubs.DEFAULT_VOL_STATUS, + with_migration_status=False, + group_id=None, + req_version=None): + metadata = metadata or {} + attachments = attachments or [] + volume = {'volume': + {'attachments': attachments, + 'availability_zone': availability_zone, + 'bootable': 'false', + 'consistencygroup_id': consistencygroup_id, + 'group_id': group_id, + 'created_at': datetime.datetime( + 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), + 'updated_at': datetime.datetime( + 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), + 'description': description, + 'id': stubs.DEFAULT_VOL_ID, + 'links': + [{'href': 'http://localhost/v3/%s/volumes/%s' % ( + fake.PROJECT_ID, fake.VOLUME_ID), + 'rel': 'self'}, + {'href': 'http://localhost/%s/volumes/%s' % ( + fake.PROJECT_ID, fake.VOLUME_ID), + 'rel': 'bookmark'}], + 'metadata': metadata, + 'name': name, + 'replication_status': 'disabled', + 'multiattach': False, + 'size': size, + 'snapshot_id': snapshot_id, + 'source_volid': source_volid, + 'status': status, + 'user_id': fake.USER_ID, + 'volume_type': volume_type, + 'encrypted': False}} + + if with_migration_status: + volume['volume']['migration_status'] = None + + # Remove group_id if max version is less than 3.13. 
+ if req_version and req_version.matches(None, "3.12"): + volume['volume'].pop('group_id') + + return volume + + def _expected_volume_api_create_kwargs(self, snapshot=None, + availability_zone=DEFAULT_AZ, + source_volume=None, + test_group=None, + req_version=None): + volume = { + 'metadata': None, + 'snapshot': snapshot, + 'source_volume': source_volume, + 'source_replica': None, + 'consistencygroup': None, + 'availability_zone': availability_zone, + 'scheduler_hints': None, + 'multiattach': False, + 'group': test_group, + } + + # Remove group_id if max version is less than 3.13. + if req_version and req_version.matches(None, "3.12"): + volume.pop('group') + + return volume + + @ddt.data('3.13', '3.12') + @mock.patch( + 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') + def test_volume_create(self, max_ver, mock_validate): + self.mock_object(volume_api.API, 'get', stubs.stub_volume_get) + self.mock_object(volume_api.API, "create", + stubs.stub_volume_api_create) + self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', + stubs.stub_volume_type_get) + + vol = self._vol_in_request_body() + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v3/volumes') + req.api_version_request = api_version.APIVersionRequest(max_ver) + res_dict = self.controller.create(req, body) + ex = self._expected_vol_from_controller( + req_version=req.api_version_request) + self.assertEqual(ex, res_dict) + self.assertTrue(mock_validate.called) + + @ddt.data('3.14', '3.13') + @mock.patch.object(group_api.API, 'get') + @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', + autospec=True) + @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) + @mock.patch.object(volume_api.API, 'create', autospec=True) + def test_volume_creation_from_snapshot(self, max_ver, create, get_snapshot, + volume_type_get, group_get): + create.side_effect = stubs.stub_volume_api_create + get_snapshot.side_effect = stubs.stub_snapshot_get + volume_type_get.side_effect = stubs.stub_volume_type_get + fake_group = { + 'id': fake.GROUP_ID, + 'group_type_id': fake.GROUP_TYPE_ID, + 'name': 'fake_group' + } + group_get.return_value = fake_group + + snapshot_id = fake.SNAPSHOT_ID + vol = self._vol_in_request_body(snapshot_id=snapshot_id, + group_id=fake.GROUP_ID) + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v3/volumes') + req.api_version_request = api_version.APIVersionRequest(max_ver) + res_dict = self.controller.create(req, body) + ex = self._expected_vol_from_controller( + snapshot_id=snapshot_id, + req_version=req.api_version_request) + self.assertEqual(ex, res_dict) + + context = req.environ['cinder.context'] + get_snapshot.assert_called_once_with(self.controller.volume_api, + context, snapshot_id) + + kwargs = self._expected_volume_api_create_kwargs( + stubs.stub_snapshot(snapshot_id), + test_group=fake_group, + req_version=req.api_version_request) + create.assert_called_once_with(self.controller.volume_api, context, + vol['size'], stubs.DEFAULT_VOL_NAME, + stubs.DEFAULT_VOL_DESCRIPTION, **kwargs) diff --git a/cinder/tests/unit/backup/drivers/test_backup_ceph.py b/cinder/tests/unit/backup/drivers/test_backup_ceph.py index de046968e..d66d3e3ee 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_ceph.py +++ b/cinder/tests/unit/backup/drivers/test_backup_ceph.py @@ -644,7 +644,7 @@ class BackupCephTestCase(test.TestCase): @common_mocks def test_discard_bytes(self): - # Lower the chunksize to a memory managable number + # Lower the chunksize to a memory manageable number 
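The v3 volume tests above drive the same create path at two microversions with @ddt.data and branch on req_version.matches(None, "3.12") to drop group_id for requests older than 3.13. A minimal sketch of that matches() semantics follows; APIVersion here is a simplified stand-in written for this note, not cinder's real APIVersionRequest class.

def _parse(ver):
    major, minor = ver.split('.')
    return int(major), int(minor)


class APIVersion(object):
    """Simplified stand-in for an API microversion request."""

    def __init__(self, version):
        self.ver = _parse(version)

    def matches(self, min_version=None, max_version=None):
        # None on either side means that bound is unrestricted.
        if min_version is not None and self.ver < _parse(min_version):
            return False
        if max_version is not None and self.ver > _parse(max_version):
            return False
        return True


assert APIVersion('3.12').matches(None, '3.12')      # old request: drop group_id
assert not APIVersion('3.13').matches(None, '3.12')  # 3.13+: keep group_id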
self.service.chunk_size = 1024 image = self.mock_rbd.Image.return_value wrapped_rbd = self._get_wrapped_rbd_io(image) @@ -763,7 +763,7 @@ class BackupCephTestCase(test.TestCase): self.assertTrue(rbd.list.called) self.assertTrue(rbd.remove.called) - self.assertTrue(MockImageBusyException in RAISED_EXCEPTIONS) + self.assertIn(MockImageBusyException, RAISED_EXCEPTIONS) @common_mocks @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) diff --git a/cinder/tests/unit/backup/drivers/test_backup_driver_base.py b/cinder/tests/unit/backup/drivers/test_backup_driver_base.py index e3dfe7e1b..255830d9a 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_driver_base.py +++ b/cinder/tests/unit/backup/drivers/test_backup_driver_base.py @@ -23,6 +23,7 @@ from cinder.backup import driver from cinder import context from cinder import db from cinder import exception +from cinder import keymgr as key_manager from cinder import objects from cinder import test from cinder.tests.unit.backup import fake_service @@ -286,8 +287,9 @@ class BackupMetadataAPITestCase(test.TestCase): def _create_encrypted_volume_db_entry(self, id, type_id, encrypted): if encrypted: + key_id = key_manager.API().key_id vol = {'id': id, 'size': 1, 'status': 'available', - 'volume_type_id': type_id, 'encryption_key_id': 'fake_id'} + 'volume_type_id': type_id, 'encryption_key_id': key_id} else: vol = {'id': id, 'size': 1, 'status': 'available', 'volume_type_id': type_id, 'encryption_key_id': None} diff --git a/cinder/tests/unit/backup/drivers/test_backup_google.py b/cinder/tests/unit/backup/drivers/test_backup_google.py index 954dfde4d..c8dde288f 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_google.py +++ b/cinder/tests/unit/backup/drivers/test_backup_google.py @@ -204,6 +204,20 @@ class GoogleBackupDriverTestCase(test.TestCase): service.backup(backup, self.volume_file) self.assertEqual('gcscinderbucket', backup.container) + @gcs_client + @mock.patch('httplib2.proxy_info_from_url') + def test_backup_proxy_configured(self, mock_proxy_info): + google_dr.CONF.set_override("backup_gcs_proxy_url", + "http://myproxy.example.com") + google_dr.GoogleBackupDriver(self.ctxt) + mock_proxy_info.assert_called_with("http://myproxy.example.com") + + @gcs_client + @mock.patch('httplib2.proxy_info_from_environment') + def test_backup_proxy_environment(self, mock_proxy_env): + google_dr.GoogleBackupDriver(self.ctxt) + mock_proxy_env.assert_called_once_with() + @gcs_client @mock.patch('cinder.backup.drivers.google.GoogleBackupDriver.' '_send_progress_end') diff --git a/cinder/tests/unit/backup/drivers/test_backup_nfs.py b/cinder/tests/unit/backup/drivers/test_backup_nfs.py index 3b7696720..f868952b7 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_nfs.py +++ b/cinder/tests/unit/backup/drivers/test_backup_nfs.py @@ -141,7 +141,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase): super(BackupNFSSwiftBasedTestCase, self).setUp() self.ctxt = context.get_admin_context() - self.stubs.Set(hashlib, 'md5', fake_md5) + self.mock_object(hashlib, 'md5', fake_md5) self.volume_file = tempfile.NamedTemporaryFile() self.temp_dir = tempfile.mkdtemp() self.addCleanup(self.volume_file.close) @@ -265,10 +265,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. 
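The hunks around here mechanically convert self.stubs.Set(...) (the deprecated mox-style stub API) to self.mock_object(...), which delegates to mock.patch.object and is reverted automatically on test cleanup. A sketch of what such a helper does, closely following the oslotest-style pattern these tests rely on (the helper body below is an approximation, not copied from cinder):

import unittest
from unittest import mock  # the codebase of this era imports the mock package


class BaseTestCase(unittest.TestCase):
    def mock_object(self, obj, attr_name, *args, **kwargs):
        """Patch obj.attr_name for the duration of one test.

        Unlike stubs.Set, the patch is registered with addCleanup, so it
        is always undone even when the test fails.
        """
        patcher = mock.patch.object(obj, attr_name, *args, **kwargs)
        result = patcher.start()
        self.addCleanup(patcher.stop)
        return result


class Example(object):
    def greet(self):
        return 'real'


class ExampleTest(BaseTestCase):
    def test_greet_is_patched(self):
        self.mock_object(Example, 'greet', lambda self: 'fake')
        self.assertEqual('fake', Example().greet())


if __name__ == '__main__':
    unittest.main()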
- self.stubs.Set(nfs.NFSBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(nfs.NFSBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) @@ -296,10 +295,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(nfs.NFSBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(nfs.NFSBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) @@ -342,10 +340,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(nfs.NFSBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(nfs.NFSBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) self.flags(backup_file_size=(8 * 1024)) self.flags(backup_sha_block_size_bytes=1024) @@ -396,10 +393,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(nfs.NFSBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(nfs.NFSBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) self.flags(backup_file_size=(8 * 1024)) self.flags(backup_sha_block_size_bytes=1024) @@ -458,8 +454,8 @@ class BackupNFSSwiftBasedTestCase(test.TestCase): raise exception.BackupDriverException(message=_('fake')) # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata', - fake_backup_metadata) + self.mock_object(nfs.NFSBackupDriver, '_backup_metadata', + fake_backup_metadata) # We expect that an exception be notified directly. self.assertRaises(exception.BackupDriverException, @@ -485,14 +481,14 @@ class BackupNFSSwiftBasedTestCase(test.TestCase): raise exception.BackupDriverException(message=_('fake')) # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata', - fake_backup_metadata) + self.mock_object(nfs.NFSBackupDriver, '_backup_metadata', + fake_backup_metadata) def fake_delete(self, backup): raise exception.BackupOperationError() # Raise a pseudo exception.BackupOperationError. - self.stubs.Set(nfs.NFSBackupDriver, 'delete', fake_delete) + self.mock_object(nfs.NFSBackupDriver, 'delete', fake_delete) # We expect that the second exception is notified. self.assertRaises(exception.BackupOperationError, @@ -563,10 +559,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. 
- self.stubs.Set(nfs.NFSBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(nfs.NFSBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) self.flags(backup_file_size =(1024 * 8)) self.flags(backup_sha_block_size_bytes=1024) diff --git a/cinder/tests/unit/backup/drivers/test_backup_swift.py b/cinder/tests/unit/backup/drivers/test_backup_swift.py index 8d3f34534..a7f46f744 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_swift.py +++ b/cinder/tests/unit/backup/drivers/test_backup_swift.py @@ -101,9 +101,9 @@ class BackupSwiftTestCase(test.TestCase): self.ctxt = context.get_admin_context() self.ctxt.service_catalog = service_catalog - self.stubs.Set(swift, 'Connection', - fake_swift_client.FakeSwiftClient.Connection) - self.stubs.Set(hashlib, 'md5', fake_md5) + self.mock_object(swift, 'Connection', + fake_swift_client.FakeSwiftClient.Connection) + self.mock_object(hashlib, 'md5', fake_md5) self.volume_file = tempfile.NamedTemporaryFile() self.temp_dir = tempfile.mkdtemp() @@ -178,13 +178,12 @@ class BackupSwiftTestCase(test.TestCase): u'endpoints': [{ u'adminURL': u'http://example.com'}]}] + self.ctxt.project_id = fake.PROJECT_ID self.override_config("backup_swift_auth_url", - "http://public.example.com/") + "http://public.example.com") backup = swift_dr.SwiftBackupDriver(self.ctxt) - self.assertEqual("%s%s" % (CONF.backup_swift_auth_url, - self.ctxt.project_id), - backup.auth_url) + self.assertEqual(CONF.backup_swift_auth_url, backup.auth_url) def test_backup_swift_info(self): self.override_config("swift_catalog_info", "dummy") @@ -409,17 +408,16 @@ class BackupSwiftTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(swift_dr.SwiftBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name) - self.stubs.Set(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) + self.mock_object(swift, 'Connection', + fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) @@ -442,18 +440,17 @@ class BackupSwiftTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. 
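The Swift backup tests above flip driver behavior per test with self.override_config(...) and self.flags(...); both are thin wrappers that set an oslo.config option and restore it on cleanup. A minimal sketch with oslo.config's own fixture, assuming nothing about cinder's test base; the option registration below is illustrative, since cinder registers backup_swift_auth_url itself.

from oslo_config import cfg
from oslo_config import fixture as config_fixture

CONF = cfg.CONF
CONF.register_opt(cfg.StrOpt('backup_swift_auth_url'))  # illustrative registration


def demo():
    fix = config_fixture.Config(CONF)
    fix.setUp()
    fix.config(backup_swift_auth_url='http://public.example.com')
    assert CONF.backup_swift_auth_url == 'http://public.example.com'
    fix.cleanUp()  # restores the previous value


demo()
assert CONF.backup_swift_auth_url is None  # back to the default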
- self.stubs.Set(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(swift_dr.SwiftBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) - self.stubs.Set(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) + self.mock_object(swift, 'Connection', + fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) @@ -466,8 +463,8 @@ class BackupSwiftTestCase(test.TestCase): container=container_name, backup_id=fake.BACKUP2_ID, parent_id= fake.BACKUP_ID) - self.stubs.Set(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) + self.mock_object(swift, 'Connection', + fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) @@ -492,10 +489,9 @@ class BackupSwiftTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(swift_dr.SwiftBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) self.flags(backup_swift_object_size=8 * 1024) self.flags(backup_swift_block_size=1024) @@ -505,8 +501,8 @@ class BackupSwiftTestCase(test.TestCase): self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) - self.stubs.Set(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) + self.mock_object(swift, 'Connection', + fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) @@ -524,8 +520,8 @@ class BackupSwiftTestCase(test.TestCase): container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) - self.stubs.Set(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) + self.mock_object(swift, 'Connection', + fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) @@ -550,10 +546,9 @@ class BackupSwiftTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. 
- self.stubs.Set(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(swift_dr.SwiftBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) self.flags(backup_swift_object_size=8 * 1024) self.flags(backup_swift_block_size=1024) @@ -563,8 +558,8 @@ class BackupSwiftTestCase(test.TestCase): self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) - self.stubs.Set(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) + self.mock_object(swift, 'Connection', + fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) @@ -582,8 +577,8 @@ class BackupSwiftTestCase(test.TestCase): container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) - self.stubs.Set(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) + self.mock_object(swift, 'Connection', + fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) @@ -628,8 +623,8 @@ class BackupSwiftTestCase(test.TestCase): raise exception.BackupDriverException(message=_('fake')) # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(swift_dr.SwiftBackupDriver, '_backup_metadata', - fake_backup_metadata) + self.mock_object(swift_dr.SwiftBackupDriver, '_backup_metadata', + fake_backup_metadata) # We expect that an exception be notified directly. self.assertRaises(exception.BackupDriverException, @@ -655,14 +650,14 @@ class BackupSwiftTestCase(test.TestCase): raise exception.BackupDriverException(message=_('fake')) # Raise a pseudo exception.BackupDriverException. - self.stubs.Set(swift_dr.SwiftBackupDriver, '_backup_metadata', - fake_backup_metadata) + self.mock_object(swift_dr.SwiftBackupDriver, '_backup_metadata', + fake_backup_metadata) def fake_delete(self, backup): raise exception.BackupOperationError() # Raise a pseudo exception.BackupOperationError. - self.stubs.Set(swift_dr.SwiftBackupDriver, 'delete', fake_delete) + self.mock_object(swift_dr.SwiftBackupDriver, 'delete', fake_delete) # We expect that the second exception is notified. self.assertRaises(exception.BackupOperationError, @@ -688,10 +683,9 @@ class BackupSwiftTestCase(test.TestCase): prefix = volume + '_' + backup_name return prefix - # Raise a pseudo exception.BackupDriverException. 
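Several cases above shrink backup_swift_object_size and backup_swift_block_size so that a small volume yields multiple objects and a delta backup has blocks to compare. The chunked backup drivers keep per-block SHA-256 sums and re-upload only blocks whose checksum changed; a toy sketch of that comparison, with block size and data chosen here purely for illustration:

import hashlib


def sha_blocks(data, block_size=1024):
    return [hashlib.sha256(data[i:i + block_size]).hexdigest()
            for i in range(0, len(data), block_size)]


def changed_blocks(old_shas, new_shas):
    # Blocks whose checksum differs, or that did not exist before,
    # are the ones an incremental backup must write out.
    changed = []
    for idx, sha in enumerate(new_shas):
        if idx >= len(old_shas) or old_shas[idx] != sha:
            changed.append(idx)
    return changed


base = b'a' * 4096
modified = base[:1024] + b'b' * 1024 + base[2048:]
assert changed_blocks(sha_blocks(base), sha_blocks(modified)) == [1]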
- self.stubs.Set(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) + self.mock_object(swift_dr.SwiftBackupDriver, + '_generate_object_name_prefix', + _fake_generate_object_name_prefix) self.flags(backup_swift_object_size=8 * 1024) self.flags(backup_swift_block_size=1024) @@ -701,8 +695,8 @@ class BackupSwiftTestCase(test.TestCase): self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) - self.stubs.Set(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) + self.mock_object(swift, 'Connection', + fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) @@ -784,9 +778,9 @@ class BackupSwiftTestCase(test.TestCase): def _fake_delete_object(self, container, object_name): raise AssertionError('delete_object method should not be called.') - self.stubs.Set(swift_dr.SwiftBackupDriver, - 'delete_object', - _fake_delete_object) + self.mock_object(swift_dr.SwiftBackupDriver, + 'delete_object', + _fake_delete_object) self._create_backup_db_entry(volume_id=volume_id) service = swift_dr.SwiftBackupDriver(self.ctxt) diff --git a/cinder/tests/unit/backup/test_backup.py b/cinder/tests/unit/backup/test_backup.py index 7167803f5..61ac64dc9 100644 --- a/cinder/tests/unit/backup/test_backup.py +++ b/cinder/tests/unit/backup/test_backup.py @@ -20,7 +20,7 @@ import tempfile import uuid import mock -import os_brick +from os_brick.initiator import connector from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import importutils @@ -35,8 +35,8 @@ from cinder import exception from cinder import objects from cinder.objects import fields from cinder import test +from cinder.tests import fake_driver from cinder.tests.unit.backup import fake_service_with_verify as fake_service -from cinder.tests.unit import fake_driver from cinder.tests.unit import utils from cinder.volume import driver @@ -113,14 +113,15 @@ class BaseBackupTest(test.TestCase): display_description='this is a test volume', status='backing-up', previous_status='available', - size=1): + size=1, + host='testhost'): """Create a volume entry in the DB. 
Return the entry ID """ vol = {} vol['size'] = size - vol['host'] = 'testhost' + vol['host'] = host vol['user_id'] = str(uuid.uuid4()) vol['project_id'] = str(uuid.uuid4()) vol['status'] = status @@ -205,11 +206,11 @@ class BaseBackupTest(test.TestCase): class BackupTestCase(BaseBackupTest): """Test Case for backups.""" - @mock.patch.object(cinder.tests.unit.fake_driver.FakeISCSIDriver, + @mock.patch.object(cinder.tests.fake_driver.FakeISCSIDriver, 'set_initialized') - @mock.patch.object(cinder.tests.unit.fake_driver.FakeISCSIDriver, + @mock.patch.object(cinder.tests.fake_driver.FakeISCSIDriver, 'do_setup') - @mock.patch.object(cinder.tests.unit.fake_driver.FakeISCSIDriver, + @mock.patch.object(cinder.tests.fake_driver.FakeISCSIDriver, 'check_for_setup_error') @mock.patch('cinder.context.get_admin_context') def test_init_host(self, mock_get_admin_context, mock_check, mock_setup, @@ -639,10 +640,18 @@ class BackupTestCase(BaseBackupTest): 'secure_enabled': False, 'is_snapshot': True, } + # TODO(walter-boring) This is to account for the missing FakeConnector + # in os-brick 1.6.0 and > + if hasattr(connector, 'FakeConnector'): + conn = connector.FakeConnector(None) + else: + from os_brick.initiator.connectors import fake + conn = fake.FakeConnector(None) + attach_info = { 'device': {'path': '/dev/null'}, 'conn': {'data': {}}, - 'connector': os_brick.initiator.connector.FakeConnector(None)} + 'connector': conn} mock_detach_snapshot = self.mock_object(driver.BaseVD, '_detach_snapshot') mock_attach_snapshot = self.mock_object(driver.BaseVD, @@ -1277,8 +1286,23 @@ class BackupTestCaseWithVerify(BaseBackupTest): with mock.patch.object(manager.BackupManager, '_map_service_to_driver') as \ mock_map_service_to_driver: + # It should works when the service name is a string + mock_map_service_to_driver.return_value = 'swift' + self.backup_mgr.reset_status(self.ctxt, + backup, + fields.BackupStatus.AVAILABLE) + mock_clean_temp.assert_called_once_with(self.ctxt, backup) + new_backup = db.backup_get(self.ctxt, backup.id) + self.assertEqual(fields.BackupStatus.AVAILABLE, + new_backup['status']) + mock_map_service_to_driver.return_value = \ fake_service.get_backup_driver(self.ctxt) + self.backup_mgr.reset_status(self.ctxt, + backup, + fields.BackupStatus.ERROR) + mock_clean_temp.reset_mock() + self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) @@ -1456,6 +1480,20 @@ class BackupAPITestCase(BaseBackupTest): volume_id=volume_id, container='volumebackups') + @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') + @mock.patch('cinder.backup.api.API._is_backup_service_enabled') + def test_create_backup_in_same_host(self, mock_is_enable, + mock_create): + self.override_config('backup_use_same_host', True) + mock_is_enable.return_value = True + self.ctxt.user_id = 'fake_user' + self.ctxt.project_id = 'fake_project' + volume_id = self._create_volume_db_entry(status='available', + host='testhost#lvm', + size=1) + backup = self.api.create(self.ctxt, None, None, volume_id, None) + self.assertEqual('testhost', backup.host) + @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') def test_restore_volume(self, diff --git a/cinder/tests/unit/brick/test_brick_lvm.py b/cinder/tests/unit/brick/test_brick_lvm.py index a8dc3f9f8..864e1c551 100644 --- a/cinder/tests/unit/brick/test_brick_lvm.py +++ b/cinder/tests/unit/brick/test_brick_lvm.py @@ -47,7 +47,7 @@ class BrickLvmTestCase(test.TestCase): def 
fake_customised_lvm_version(obj, *cmd, **kwargs): return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "") - def fake_execute(obj, *cmd, **kwargs): + def fake_execute(obj, *cmd, **kwargs): # noqa cmd_string = ', '.join(cmd) data = "\n" @@ -115,8 +115,18 @@ class BrickLvmTestCase(test.TestCase): cmd_string): if 'test-volumes' in cmd_string: data = ' wi-a-' + elif 'snapshot' in cmd_string: + data = ' swi-a-s--' + elif 'open' in cmd_string: + data = ' -wi-ao---' else: data = ' owi-a-' + elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Origin' in + cmd_string): + if 'snapshot' in cmd_string: + data = ' fake-volume-1' + else: + data = ' ' elif 'env, LC_ALL=C, pvs, --noheadings' in cmd_string: data = " fake-vg|/dev/sda|10.00|1.00\n" data += " fake-vg|/dev/sdb|10.00|1.00\n" @@ -322,6 +332,19 @@ class BrickLvmTestCase(test.TestCase): self.assertTrue(self.vg.lv_has_snapshot('fake-vg')) self.assertFalse(self.vg.lv_has_snapshot('test-volumes')) + def test_lv_is_snapshot(self): + self.assertTrue(self.vg.lv_is_snapshot('fake-snapshot')) + self.assertFalse(self.vg.lv_is_snapshot('test-volumes')) + + def test_lv_is_open(self): + self.assertTrue(self.vg.lv_is_open('fake-open')) + self.assertFalse(self.vg.lv_is_open('fake-snapshot')) + + def test_lv_get_origin(self): + self.assertEqual('fake-volume-1', + self.vg.lv_get_origin('fake-snapshot')) + self.assertFalse(None, self.vg.lv_get_origin('test-volumes')) + def test_activate_lv(self): with mock.patch.object(self.vg, '_execute'): self.vg._supports_lvchange_ignoreskipactivation = True @@ -349,3 +372,20 @@ class BrickLvmTestCase(test.TestCase): self.vg.vg_name = "test-volumes" self.vg.extend_volume("test", "2G") self.assertFalse(self.vg.deactivate_lv.called) + + def test_lv_deactivate(self): + with mock.patch.object(self.vg, '_execute'): + is_active_mock = mock.Mock() + is_active_mock.return_value = False + self.vg._lv_is_active = is_active_mock + self.vg.create_volume('test', '1G') + self.vg.deactivate_lv('test') + + def test_lv_deactivate_timeout(self): + with mock.patch.object(self.vg, '_execute'): + is_active_mock = mock.Mock() + is_active_mock.return_value = True + self.vg._lv_is_active = is_active_mock + self.vg.create_volume('test', '1G') + self.assertRaises(exception.VolumeNotDeactivated, + self.vg.deactivate_lv, 'test') diff --git a/cinder/tests/unit/compute/test_nova.py b/cinder/tests/unit/compute/test_nova.py index 4d4a6dc47..2c69b6b04 100644 --- a/cinder/tests/unit/compute/test_nova.py +++ b/cinder/tests/unit/compute/test_nova.py @@ -162,7 +162,9 @@ class NovaApiTestCase(test.TestCase): self.api.update_server_volume(self.ctx, 'server_id', 'attach_id', 'new_volume_id') - mock_novaclient.assert_called_once_with(self.ctx) + mock_novaclient.assert_called_once_with(self.ctx, + admin_endpoint=True, + privileged_user=True) mock_update_server_volume.assert_called_once_with( 'server_id', 'attach_id', diff --git a/cinder/tests/unit/conf_fixture.py b/cinder/tests/unit/conf_fixture.py index 7f101c5d6..bf13e9f25 100644 --- a/cinder/tests/unit/conf_fixture.py +++ b/cinder/tests/unit/conf_fixture.py @@ -23,10 +23,9 @@ CONF = cfg.CONF CONF.import_opt('policy_file', 'cinder.policy', group='oslo_policy') CONF.import_opt('volume_driver', 'cinder.volume.manager') -CONF.import_opt('xiv_ds8k_proxy', - 'cinder.volume.drivers.ibm.xiv_ds8k') CONF.import_opt('backup_driver', 'cinder.backup.manager') -CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='keymgr') +CONF.import_opt('api_class', 'cinder.keymgr', group='key_manager') 
+CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='key_manager') CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') def_vol_type = 'fake_vol_type' @@ -35,21 +34,23 @@ def_vol_type = 'fake_vol_type' def set_defaults(conf): conf.set_default('default_volume_type', def_vol_type) conf.set_default('volume_driver', - 'cinder.tests.unit.fake_driver.FakeISCSIDriver') + 'cinder.tests.fake_driver.FakeISCSIDriver') conf.set_default('iscsi_helper', 'fake') conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake') conf.set_default('connection', 'sqlite://', group='database') conf.set_default('sqlite_synchronous', False, group='database') conf.set_default('policy_file', 'cinder.tests.unit/policy.json', group='oslo_policy') - conf.set_default( - 'xiv_ds8k_proxy', - 'cinder.tests.unit.test_ibm_xiv_ds8k.XIVDS8KFakeProxyDriver') conf.set_default('backup_driver', 'cinder.tests.unit.backup.fake_service') - conf.set_default('fixed_key', default='0' * 64, group='keymgr') + conf.set_default('api_class', + 'cinder.keymgr.conf_key_mgr.ConfKeyManager', + group='key_manager') + conf.set_default('fixed_key', default='0' * 64, group='key_manager') conf.set_default('scheduler_driver', 'cinder.scheduler.filter_scheduler.FilterScheduler') conf.set_default('state_path', os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..', '..'))) conf.set_default('policy_dirs', [], group='oslo_policy') + # This is where we don't authenticate conf.set_default('auth_strategy', 'noauth') + conf.set_default('auth_uri', 'fake', 'keystone_authtoken') diff --git a/cinder/tests/unit/consistencygroup/test_cg.py b/cinder/tests/unit/consistencygroup/test_cg.py index 154ad30f7..4b006cbef 100644 --- a/cinder/tests/unit/consistencygroup/test_cg.py +++ b/cinder/tests/unit/consistencygroup/test_cg.py @@ -23,7 +23,7 @@ from cinder import quota from cinder.tests.unit import conf_fixture from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot -from cinder.tests.unit.test_volume import BaseVolumeTestCase +from cinder.tests.unit import test_volume from cinder.tests.unit import utils as tests_utils import cinder.volume from cinder.volume import driver @@ -33,7 +33,7 @@ CGQUOTAS = quota.CGQUOTAS CONF = cfg.CONF -class ConsistencyGroupTestCase(BaseVolumeTestCase): +class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): def test_delete_volume_in_consistency_group(self): """Test deleting a volume that's tied to a consistency group fails.""" consistencygroup_id = fake.CONSISTENCY_GROUP_ID @@ -65,8 +65,8 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase): self.assertEqual('fakepool', pool) return {'status': 'available'} - self.stubs.Set(self.volume.driver, 'create_consistencygroup', - fake_driver_create_cg) + self.mock_object(self.volume.driver, 'create_consistencygroup', + fake_driver_create_cg) group = tests_utils.create_consistencygroup( self.context, @@ -460,6 +460,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase): return cgsnap, snaps + @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup', autospec=True, return_value={'status': 'available'}) @@ -474,7 +475,8 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase): return_value=({'status': 'deleted'}, [])) def test_create_delete_cgsnapshot(self, mock_del_cgsnap, mock_create_cgsnap, - mock_del_cg, _mock_create_cg): + mock_del_cg, _mock_create_cg, + mock_notify): """Test cgsnapshot can be created and deleted.""" 
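In the consistency-group hunk that follows, long index-by-index walks over self.notifier.notifications are replaced by patching FakeNotifier._notify and asserting on its ordered call list. A sketch of an assert helper with that shape; cinder's real assert_notify_called lives in its test base and its _notify also receives a context argument between priority and event type, which this approximation drops.

from unittest import mock


def assert_notify_called(mock_notify, expected):
    """Check that notifications happened in the given order.

    expected is a sequence of (priority, event_type) pairs matched
    against the first two positional arguments of each call.
    """
    calls = mock_notify.call_args_list
    for (priority, event_type), call in zip(expected, calls):
        args = call[0]
        assert args[0] == priority, args
        assert args[1] == event_type, args


notify = mock.Mock()
notify('INFO', 'volume.create.start')
notify('INFO', 'volume.create.end')
assert_notify_called(notify, (['INFO', 'volume.create.start'],
                              ['INFO', 'volume.create.end']))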
group = tests_utils.create_consistencygroup( @@ -488,11 +490,9 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase): volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) - if len(self.notifier.notifications) > 2: - self.assertFalse(self.notifier.notifications[2], - self.notifier.notifications) - self.assertEqual(2, len(self.notifier.notifications), - self.notifier.notifications) + self.assert_notify_called(mock_notify, + (['INFO', 'volume.create.start'], + ['INFO', 'volume.create.end'])) cgsnapshot_returns = self._create_cgsnapshot(group.id, [volume_id]) cgsnapshot = cgsnapshot_returns[0] @@ -502,51 +502,27 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase): context.get_admin_context(), cgsnapshot.id).id) - if len(self.notifier.notifications) > 6: - self.assertFalse(self.notifier.notifications[6], - self.notifier.notifications) - - msg = self.notifier.notifications[2] - self.assertEqual('cgsnapshot.create.start', msg['event_type']) - expected = { - 'created_at': 'DONTCARE', - 'name': None, - 'cgsnapshot_id': cgsnapshot.id, - 'status': 'creating', - 'tenant_id': fake.PROJECT_ID, - 'user_id': fake.USER_ID, - 'consistencygroup_id': group.id - } - self.assertDictMatch(expected, msg['payload']) - msg = self.notifier.notifications[3] - self.assertEqual('snapshot.create.start', msg['event_type']) - msg = self.notifier.notifications[4] - expected['status'] = 'available' - self.assertEqual('cgsnapshot.create.end', msg['event_type']) - self.assertDictMatch(expected, msg['payload']) - msg = self.notifier.notifications[5] - self.assertEqual('snapshot.create.end', msg['event_type']) - - self.assertEqual(6, len(self.notifier.notifications), - self.notifier.notifications) + self.assert_notify_called(mock_notify, + (['INFO', 'volume.create.start'], + ['INFO', 'volume.create.end'], + ['INFO', 'cgsnapshot.create.start'], + ['INFO', 'snapshot.create.start'], + ['INFO', 'cgsnapshot.create.end'], + ['INFO', 'snapshot.create.end'])) self.volume.delete_cgsnapshot(self.context, cgsnapshot) - if len(self.notifier.notifications) > 10: - self.assertFalse(self.notifier.notifications[10], - self.notifier.notifications) - - msg = self.notifier.notifications[6] - self.assertEqual('cgsnapshot.delete.start', msg['event_type']) - expected['status'] = 'available' - self.assertDictMatch(expected, msg['payload']) - msg = self.notifier.notifications[8] - self.assertEqual('cgsnapshot.delete.end', msg['event_type']) - expected['status'] = 'deleted' - self.assertDictMatch(expected, msg['payload']) - - self.assertEqual(10, len(self.notifier.notifications), - self.notifier.notifications) + self.assert_notify_called(mock_notify, + (['INFO', 'volume.create.start'], + ['INFO', 'volume.create.end'], + ['INFO', 'cgsnapshot.create.start'], + ['INFO', 'snapshot.create.start'], + ['INFO', 'cgsnapshot.create.end'], + ['INFO', 'snapshot.create.end'], + ['INFO', 'cgsnapshot.delete.start'], + ['INFO', 'snapshot.delete.start'], + ['INFO', 'cgsnapshot.delete.end'], + ['INFO', 'snapshot.delete.end'])) cgsnap = objects.CGSnapshot.get_by_id( context.get_admin_context(read_deleted='yes'), diff --git a/cinder/tests/unit/db/test_cluster.py b/cinder/tests/unit/db/test_cluster.py new file mode 100644 index 000000000..f10161c4a --- /dev/null +++ b/cinder/tests/unit/db/test_cluster.py @@ -0,0 +1,298 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for cluster table related operations.""" + +import datetime + +import mock +from oslo_config import cfg +from oslo_utils import timeutils +from sqlalchemy.orm import exc + +from cinder import db +from cinder import exception +from cinder.tests.unit import test_db_api + + +CONF = cfg.CONF + + +class ClusterTestCase(test_db_api.BaseTest): + """Unit tests for cinder.db.api.cluster_*.""" + + def _default_cluster_values(self): + return { + 'name': 'cluster_name', + 'binary': 'cinder-volume', + 'disabled': False, + 'disabled_reason': None, + 'deleted': False, + 'updated_at': None, + 'deleted_at': None, + } + + def _create_cluster(self, **values): + create_values = self._default_cluster_values() + create_values.update(values) + cluster = db.cluster_create(self.ctxt, create_values) + return db.cluster_get(self.ctxt, cluster.id, services_summary=True) + + def _create_populated_cluster(self, num_services, num_down_svcs=0, + **values): + """Helper method that creates a cluster with up and down services.""" + up_time = timeutils.utcnow() + down_time = (up_time - + datetime.timedelta(seconds=CONF.service_down_time + 1)) + cluster = self._create_cluster(**values) + + svcs = [ + db.service_create( + self.ctxt, + {'cluster_name': cluster.name, + 'updated_at': down_time if i < num_down_svcs else up_time}) + for i in range(num_services) + ] + return cluster, svcs + + def test_cluster_create_and_get(self): + """Basic cluster creation test.""" + values = self._default_cluster_values() + cluster = db.cluster_create(self.ctxt, values) + values['last_heartbeat'] = None + self.assertEqual(0, cluster.race_preventer) + for k, v in values.items(): + self.assertEqual(v, getattr(cluster, k)) + + db_cluster = db.cluster_get(self.ctxt, cluster.id, + services_summary=True) + for k, v in values.items(): + self.assertEqual(v, getattr(db_cluster, k)) + self.assertEqual(0, db_cluster.race_preventer) + + def test_cluster_create_cfg_disabled(self): + """Test that create uses enable_new_services configuration option.""" + self.override_config('enable_new_services', False) + cluster = self._create_cluster(disabled=None) + self.assertTrue(cluster.disabled) + + def test_cluster_create_disabled_preference(self): + """Test that provided disabled value has highest priority on create.""" + self.override_config('enable_new_services', False) + cluster = self._create_cluster() + self.assertFalse(cluster.disabled) + + def test_cluster_create_duplicate(self): + """Test that unique constraints are working. + + To remove potential races on creation we have a constraint set on name + and race_preventer fields, and we set value on creation to 0, so 2 + clusters with the same name will fail this constraint. On deletion we + change this field to the same value as the id which will be unique and + will not conflict with the creation of another cluster with the same + name. + """ + cluster = self._create_cluster() + self.assertRaises(exception.ClusterExists, + self._create_cluster, + name=cluster.name) + + def test_cluster_create_not_duplicate(self): + """Test that unique constraints will work with delete operation. 
+ + To remove potential races on creation we have a constraint set on name + and race_preventer fields, and we set value on creation to 0, so 2 + clusters with the same name will fail this constraint. On deletion we + change this field to the same value as the id which will be unique and + will not conflict with the creation of another cluster with the same + name. + """ + cluster = self._create_cluster() + self.assertIsNone(db.cluster_destroy(self.ctxt, cluster.id)) + self.assertIsNotNone(self._create_cluster(name=cluster.name)) + + def test_cluster_get_fail(self): + """Test that cluster get will fail if the cluster doesn't exists.""" + self._create_cluster(name='cluster@backend') + self.assertRaises(exception.ClusterNotFound, + db.cluster_get, self.ctxt, 'name=cluster@backend2') + + def test_cluster_get_by_name(self): + """Getting a cluster by name will include backends if not specified.""" + cluster = self._create_cluster(name='cluster@backend') + # Get without the backend + db_cluster = db.cluster_get(self.ctxt, name='cluster') + self.assertEqual(cluster.id, db_cluster.id) + # Get with the backend detail + db_cluster = db.cluster_get(self.ctxt, name='cluster@backend') + self.assertEqual(cluster.id, db_cluster.id) + + def test_cluster_get_without_summary(self): + """Test getting cluster without summary information.""" + cluster = self._create_cluster() + db_cluster = db.cluster_get(self.ctxt, cluster.id) + self.assertRaises(exc.DetachedInstanceError, + getattr, db_cluster, 'num_hosts') + self.assertRaises(exc.DetachedInstanceError, + getattr, db_cluster, 'num_down_hosts') + self.assertIsNone(db_cluster.last_heartbeat) + + def test_cluster_get_with_summary_empty_cluster(self): + """Test getting empty cluster with summary information.""" + cluster = self._create_cluster() + db_cluster = db.cluster_get(self.ctxt, cluster.id, + services_summary=True) + self.assertEqual(0, db_cluster.num_hosts) + self.assertEqual(0, db_cluster.num_down_hosts) + self.assertIsNone(db_cluster.last_heartbeat) + + def test_cluster_get_with_summary(self): + """Test getting cluster with summary information.""" + cluster, svcs = self._create_populated_cluster(3, 1) + db_cluster = db.cluster_get(self.ctxt, cluster.id, + services_summary=True) + self.assertEqual(3, db_cluster.num_hosts) + self.assertEqual(1, db_cluster.num_down_hosts) + self.assertEqual(svcs[1].updated_at, db_cluster.last_heartbeat) + + def test_cluster_get_is_up_on_empty_cluster(self): + """Test is_up filter works on empty clusters.""" + cluster = self._create_cluster() + db_cluster = db.cluster_get(self.ctxt, cluster.id, is_up=False) + self.assertEqual(cluster.id, db_cluster.id) + self.assertRaises(exception.ClusterNotFound, + db.cluster_get, self.ctxt, cluster.id, is_up=True) + + def test_cluster_get_services_on_empty_cluster(self): + """Test get_services filter works on empty clusters.""" + cluster = self._create_cluster() + db_cluster = db.cluster_get(self.ctxt, cluster.id, get_services=True) + self.assertEqual(cluster.id, db_cluster.id) + self.assertListEqual([], db_cluster.services) + + def test_cluster_get_services(self): + """Test services is properly populated on non empty cluster.""" + # We create another cluster to see we do the selection correctly + self._create_populated_cluster(2, name='cluster2') + # We create our cluster with 2 up nodes and 1 down + cluster, svcs = self._create_populated_cluster(3, 1) + # Add a deleted service to the cluster + db.service_create(self.ctxt, + {'cluster_name': cluster.name, + 'deleted': True}) + 
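_create_populated_cluster above marks a service as down by backdating its updated_at past CONF.service_down_time, and every is_up filter in these tests reduces to that timestamp comparison. A self-contained sketch of the rule; the 60-second window is illustrative, since cinder reads service_down_time from configuration.

import datetime

SERVICE_DOWN_TIME = 60  # seconds; stands in for CONF.service_down_time


def is_service_up(last_heartbeat, now=None):
    """A service counts as up if it reported within the grace window."""
    if last_heartbeat is None:
        return False
    now = now or datetime.datetime.utcnow()
    return (now - last_heartbeat) <= datetime.timedelta(
        seconds=SERVICE_DOWN_TIME)


now = datetime.datetime.utcnow()
up_beat = now
down_beat = now - datetime.timedelta(seconds=SERVICE_DOWN_TIME + 1)
assert is_service_up(up_beat, now=now)
assert not is_service_up(down_beat, now=now)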
db_cluster = db.cluster_get(self.ctxt, name=cluster.name, + get_services=True) + self.assertEqual(3, len(db_cluster.services)) + self.assertSetEqual({svc.id for svc in svcs}, + {svc.id for svc in db_cluster.services}) + + def test_cluster_get_is_up_all_are_down(self): + """Test that is_up filter works when all services are down.""" + cluster, svcs = self._create_populated_cluster(3, 3) + self.assertRaises(exception.ClusterNotFound, + db.cluster_get, self.ctxt, cluster.id, is_up=True) + db_cluster = db.cluster_get(self.ctxt, name=cluster.name, is_up=False) + self.assertEqual(cluster.id, db_cluster.id) + + def test_cluster_get_by_num_down_hosts(self): + """Test cluster_get by subquery field num_down_hosts.""" + cluster, svcs = self._create_populated_cluster(3, 2) + result = db.cluster_get(self.ctxt, num_down_hosts=2) + self.assertEqual(cluster.id, result.id) + + def test_cluster_get_by_num_hosts(self): + """Test cluster_get by subquery field num_hosts.""" + cluster, svcs = self._create_populated_cluster(3, 2) + result = db.cluster_get(self.ctxt, num_hosts=3) + self.assertEqual(cluster.id, result.id) + + def test_cluster_destroy(self): + """Test basic cluster destroy.""" + cluster = self._create_cluster() + # On creation race_preventer is marked with a 0 + self.assertEqual(0, cluster.race_preventer) + db.cluster_destroy(self.ctxt, cluster.id) + db_cluster = db.cluster_get(self.ctxt, cluster.id, read_deleted='yes') + self.assertTrue(db_cluster.deleted) + self.assertIsNotNone(db_cluster.deleted_at) + # On deletion race_preventer is marked with the id + self.assertEqual(cluster.id, db_cluster.race_preventer) + + def test_cluster_destroy_non_existent(self): + """Test destroying non existent cluster.""" + self.assertRaises(exception.ClusterNotFound, + db.cluster_destroy, self.ctxt, 0) + + def test_cluster_destroy_has_services(self): + """Test that we cannot delete a cluster with non deleted services.""" + cluster, svcs = self._create_populated_cluster(3, 1) + self.assertRaises(exception.ClusterHasHosts, + db.cluster_destroy, self.ctxt, cluster.id) + + def test_cluster_update_non_existent(self): + """Test that we raise an exception on updating non existent cluster.""" + self.assertRaises(exception.ClusterNotFound, + db.cluster_update, self.ctxt, 0, {'disabled': True}) + + def test_cluster_update(self): + """Test basic cluster update.""" + cluster = self._create_cluster() + self.assertFalse(cluster.disabled) + db.cluster_update(self.ctxt, cluster.id, {'disabled': True}) + db_cluster = db.cluster_get(self.ctxt, cluster.id) + self.assertTrue(db_cluster.disabled) + + def test_cluster_get_all_empty(self): + """Test basic empty cluster get_all.""" + self.assertListEqual([], db.cluster_get_all(self.ctxt)) + + def test_cluster_get_all_matches(self): + """Basic test of get_all with a matching filter.""" + cluster1, svcs = self._create_populated_cluster(3, 1) + cluster2, svcs = self._create_populated_cluster(3, 2, name='cluster2') + cluster3, svcs = self._create_populated_cluster(3, 3, name='cluster3') + + expected = {cluster1.id, cluster2.id} + result = db.cluster_get_all(self.ctxt, is_up=True) + self.assertEqual(len(expected), len(result)) + self.assertSetEqual(expected, {cluster.id for cluster in result}) + + def test_cluster_get_all_no_match(self): + """Basic test of get_all with a non matching filter.""" + cluster1, svcs = self._create_populated_cluster(3, 3) + result = db.cluster_get_all(self.ctxt, is_up=True) + self.assertListEqual([], result) + + @mock.patch('cinder.db.sqlalchemy.api._cluster_query') 
+ def test_cluster_get_all_passes_parameters(self, cluster_query_mock): + """Test that get_all passes all parameters. + + Since we have already tested all filters and parameters with + cluster_get method all we have to do for get_all is to check that we + are passing them to the query building method. + """ + args = (mock.sentinel.read_deleted, mock.sentinel.get_services, + mock.sentinel.services_summary, mock.sentinel.is_up, + mock.sentinel.name_match_level) + filters = {'session': mock.sentinel.session, + 'name': mock.sentinel.name, + 'disabled': mock.sentinel.disabled, + 'disabled_reason': mock.sentinel.disabled_reason, + 'race_preventer': mock.sentinel.race_preventer, + 'last_heartbeat': mock.sentinel.last_heartbeat, + 'num_hosts': mock.sentinel.num_hosts, + 'num_down_hosts': mock.sentinel.num_down_hosts} + db.cluster_get_all(self.ctxt, *args, **filters) + cluster_query_mock.assert_called_once_with(self.ctxt, *args, **filters) diff --git a/cinder/tests/unit/db/test_purge.py b/cinder/tests/unit/db/test_purge.py index 7e0cedcae..194a35ba8 100644 --- a/cinder/tests/unit/db/test_purge.py +++ b/cinder/tests/unit/db/test_purge.py @@ -60,6 +60,9 @@ class PurgeDeletedTest(test.TestCase): self.vgm = sqlalchemyutils.get_table( self.engine, "volume_glance_metadata") + self.qos = sqlalchemyutils.get_table( + self.engine, "quality_of_service_specs") + self.uuidstrs = [] for unused in range(6): self.uuidstrs.append(uuid.uuid4().hex) @@ -89,6 +92,19 @@ class PurgeDeletedTest(test.TestCase): snapshot_id=uuidstr, key='image_name', value='test') self.conn.execute(ins_stmt) + ins_stmt = self.qos.insert().values( + id=uuidstr, key='QoS_Specs_Name', value='test') + self.conn.execute(ins_stmt) + + ins_stmt = self.vol_types.insert().values( + id=uuid.uuid4().hex, qos_specs_id=uuidstr) + self.conn.execute(ins_stmt) + + ins_stmt = self.qos.insert().values( + id=uuid.uuid4().hex, specs_id=uuidstr, key='desc', + value='test') + self.conn.execute(ins_stmt) + # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago old = timeutils.utcnow() - datetime.timedelta(days=20) older = timeutils.utcnow() - datetime.timedelta(days=60) @@ -145,6 +161,25 @@ class PurgeDeletedTest(test.TestCase): where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older) + make_qos_old = self.qos.update().where( + self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old) + make_qos_older = self.qos.update().where( + self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older) + + make_qos_child_record_old = self.qos.update().where( + self.qos.c.specs_id.in_(self.uuidstrs[1:3])).values( + deleted_at=old) + make_qos_child_record_older = self.qos.update().where( + self.qos.c.specs_id.in_(self.uuidstrs[4:6])).values( + deleted_at=older) + + make_vol_types1_old = self.vol_types.update().where( + self.vol_types.c.qos_specs_id.in_(self.uuidstrs[1:3])).values( + deleted_at=old) + make_vol_types1_older = self.vol_types.update().where( + self.vol_types.c.qos_specs_id.in_(self.uuidstrs[4:6])).values( + deleted_at=older) + self.conn.execute(make_vol_old) self.conn.execute(make_vol_older) self.conn.execute(make_vol_meta_old) @@ -165,6 +200,15 @@ class PurgeDeletedTest(test.TestCase): self.conn.execute(make_snap_glance_meta_old) self.conn.execute(make_snap_glance_meta_older) + self.conn.execute(make_qos_old) + self.conn.execute(make_qos_older) + + self.conn.execute(make_qos_child_record_old) + self.conn.execute(make_qos_child_record_older) + + self.conn.execute(make_vol_types1_old) + self.conn.execute(make_vol_types1_older) 
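The purge tests above backdate deleted_at across a mix of tables (now including quality_of_service_specs) and then assert on the surviving row counts. The operation under test boils down to a bulk DELETE with an age cutoff per table; a minimal SQLAlchemy sketch of that shape, using a stand-in table rather than cinder's schema:

import datetime

import sqlalchemy as sa

metadata = sa.MetaData()
volumes = sa.Table('volumes', metadata,
                   sa.Column('id', sa.String(36), primary_key=True),
                   sa.Column('deleted', sa.Boolean, default=False),
                   sa.Column('deleted_at', sa.DateTime, nullable=True))

engine = sa.create_engine('sqlite://')
metadata.create_all(engine)


def purge_deleted_rows(conn, table, age_in_days):
    # Remove soft-deleted rows older than the cutoff; rows deleted more
    # recently, and live rows, are left untouched.
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=age_in_days)
    stmt = (table.delete()
            .where(table.c.deleted.is_(True))
            .where(table.c.deleted_at < cutoff))
    return conn.execute(stmt).rowcount


with engine.connect() as conn:
    old = datetime.datetime.utcnow() - datetime.timedelta(days=60)
    conn.execute(volumes.insert(), [
        {'id': '1', 'deleted': True, 'deleted_at': old},
        {'id': '2', 'deleted': False, 'deleted_at': None},
    ])
    assert purge_deleted_rows(conn, volumes, age_in_days=30) == 1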
+ def test_purge_deleted_rows_old(self): dialect = self.engine.url.get_dialect() if dialect == sqlite.dialect: @@ -186,15 +230,17 @@ class PurgeDeletedTest(test.TestCase): snap_rows = self.session.query(self.snapshots).count() snap_meta_rows = self.session.query(self.sm).count() vol_glance_meta_rows = self.session.query(self.vgm).count() + qos_rows = self.session.query(self.qos).count() # Verify that we only deleted 2 self.assertEqual(4, vol_rows) self.assertEqual(4, vol_meta_rows) - self.assertEqual(4, vol_type_rows) + self.assertEqual(8, vol_type_rows) self.assertEqual(4, vol_type_proj_rows) self.assertEqual(4, snap_rows) self.assertEqual(4, snap_meta_rows) self.assertEqual(8, vol_glance_meta_rows) + self.assertEqual(8, qos_rows) def test_purge_deleted_rows_older(self): dialect = self.engine.url.get_dialect() @@ -217,15 +263,17 @@ class PurgeDeletedTest(test.TestCase): snap_rows = self.session.query(self.snapshots).count() snap_meta_rows = self.session.query(self.sm).count() vol_glance_meta_rows = self.session.query(self.vgm).count() + qos_rows = self.session.query(self.qos).count() # Verify that we only have 2 rows now self.assertEqual(2, vol_rows) self.assertEqual(2, vol_meta_rows) - self.assertEqual(2, vol_type_rows) + self.assertEqual(4, vol_type_rows) self.assertEqual(2, vol_type_proj_rows) self.assertEqual(2, snap_rows) self.assertEqual(2, snap_meta_rows) self.assertEqual(4, vol_glance_meta_rows) + self.assertEqual(4, qos_rows) def test_purge_deleted_rows_bad_args(self): # Test with no age argument @@ -234,10 +282,6 @@ class PurgeDeletedTest(test.TestCase): self.assertRaises(exception.InvalidParameterValue, db.purge_deleted_rows, self.context, age_in_days='ten') - # Test with negative value - self.assertRaises(exception.InvalidParameterValue, - db.purge_deleted_rows, self.context, - age_in_days=-1) def test_purge_deleted_rows_integrity_failure(self): dialect = self.engine.url.get_dialect() diff --git a/cinder/tests/unit/db/test_qos_specs.py b/cinder/tests/unit/db/test_qos_specs.py index 48ab1b8f9..79596df9c 100644 --- a/cinder/tests/unit/db/test_qos_specs.py +++ b/cinder/tests/unit/db/test_qos_specs.py @@ -40,16 +40,14 @@ class QualityOfServiceSpecsTableTestCase(test.TestCase): project_id=fake.PROJECT_ID, is_admin=True) - def _create_qos_specs(self, name, values=None): + def _create_qos_specs(self, name, consumer='back-end', values=None): """Create a transfer object.""" - if values: - specs = dict(name=name, qos_specs=values) - else: - specs = {'name': name, - 'qos_specs': { - 'consumer': 'back-end', - 'key1': 'value1', - 'key2': 'value2'}} + if values is None: + values = {'key1': 'value1', 'key2': 'value2'} + + specs = {'name': name, + 'consumer': consumer, + 'specs': values} return db.qos_specs_create(self.ctxt, specs)['id'] def test_qos_specs_create(self): @@ -66,84 +64,66 @@ class QualityOfServiceSpecsTableTestCase(test.TestCase): self.assertEqual(specs_id, query_id) def test_qos_specs_get(self): - value = dict(consumer='front-end', - key1='foo', key2='bar') - specs_id = self._create_qos_specs('Name1', value) + qos_spec = {'name': 'Name1', + 'consumer': 'front-end', + 'specs': {'key1': 'foo', 'key2': 'bar'}} + specs_id = self._create_qos_specs(qos_spec['name'], + qos_spec['consumer'], + qos_spec['specs']) fake_id = fake.WILL_NOT_BE_FOUND_ID self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_get, self.ctxt, fake_id) - specs = db.qos_specs_get(self.ctxt, specs_id) - expected = dict(name='Name1', id=specs_id, consumer='front-end') - del value['consumer'] - 
expected.update(dict(specs=value)) - self.assertDictMatch(expected, specs) + specs_returned = db.qos_specs_get(self.ctxt, specs_id) + qos_spec['id'] = specs_id + self.assertDictMatch(qos_spec, specs_returned) def test_qos_specs_get_all(self): - value1 = dict(consumer='front-end', - key1='v1', key2='v2') - value2 = dict(consumer='back-end', - key3='v3', key4='v4') - value3 = dict(consumer='back-end', - key5='v5', key6='v6') + qos_list = [ + {'name': 'Name1', + 'consumer': 'front-end', + 'specs': {'key1': 'v1', 'key2': 'v2'}}, + {'name': 'Name2', + 'consumer': 'back-end', + 'specs': {'key1': 'v3', 'key2': 'v4'}}, + {'name': 'Name3', + 'consumer': 'back-end', + 'specs': {'key1': 'v5', 'key2': 'v6'}}] - spec_id1 = self._create_qos_specs('Name1', value1) - spec_id2 = self._create_qos_specs('Name2', value2) - spec_id3 = self._create_qos_specs('Name3', value3) + for qos in qos_list: + qos['id'] = self._create_qos_specs(qos['name'], + qos['consumer'], + qos['specs']) - specs = db.qos_specs_get_all(self.ctxt) - self.assertEqual(3, len(specs), + specs_list_returned = db.qos_specs_get_all(self.ctxt) + self.assertEqual(len(qos_list), len(specs_list_returned), "Unexpected number of qos specs records") - expected1 = dict(name='Name1', id=spec_id1, consumer='front-end') - expected2 = dict(name='Name2', id=spec_id2, consumer='back-end') - expected3 = dict(name='Name3', id=spec_id3, consumer='back-end') - del value1['consumer'] - del value2['consumer'] - del value3['consumer'] - expected1.update(dict(specs=value1)) - expected2.update(dict(specs=value2)) - expected3.update(dict(specs=value3)) - self.assertIn(expected1, specs) - self.assertIn(expected2, specs) - self.assertIn(expected3, specs) - - def test_qos_specs_get_by_name(self): - name = str(int(time.time())) - value = dict(consumer='front-end', - foo='Foo', bar='Bar') - specs_id = self._create_qos_specs(name, value) - specs = db.qos_specs_get_by_name(self.ctxt, name) - del value['consumer'] - expected = {'name': name, - 'id': specs_id, - 'consumer': 'front-end', - 'specs': value} - self.assertDictMatch(expected, specs) + for expected_qos in qos_list: + self.assertIn(expected_qos, specs_list_returned) def test_qos_specs_delete(self): name = str(int(time.time())) specs_id = self._create_qos_specs(name) db.qos_specs_delete(self.ctxt, specs_id) - self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_get, + self.assertRaises(exception.QoSSpecsNotFound, + db.qos_specs_get, self.ctxt, specs_id) def test_qos_specs_item_delete(self): name = str(int(time.time())) - value = dict(consumer='front-end', - foo='Foo', bar='Bar') - specs_id = self._create_qos_specs(name, value) + value = dict(foo='Foo', bar='Bar') + specs_id = self._create_qos_specs(name, 'front-end', value) - del value['consumer'] del value['foo'] expected = {'name': name, 'id': specs_id, 'consumer': 'front-end', 'specs': value} db.qos_specs_item_delete(self.ctxt, specs_id, 'foo') - specs = db.qos_specs_get_by_name(self.ctxt, name) + specs = db.qos_specs_get(self.ctxt, specs_id) self.assertDictMatch(expected, specs) def test_associate_type_with_qos(self): @@ -214,7 +194,8 @@ class QualityOfServiceSpecsTableTestCase(test.TestCase): def test_qos_specs_update(self): name = 'FakeName' specs_id = self._create_qos_specs(name) - value = dict(key2='new_value2', key3='value3') + value = {'consumer': 'both', + 'specs': {'key2': 'new_value2', 'key3': 'value3'}} self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_update, self.ctxt, fake.WILL_NOT_BE_FOUND_ID, value) @@ -222,3 +203,4 @@ class 
QualityOfServiceSpecsTableTestCase(test.TestCase): specs = db.qos_specs_get(self.ctxt, specs_id) self.assertEqual('new_value2', specs['specs']['key2']) self.assertEqual('value3', specs['specs']['key3']) + self.assertEqual('both', specs['consumer']) diff --git a/cinder/tests/unit/fake_cluster.py b/cinder/tests/unit/fake_cluster.py new file mode 100644 index 000000000..7ea3395a4 --- /dev/null +++ b/cinder/tests/unit/fake_cluster.py @@ -0,0 +1,70 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import timeutils +from oslo_versionedobjects import fields + +from cinder.db.sqlalchemy import models +from cinder import objects + + +def cluster_basic_fields(): + """Return basic fields for a cluster.""" + return { + 'id': 1, + 'created_at': timeutils.utcnow(with_timezone=False), + 'deleted': False, + 'name': 'cluster_name', + 'binary': 'cinder-volume', + 'race_preventer': 0, + } + + +def fake_cluster_orm(**updates): + """Create a fake ORM cluster instance.""" + db_cluster = fake_db_cluster(**updates) + del db_cluster['services'] + cluster = models.Cluster(**db_cluster) + return cluster + + +def fake_db_cluster(**updates): + """Helper method for fake_cluster_orm. + + Creates a complete dictionary filling missing fields based on the Cluster + field definition (defaults and nullable). + """ + db_cluster = cluster_basic_fields() + + for name, field in objects.Cluster.fields.items(): + if name in db_cluster: + continue + if field.default != fields.UnspecifiedDefault: + db_cluster[name] = field.default + elif field.nullable: + db_cluster[name] = None + else: + raise Exception('fake_db_cluster needs help with %s.' 
% name) + + if updates: + db_cluster.update(updates) + + return db_cluster + + +def fake_cluster_ovo(context, **updates): + """Create a fake Cluster versioned object.""" + return objects.Cluster._from_db_object(context, objects.Cluster(), + fake_cluster_orm(**updates)) diff --git a/cinder/tests/unit/fake_constants.py b/cinder/tests/unit/fake_constants.py index a16f6f05f..d80e5d496 100644 --- a/cinder/tests/unit/fake_constants.py +++ b/cinder/tests/unit/fake_constants.py @@ -71,3 +71,10 @@ VOLUME_TYPE3_ID = 'a3d55d15-eeb1-4816-ada9-bf82decc09b3' VOLUME_TYPE4_ID = '69943076-754d-4da8-8718-0b0117e9cab1' VOLUME_TYPE5_ID = '1c450d81-8aab-459e-b338-a6569139b835' WILL_NOT_BE_FOUND_ID = 'ce816f65-c5aa-46d6-bd62-5272752d584a' +GROUP_TYPE_ID = '29514915-5208-46ab-9ece-1cc4688ad0c1' +GROUP_TYPE2_ID = 'f8645498-1323-47a2-9442-5c57724d2e3c' +GROUP_TYPE3_ID = '1b7915f4-b899-4510-9eff-bd67508c3334' +GROUP_ID = '9a965cc6-ee3a-468d-a721-cebb193f696f' +GROUP2_ID = '40a85639-abc3-4461-9230-b131abd8ee07' +GROUP_SNAPSHOT_ID = '1e2ab152-44f0-11e6-819f-000c29d19d84' +GROUP_SNAPSHOT2_ID = '33e2ff04-44f0-11e6-819f-000c29d19d84' diff --git a/cinder/tests/unit/fake_group.py b/cinder/tests/unit/fake_group.py new file mode 100644 index 000000000..2bbc680f8 --- /dev/null +++ b/cinder/tests/unit/fake_group.py @@ -0,0 +1,49 @@ +# Copyright 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import fields + +from cinder import objects +from cinder.tests.unit import fake_constants as fake + + +def fake_db_group_type(**updates): + db_group_type = { + 'id': fake.GROUP_TYPE_ID, + 'name': 'type-1', + 'description': 'A fake group type', + 'is_public': True, + 'projects': [], + 'group_specs': {}, + } + + for name, field in objects.GroupType.fields.items(): + if name in db_group_type: + continue + if field.nullable: + db_group_type[name] = None + elif field.default != fields.UnspecifiedDefault: + db_group_type[name] = field.default + else: + raise Exception('fake_db_group_type needs help with %s.' 
% name) + + if updates: + db_group_type.update(updates) + + return db_group_type + + +def fake_group_type_obj(context, **updates): + return objects.GroupType._from_db_object( + context, objects.GroupType(), fake_db_group_type(**updates)) diff --git a/cinder/tests/unit/fake_notifier.py b/cinder/tests/unit/fake_notifier.py index 87eb97731..45b1493a4 100644 --- a/cinder/tests/unit/fake_notifier.py +++ b/cinder/tests/unit/fake_notifier.py @@ -64,13 +64,14 @@ class FakeNotifier(object): del self.notifications[:] -def stub_notifier(stubs): - stubs.Set(messaging, 'Notifier', FakeNotifier) +def mock_notifier(testcase): + testcase.mock_object(messaging, 'Notifier', FakeNotifier) if rpc.NOTIFIER: serializer = getattr(rpc.NOTIFIER, '_serializer', None) - stubs.Set(rpc, 'NOTIFIER', FakeNotifier(rpc.NOTIFIER.transport, - rpc.NOTIFIER.publisher_id, - serializer=serializer)) + testcase.mock_object(rpc, 'NOTIFIER', + FakeNotifier(rpc.NOTIFIER.transport, + rpc.NOTIFIER.publisher_id, + serializer=serializer)) def get_fake_notifier(service=None, host=None, publisher_id=None): diff --git a/cinder/tests/unit/fake_service.py b/cinder/tests/unit/fake_service.py index 676b39713..f3a65f160 100644 --- a/cinder/tests/unit/fake_service.py +++ b/cinder/tests/unit/fake_service.py @@ -15,9 +15,17 @@ from oslo_utils import timeutils from oslo_versionedobjects import fields +from cinder.db.sqlalchemy import models from cinder import objects +def fake_service_orm(**updates): + """Create a fake ORM service instance.""" + db_service = fake_db_service(**updates) + service = models.Service(**db_service) + return service + + def fake_db_service(**updates): NOW = timeutils.utcnow().replace(microsecond=0) db_service = { diff --git a/cinder/tests/unit/fake_utils.py b/cinder/tests/unit/fake_utils.py index 3f430acd2..a535453ea 100644 --- a/cinder/tests/unit/fake_utils.py +++ b/cinder/tests/unit/fake_utils.py @@ -19,8 +19,6 @@ import re from eventlet import greenthread import six -from cinder import utils - _fake_execute_repliers = [] _fake_execute_log = [] @@ -88,9 +86,3 @@ def fake_execute(*cmd_parts, **kwargs): # Replicate the sleep call in the real function greenthread.sleep(0) return reply - - -def stub_out_utils_execute(stubs): - fake_execute_set_repliers([]) - fake_execute_clear_log() - stubs.Set(utils, 'execute', fake_execute) diff --git a/cinder/tests/unit/group/__init__.py b/cinder/tests/unit/group/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/tests/unit/group/test_groups_api.py b/cinder/tests/unit/group/test_groups_api.py new file mode 100644 index 000000000..bc06817ad --- /dev/null +++ b/cinder/tests/unit/group/test_groups_api.py @@ -0,0 +1,417 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for group API. 
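+
+These tests mock out the DB, RPC and quota layers and exercise
+cinder.group.api.API: group CRUD, group snapshots, and creating groups
+from a group snapshot or from a source group.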
+""" + +import ddt +import mock + +from cinder import context +from cinder import db +import cinder.group +from cinder import objects +from cinder.objects import fields +from cinder import test +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import utils + + +@ddt.ddt +class GroupAPITestCase(test.TestCase): + """Test Case for group API.""" + + def setUp(self): + super(GroupAPITestCase, self).setUp() + self.group_api = cinder.group.API() + self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, + auth_token=True, + is_admin=True) + self.user_ctxt = context.RequestContext( + fake.USER_ID, fake.PROJECT_ID, auth_token=True) + + @mock.patch('cinder.objects.Group.get_by_id') + @mock.patch('cinder.group.api.check_policy') + def test_get(self, mock_policy, mock_group_get): + fake_group = 'fake_group' + mock_group_get.return_value = fake_group + grp = self.group_api.get(self.ctxt, fake.GROUP_ID) + self.assertEqual(fake_group, grp) + + @ddt.data(True, False) + @mock.patch('cinder.objects.GroupList.get_all') + @mock.patch('cinder.objects.GroupList.get_all_by_project') + @mock.patch('cinder.group.api.check_policy') + def test_get_all(self, is_admin, mock_policy, mock_get_all_by_project, + mock_get_all): + self.group_api.LOG = mock.Mock() + fake_groups = ['fake_group1', 'fake_group2'] + fake_groups_by_project = ['fake_group1'] + mock_get_all.return_value = fake_groups + mock_get_all_by_project.return_value = fake_groups_by_project + + if is_admin: + grps = self.group_api.get_all(self.ctxt, + filters={'all_tenants': True}) + self.assertEqual(fake_groups, grps) + else: + grps = self.group_api.get_all(self.user_ctxt) + self.assertEqual(fake_groups_by_project, grps) + + @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group') + @mock.patch('cinder.db.volume_get_all_by_generic_group') + @mock.patch('cinder.db.volumes_update') + @mock.patch('cinder.group.api.API._cast_create_group') + @mock.patch('cinder.group.api.API.update_quota') + @mock.patch('cinder.objects.Group') + @mock.patch('cinder.db.group_type_get') + @mock.patch('cinder.db.volume_types_get_by_name_or_id') + @mock.patch('cinder.group.api.check_policy') + def test_create_delete(self, mock_policy, mock_volume_types_get, + mock_group_type_get, mock_group, + mock_update_quota, mock_cast_create_group, + mock_volumes_update, mock_volume_get_all, + mock_rpc_delete_group): + mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] + mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} + name = "test_group" + description = "this is a test group" + grp = utils.create_group(self.ctxt, group_type_id = fake.GROUP_TYPE_ID, + volume_type_ids = [fake.VOLUME_TYPE_ID], + availability_zone = 'nova', host = None, + name = name, description = description, + status = fields.GroupStatus.CREATING) + mock_group.return_value = grp + + ret_group = self.group_api.create(self.ctxt, name, description, + fake.GROUP_TYPE_ID, + [fake.VOLUME_TYPE_ID], + availability_zone = 'nova') + self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) + + ret_group.host = "test_host@fakedrv#fakepool" + ret_group.status = fields.GroupStatus.AVAILABLE + self.group_api.delete(self.ctxt, ret_group, delete_volumes = True) + mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id) + mock_volumes_update.assert_called_once_with(self.ctxt, []) + mock_rpc_delete_group.assert_called_once_with(self.ctxt, ret_group) + + @mock.patch('cinder.volume.rpcapi.VolumeAPI.update_group') + 
@mock.patch('cinder.db.volume_get_all_by_generic_group') + @mock.patch('cinder.group.api.API._cast_create_group') + @mock.patch('cinder.group.api.API.update_quota') + @mock.patch('cinder.objects.Group') + @mock.patch('cinder.db.group_type_get') + @mock.patch('cinder.db.volume_types_get_by_name_or_id') + @mock.patch('cinder.group.api.check_policy') + def test_update(self, mock_policy, mock_volume_types_get, + mock_group_type_get, mock_group, + mock_update_quota, mock_cast_create_group, + mock_volume_get_all, mock_rpc_update_group): + vol_type_dict = {'id': fake.VOLUME_TYPE_ID, + 'name': 'fake_volume_type'} + vol_type = objects.VolumeType(self.ctxt, **vol_type_dict) + + mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] + mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} + name = "test_group" + description = "this is a test group" + grp = utils.create_group(self.ctxt, group_type_id = fake.GROUP_TYPE_ID, + volume_type_ids = [fake.VOLUME_TYPE_ID], + availability_zone = 'nova', host = None, + name = name, description = description, + status = fields.GroupStatus.CREATING) + mock_group.return_value = grp + + ret_group = self.group_api.create(self.ctxt, name, description, + fake.GROUP_TYPE_ID, + [fake.VOLUME_TYPE_ID], + availability_zone = 'nova') + self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) + + ret_group.volume_types = [vol_type] + ret_group.host = "test_host@fakedrv#fakepool" + ret_group.status = fields.GroupStatus.AVAILABLE + ret_group.id = fake.GROUP_ID + + vol1 = utils.create_volume( + self.ctxt, host = ret_group.host, + availability_zone = ret_group.availability_zone, + volume_type_id = fake.VOLUME_TYPE_ID) + + vol2 = utils.create_volume( + self.ctxt, host = ret_group.host, + availability_zone = ret_group.availability_zone, + volume_type_id = fake.VOLUME_TYPE_ID, + group_id = fake.GROUP_ID) + vol2_dict = { + 'id': vol2.id, + 'group_id': fake.GROUP_ID, + 'volume_type_id': fake.VOLUME_TYPE_ID, + 'availability_zone': ret_group.availability_zone, + 'host': ret_group.host, + 'status': 'available', + } + mock_volume_get_all.return_value = [vol2_dict] + + new_name = "new_group_name" + new_desc = "this is a new group" + self.group_api.update(self.ctxt, ret_group, new_name, new_desc, + vol1.id, vol2.id) + mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id) + mock_rpc_update_group.assert_called_once_with(self.ctxt, ret_group, + add_volumes = vol1.id, + remove_volumes = vol2.id) + + @mock.patch('cinder.objects.GroupSnapshot.get_by_id') + @mock.patch('cinder.group.api.check_policy') + def test_get_group_snapshot(self, mock_policy, mock_group_snap): + fake_group_snap = 'fake_group_snap' + mock_group_snap.return_value = fake_group_snap + grp_snap = self.group_api.get_group_snapshot( + self.ctxt, fake.GROUP_SNAPSHOT_ID) + self.assertEqual(fake_group_snap, grp_snap) + + @ddt.data(True, False) + @mock.patch('cinder.objects.GroupSnapshotList.get_all') + @mock.patch('cinder.objects.GroupSnapshotList.get_all_by_project') + @mock.patch('cinder.group.api.check_policy') + def test_get_all_group_snapshots(self, is_admin, mock_policy, + mock_get_all_by_project, + mock_get_all): + fake_group_snaps = ['fake_group_snap1', 'fake_group_snap2'] + fake_group_snaps_by_project = ['fake_group_snap1'] + mock_get_all.return_value = fake_group_snaps + mock_get_all_by_project.return_value = fake_group_snaps_by_project + + if is_admin: + grp_snaps = self.group_api.get_all_group_snapshots( + self.ctxt, search_opts={'all_tenants': True}) + 
self.assertEqual(fake_group_snaps, grp_snaps) + else: + grp_snaps = self.group_api.get_all_group_snapshots( + self.user_ctxt) + self.assertEqual(fake_group_snaps_by_project, grp_snaps) + + @mock.patch('cinder.objects.GroupSnapshot') + @mock.patch('cinder.group.api.check_policy') + def test_update_group_snapshot(self, mock_policy, mock_group_snap): + grp_snap_update = {"name": "new_name", + "description": "This is a new description"} + self.group_api.update_group_snapshot(self.ctxt, mock_group_snap, + grp_snap_update) + mock_group_snap.update.assert_called_once_with(grp_snap_update) + mock_group_snap.save.assert_called_once_with() + + @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group_snapshot') + @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_snapshot') + @mock.patch('cinder.volume.api.API.create_snapshots_in_db') + @mock.patch('cinder.objects.Group') + @mock.patch('cinder.objects.GroupSnapshot') + @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') + @mock.patch('cinder.group.api.check_policy') + def test_create_delete_group_snapshot(self, mock_policy, + mock_snap_get_all, + mock_group_snap, mock_group, + mock_create_in_db, + mock_create_api, mock_delete_api): + name = "fake_name" + description = "fake description" + mock_group.id = fake.GROUP_ID + mock_group.volumes = [] + ret_group_snap = self.group_api.create_group_snapshot( + self.ctxt, mock_group, name, description) + mock_snap_get_all.return_value = [] + + options = {'group_id': fake.GROUP_ID, + 'user_id': self.ctxt.user_id, + 'project_id': self.ctxt.project_id, + 'status': "creating", + 'name': name, + 'description': description} + mock_group_snap.assert_called_once_with(self.ctxt, **options) + ret_group_snap.create.assert_called_once_with() + mock_create_in_db.assert_called_once_with(self.ctxt, [], + ret_group_snap.name, + ret_group_snap.description, + None, + ret_group_snap.id) + mock_create_api.assert_called_once_with(self.ctxt, ret_group_snap) + + self.group_api.delete_group_snapshot(self.ctxt, ret_group_snap) + mock_delete_api.assert_called_once_with(mock.ANY, ret_group_snap) + + @mock.patch('cinder.volume.volume_types.get_volume_type') + @mock.patch('cinder.db.group_volume_type_mapping_create') + @mock.patch('cinder.volume.api.API.create') + @mock.patch('cinder.objects.GroupSnapshot.get_by_id') + @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') + @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src') + @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') + def test_create_group_from_snap(self, mock_volume_get_all, + mock_rpc_create_group_from_src, + mock_snap_get_all, mock_group_snap_get, + mock_volume_api_create, + mock_mapping_create, + mock_get_volume_type): + vol_type = utils.create_volume_type(self.ctxt, + name = 'fake_volume_type') + mock_get_volume_type.return_value = vol_type + + grp_snap = utils.create_group_snapshot( + self.ctxt, fake.GROUP_ID, + group_type_id = fake.GROUP_TYPE_ID, + status = fields.GroupStatus.CREATING) + mock_group_snap_get.return_value = grp_snap + + vol1 = utils.create_volume( + self.ctxt, + availability_zone = 'nova', + volume_type_id = vol_type['id'], + group_id = fake.GROUP_ID) + + snap = utils.create_snapshot(self.ctxt, vol1.id, + volume_type_id = vol_type['id'], + status = fields.GroupStatus.CREATING) + mock_snap_get_all.return_value = [snap] + + name = "test_group" + description = "this is a test group" + grp = utils.create_group(self.ctxt, group_type_id = fake.GROUP_TYPE_ID, + volume_type_ids = 
[vol_type['id']], + availability_zone = 'nova', + name = name, description = description, + group_snapshot_id = grp_snap.id, + status = fields.GroupStatus.CREATING) + + vol2 = utils.create_volume( + self.ctxt, + availability_zone = grp.availability_zone, + volume_type_id = vol_type['id'], + group_id = grp.id, + snapshot_id = snap.id) + mock_volume_get_all.return_value = [vol2] + + self.group_api._create_group_from_group_snapshot(self.ctxt, grp, + grp_snap.id) + + mock_volume_api_create.assert_called_once_with( + self.ctxt, 1, None, None, + availability_zone = grp.availability_zone, + group_snapshot = grp_snap, + group = grp, + snapshot = snap, + volume_type = vol_type) + + mock_rpc_create_group_from_src.assert_called_once_with( + self.ctxt, grp, grp_snap) + + vol2.destroy() + grp.destroy() + snap.destroy() + vol1.destroy() + grp_snap.destroy() + db.volume_type_destroy(self.ctxt, vol_type['id']) + + @mock.patch('cinder.volume.volume_types.get_volume_type') + @mock.patch('cinder.db.group_volume_type_mapping_create') + @mock.patch('cinder.volume.api.API.create') + @mock.patch('cinder.objects.Group.get_by_id') + @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src') + @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') + @mock.patch('cinder.group.api.check_policy') + def test_create_group_from_group(self, mock_policy, mock_volume_get_all, + mock_rpc_create_group_from_src, + mock_group_get, + mock_volume_api_create, + mock_mapping_create, + mock_get_volume_type): + vol_type = utils.create_volume_type(self.ctxt, + name = 'fake_volume_type') + mock_get_volume_type.return_value = vol_type + + grp = utils.create_group(self.ctxt, group_type_id = fake.GROUP_TYPE_ID, + volume_type_ids = [vol_type['id']], + availability_zone = 'nova', + status = fields.GroupStatus.CREATING) + mock_group_get.return_value = grp + + vol = utils.create_volume( + self.ctxt, + availability_zone = grp.availability_zone, + volume_type_id = fake.VOLUME_TYPE_ID, + group_id = grp.id) + mock_volume_get_all.return_value = [vol] + + grp2 = utils.create_group(self.ctxt, + group_type_id = fake.GROUP_TYPE_ID, + volume_type_ids = [vol_type['id']], + availability_zone = 'nova', + source_group_id = grp.id, + status = fields.GroupStatus.CREATING) + + vol2 = utils.create_volume( + self.ctxt, + availability_zone = grp.availability_zone, + volume_type_id = vol_type['id'], + group_id = grp2.id, + source_volid = vol.id) + + self.group_api._create_group_from_source_group(self.ctxt, grp2, + grp.id) + + mock_volume_api_create.assert_called_once_with( + self.ctxt, 1, None, None, + availability_zone = grp.availability_zone, + source_group = grp, + group = grp2, + source_volume = vol, + volume_type = vol_type) + + mock_rpc_create_group_from_src.assert_called_once_with( + self.ctxt, grp2, None, grp) + + vol2.destroy() + grp2.destroy() + vol.destroy() + grp.destroy() + db.volume_type_destroy(self.ctxt, vol_type['id']) + + @mock.patch('cinder.group.api.API._create_group_from_group_snapshot') + @mock.patch('cinder.group.api.API._create_group_from_source_group') + @mock.patch('cinder.group.api.API.update_quota') + @mock.patch('cinder.objects.Group') + @mock.patch('cinder.group.api.check_policy') + def test_create_from_src(self, mock_policy, mock_group, mock_update_quota, + mock_create_from_group, mock_create_from_snap): + name = "test_group" + description = "this is a test group" + grp = utils.create_group(self.ctxt, group_type_id = fake.GROUP_TYPE_ID, + volume_type_ids = [fake.VOLUME_TYPE_ID], + availability_zone = 'nova', + 
name = name, description = description, + status = fields.GroupStatus.CREATING, + group_snapshot_id = fake.GROUP_SNAPSHOT_ID, + source_group_id = fake.GROUP_ID) + mock_group.return_value = grp + + ret_group = self.group_api.create_from_src( + self.ctxt, name, description, + group_snapshot_id = fake.GROUP_SNAPSHOT_ID, + source_group_id = None) + self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) + mock_create_from_snap.assert_called_once_with( + self.ctxt, grp, fake.GROUP_SNAPSHOT_ID) diff --git a/cinder/tests/unit/group/test_groups_manager.py b/cinder/tests/unit/group/test_groups_manager.py new file mode 100644 index 000000000..18515be98 --- /dev/null +++ b/cinder/tests/unit/group/test_groups_manager.py @@ -0,0 +1,732 @@ +# Copyright (C) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from oslo_config import cfg +from oslo_utils import importutils + +from cinder import context +from cinder import db +from cinder import exception +from cinder import objects +from cinder.objects import fields +from cinder import quota +from cinder import test +from cinder.tests.unit import conf_fixture +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import utils as tests_utils +import cinder.volume +from cinder.volume import configuration as conf +from cinder.volume import driver +from cinder.volume import utils as volutils + +GROUP_QUOTAS = quota.GROUP_QUOTAS +CONF = cfg.CONF + + +class GroupManagerTestCase(test.TestCase): + + def setUp(self): + super(GroupManagerTestCase, self).setUp() + self.volume = importutils.import_object(CONF.volume_manager) + self.configuration = mock.Mock(conf.Configuration) + self.context = context.get_admin_context() + self.context.user_id = fake.USER_ID + self.project_id = fake.PROJECT3_ID + self.context.project_id = self.project_id + self.volume.driver.set_initialized() + self.volume.stats = {'allocated_capacity_gb': 0, + 'pools': {}} + self.volume_api = cinder.volume.api.API() + + def test_delete_volume_in_group(self): + """Test deleting a volume that's tied to a group fails.""" + volume_api = cinder.volume.api.API() + volume_params = {'status': 'available', + 'group_id': fake.GROUP_ID} + volume = tests_utils.create_volume(self.context, **volume_params) + self.assertRaises(exception.InvalidVolume, + volume_api.delete, self.context, volume) + + @mock.patch.object(GROUP_QUOTAS, "reserve", + return_value=["RESERVATION"]) + @mock.patch.object(GROUP_QUOTAS, "commit") + @mock.patch.object(GROUP_QUOTAS, "rollback") + @mock.patch.object(driver.VolumeDriver, + "delete_group", + return_value=({'status': ( + fields.GroupStatus.DELETED)}, [])) + def test_create_delete_group(self, fake_delete_grp, + fake_rollback, + fake_commit, fake_reserve): + """Test group can be created and deleted.""" + + def fake_driver_create_grp(context, group): + """Make sure that the pool is part of the host.""" + self.assertIn('host', group) + host = 
group.host + pool = volutils.extract_host(host, level='pool') + self.assertEqual('fakepool', pool) + return {'status': fields.GroupStatus.AVAILABLE} + + self.mock_object(self.volume.driver, 'create_group', + fake_driver_create_grp) + + group = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + volume_type_ids=[fake.VOLUME_TYPE_ID], + host='fakehost@fakedrv#fakepool', + group_type_id=fake.GROUP_TYPE_ID) + group = objects.Group.get_by_id(self.context, group.id) + self.assertEqual(0, len(self.notifier.notifications), + self.notifier.notifications) + self.volume.create_group(self.context, group) + self.assertEqual(2, len(self.notifier.notifications), + self.notifier.notifications) + msg = self.notifier.notifications[0] + self.assertEqual('group.create.start', msg['event_type']) + expected = { + 'status': fields.GroupStatus.AVAILABLE, + 'name': 'test_group', + 'availability_zone': 'nova', + 'tenant_id': self.context.project_id, + 'created_at': 'DONTCARE', + 'user_id': fake.USER_ID, + 'group_id': group.id, + 'group_type': fake.GROUP_TYPE_ID + } + self.assertDictMatch(expected, msg['payload']) + msg = self.notifier.notifications[1] + self.assertEqual('group.create.end', msg['event_type']) + self.assertDictMatch(expected, msg['payload']) + self.assertEqual( + group.id, + objects.Group.get_by_id(context.get_admin_context(), + group.id).id) + + self.volume.delete_group(self.context, group) + grp = objects.Group.get_by_id( + context.get_admin_context(read_deleted='yes'), group.id) + self.assertEqual(fields.GroupStatus.DELETED, grp.status) + self.assertEqual(4, len(self.notifier.notifications), + self.notifier.notifications) + msg = self.notifier.notifications[2] + self.assertEqual('group.delete.start', msg['event_type']) + self.assertDictMatch(expected, msg['payload']) + msg = self.notifier.notifications[3] + self.assertEqual('group.delete.end', msg['event_type']) + expected['status'] = fields.GroupStatus.DELETED + self.assertDictMatch(expected, msg['payload']) + self.assertRaises(exception.NotFound, + objects.Group.get_by_id, + self.context, + group.id) + + @mock.patch.object(GROUP_QUOTAS, "reserve", + return_value=["RESERVATION"]) + @mock.patch.object(GROUP_QUOTAS, "commit") + @mock.patch.object(GROUP_QUOTAS, "rollback") + @mock.patch.object(driver.VolumeDriver, + "create_group", + return_value={'status': 'available'}) + @mock.patch.object(driver.VolumeDriver, + "update_group") + def test_update_group(self, fake_update_grp, + fake_create_grp, fake_rollback, + fake_commit, fake_reserve): + """Test group can be updated.""" + group = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + volume_type_ids=[fake.VOLUME_TYPE_ID], + group_type_id=fake.GROUP_TYPE_ID, + host=CONF.host) + self.volume.create_group(self.context, group) + + volume = tests_utils.create_volume( + self.context, + group_id=group.id, + volume_type_id=fake.VOLUME_TYPE_ID, + status='available', + host=group.host) + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + + volume2 = tests_utils.create_volume( + self.context, + group_id=None, + volume_type_id=fake.VOLUME_TYPE_ID, + status='available', + host=group.host) + volume_id2 = volume2['id'] + self.volume.create_volume(self.context, volume_id2) + + fake_update_grp.return_value = ( + {'status': fields.GroupStatus.AVAILABLE}, + [{'id': volume_id2, 'status': 'available'}], + [{'id': volume_id, 'status': 'available'}]) + + self.volume.update_group(self.context, group, + 
add_volumes=volume_id2, + remove_volumes=volume_id) + grp = objects.Group.get_by_id(self.context, group.id) + expected = { + 'status': fields.GroupStatus.AVAILABLE, + 'name': 'test_group', + 'availability_zone': 'nova', + 'tenant_id': self.context.project_id, + 'created_at': 'DONTCARE', + 'user_id': fake.USER_ID, + 'group_id': group.id, + 'group_type': fake.GROUP_TYPE_ID + } + self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status) + self.assertEqual(10, len(self.notifier.notifications), + self.notifier.notifications) + msg = self.notifier.notifications[6] + self.assertEqual('group.update.start', msg['event_type']) + self.assertDictMatch(expected, msg['payload']) + msg = self.notifier.notifications[8] + self.assertEqual('group.update.end', msg['event_type']) + self.assertDictMatch(expected, msg['payload']) + grpvolumes = db.volume_get_all_by_generic_group(self.context, group.id) + grpvol_ids = [grpvol['id'] for grpvol in grpvolumes] + # Verify volume is removed. + self.assertNotIn(volume_id, grpvol_ids) + # Verify volume is added. + self.assertIn(volume_id2, grpvol_ids) + + volume3 = tests_utils.create_volume( + self.context, + group_id=None, + host=group.host, + volume_type_id=fake.VOLUME_TYPE_ID, + status='wrong-status') + volume_id3 = volume3['id'] + + volume_get_orig = self.volume.db.volume_get + self.volume.db.volume_get = mock.Mock( + return_value={'status': 'wrong_status', + 'id': volume_id3}) + # Try to add a volume in wrong status + self.assertRaises(exception.InvalidVolume, + self.volume.update_group, + self.context, + group, + add_volumes=volume_id3, + remove_volumes=None) + self.volume.db.volume_get.reset_mock() + self.volume.db.volume_get = volume_get_orig + + @mock.patch.object(driver.VolumeDriver, + "create_group", + return_value={'status': 'available'}) + @mock.patch.object(driver.VolumeDriver, + "delete_group", + return_value=({'status': 'deleted'}, [])) + @mock.patch.object(driver.VolumeDriver, + "create_group_snapshot", + return_value={'status': 'available'}) + @mock.patch.object(driver.VolumeDriver, + "delete_group_snapshot", + return_value=({'status': 'deleted'}, [])) + @mock.patch.object(driver.VolumeDriver, + "create_group_from_src", + return_value=(None, None)) + @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' + 'create_volume_from_snapshot') + @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' + 'create_cloned_volume') + def test_create_group_from_src(self, + mock_create_cloned_vol, + mock_create_vol_from_snap, + mock_create_from_src, + mock_delete_grpsnap, + mock_create_grpsnap, + mock_delete_grp, + mock_create_grp): + """Test group can be created and deleted.""" + group = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + status=fields.GroupStatus.AVAILABLE, + volume_type_ids=[fake.VOLUME_TYPE_ID], + group_type_id=fake.GROUP_TYPE_ID, + host=CONF.host) + volume = tests_utils.create_volume( + self.context, + group_id=group.id, + status='available', + host=group.host, + volume_type_id=fake.VOLUME_TYPE_ID, + size=1) + volume_id = volume['id'] + group_snapshot_returns = self._create_group_snapshot(group.id, + [volume_id]) + group_snapshot = group_snapshot_returns[0] + snapshot_id = group_snapshot_returns[1][0]['id'] + + # Create group from source group snapshot. 
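+        # group2 references group_snapshot through group_snapshot_id and
+        # its volume is created from the member snapshot, so after
+        # create_group_from_src the group is expected to come back
+        # AVAILABLE and to have emitted group.create.start/end.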
+ group2 = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + group_snapshot_id=group_snapshot.id, + volume_type_ids=[fake.VOLUME_TYPE_ID], + group_type_id=fake.GROUP_TYPE_ID, + host=CONF.host) + group2 = objects.Group.get_by_id(self.context, group2.id) + volume2 = tests_utils.create_volume( + self.context, + group_id=group2.id, + snapshot_id=snapshot_id, + status='available', + host=group2.host, + volume_type_id=fake.VOLUME_TYPE_ID) + self.volume.create_volume(self.context, volume2.id, volume=volume2) + self.volume.create_group_from_src( + self.context, group2, group_snapshot=group_snapshot) + grp2 = objects.Group.get_by_id(self.context, group2.id) + expected = { + 'status': fields.GroupStatus.AVAILABLE, + 'name': 'test_group', + 'availability_zone': 'nova', + 'tenant_id': self.context.project_id, + 'created_at': 'DONTCARE', + 'user_id': fake.USER_ID, + 'group_id': group2.id, + 'group_type': fake.GROUP_TYPE_ID, + } + self.assertEqual(fields.GroupStatus.AVAILABLE, grp2.status) + self.assertEqual(group2.id, grp2['id']) + self.assertEqual(group_snapshot.id, grp2['group_snapshot_id']) + self.assertIsNone(grp2['source_group_id']) + + msg = self.notifier.notifications[2] + self.assertEqual('group.create.start', msg['event_type']) + self.assertDictMatch(expected, msg['payload']) + msg = self.notifier.notifications[4] + self.assertEqual('group.create.end', msg['event_type']) + self.assertDictMatch(expected, msg['payload']) + + if len(self.notifier.notifications) > 6: + self.assertFalse(self.notifier.notifications[6], + self.notifier.notifications) + self.assertEqual(6, len(self.notifier.notifications), + self.notifier.notifications) + + self.volume.delete_group(self.context, group2) + + if len(self.notifier.notifications) > 9: + self.assertFalse(self.notifier.notifications[10], + self.notifier.notifications) + self.assertEqual(9, len(self.notifier.notifications), + self.notifier.notifications) + + msg = self.notifier.notifications[6] + self.assertEqual('group.delete.start', msg['event_type']) + expected['status'] = fields.GroupStatus.AVAILABLE + self.assertDictMatch(expected, msg['payload']) + msg = self.notifier.notifications[8] + self.assertEqual('group.delete.end', msg['event_type']) + expected['status'] = fields.GroupStatus.DELETED + self.assertDictMatch(expected, msg['payload']) + + grp2 = objects.Group.get_by_id( + context.get_admin_context(read_deleted='yes'), group2.id) + self.assertEqual(fields.GroupStatus.DELETED, grp2.status) + self.assertRaises(exception.NotFound, + objects.Group.get_by_id, + self.context, + group2.id) + + # Create group from source group + group3 = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + source_group_id=group.id, + volume_type_ids=[fake.VOLUME_TYPE_ID], + group_type_id=fake.GROUP_TYPE_ID, + host=CONF.host) + volume3 = tests_utils.create_volume( + self.context, + group_id=group3.id, + source_volid=volume_id, + status='available', + host=group3.host, + volume_type_id=fake.VOLUME_TYPE_ID) + self.volume.create_volume(self.context, volume3.id, volume=volume3) + self.volume.create_group_from_src( + self.context, group3, source_group=group) + + grp3 = objects.Group.get_by_id(self.context, group3.id) + + self.assertEqual(fields.GroupStatus.AVAILABLE, grp3.status) + self.assertEqual(group3.id, grp3.id) + self.assertEqual(group.id, grp3.source_group_id) + self.assertIsNone(grp3.group_snapshot_id) + + self.volume.delete_group_snapshot(self.context, group_snapshot) + 
self.volume.delete_group(self.context, group) + + def test_sort_snapshots(self): + vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1', + 'snapshot_id': fake.SNAPSHOT_ID, + 'group_id': fake.GROUP_ID} + vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2', + 'snapshot_id': fake.SNAPSHOT2_ID, + 'group_id': fake.GROUP_ID} + vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3', + 'snapshot_id': fake.SNAPSHOT3_ID, + 'group_id': fake.GROUP_ID} + snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1', + 'group_snapshot_id': fake.GROUP_ID} + snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2', + 'group_snapshot_id': fake.GROUP_ID} + snp3 = {'id': fake.SNAPSHOT3_ID, 'name': 'snap 3', + 'group_snapshot_id': fake.GROUP_ID} + snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1) + snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2) + snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3) + volumes = [] + snapshots = [] + volumes.append(vol1) + volumes.append(vol2) + volumes.append(vol3) + snapshots.append(snp2_obj) + snapshots.append(snp3_obj) + snapshots.append(snp1_obj) + i = 0 + for vol in volumes: + snap = snapshots[i] + i += 1 + self.assertNotEqual(vol['snapshot_id'], snap.id) + sorted_snaps = self.volume._sort_snapshots(volumes, snapshots) + i = 0 + for vol in volumes: + snap = sorted_snaps[i] + i += 1 + self.assertEqual(vol['snapshot_id'], snap.id) + + snapshots[2]['id'] = fake.WILL_NOT_BE_FOUND_ID + self.assertRaises(exception.SnapshotNotFound, + self.volume._sort_snapshots, + volumes, snapshots) + + self.assertRaises(exception.InvalidInput, + self.volume._sort_snapshots, + volumes, []) + + def test_sort_source_vols(self): + vol1 = {'id': '1', 'name': 'volume 1', + 'source_volid': '1', + 'group_id': '2'} + vol2 = {'id': '2', 'name': 'volume 2', + 'source_volid': '2', + 'group_id': '2'} + vol3 = {'id': '3', 'name': 'volume 3', + 'source_volid': '3', + 'group_id': '2'} + src_vol1 = {'id': '1', 'name': 'source vol 1', + 'group_id': '1'} + src_vol2 = {'id': '2', 'name': 'source vol 2', + 'group_id': '1'} + src_vol3 = {'id': '3', 'name': 'source vol 3', + 'group_id': '1'} + volumes = [] + src_vols = [] + volumes.append(vol1) + volumes.append(vol2) + volumes.append(vol3) + src_vols.append(src_vol2) + src_vols.append(src_vol3) + src_vols.append(src_vol1) + i = 0 + for vol in volumes: + src_vol = src_vols[i] + i += 1 + self.assertNotEqual(vol['source_volid'], src_vol['id']) + sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols) + i = 0 + for vol in volumes: + src_vol = sorted_src_vols[i] + i += 1 + self.assertEqual(vol['source_volid'], src_vol['id']) + + src_vols[2]['id'] = '9999' + self.assertRaises(exception.VolumeNotFound, + self.volume._sort_source_vols, + volumes, src_vols) + + self.assertRaises(exception.InvalidInput, + self.volume._sort_source_vols, + volumes, []) + + def _create_group_snapshot(self, group_id, volume_ids, size='0'): + """Create a group_snapshot object.""" + grpsnap = objects.GroupSnapshot(self.context) + grpsnap.user_id = fake.USER_ID + grpsnap.project_id = fake.PROJECT_ID + grpsnap.group_id = group_id + grpsnap.status = fields.GroupStatus.CREATING + grpsnap.create() + + # Create snapshot list + for volume_id in volume_ids: + snaps = [] + snap = objects.Snapshot(context.get_admin_context()) + snap.volume_size = size + snap.user_id = fake.USER_ID + snap.project_id = fake.PROJECT_ID + snap.volume_id = volume_id + snap.status = fields.SnapshotStatus.AVAILABLE + snap.group_snapshot_id = grpsnap.id + snap.create() + snaps.append(snap) + + return 
grpsnap, snaps + + @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') + @mock.patch('cinder.volume.driver.VolumeDriver.create_group', + autospec=True, + return_value={'status': 'available'}) + @mock.patch('cinder.volume.driver.VolumeDriver.delete_group', + autospec=True, + return_value=({'status': 'deleted'}, [])) + @mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot', + autospec=True, + return_value=({'status': 'available'}, [])) + @mock.patch('cinder.volume.driver.VolumeDriver.delete_group_snapshot', + autospec=True, + return_value=({'status': 'deleted'}, [])) + def test_create_delete_group_snapshot(self, + mock_del_grpsnap, + mock_create_grpsnap, + mock_del_grp, + _mock_create_grp, + mock_notify): + """Test group_snapshot can be created and deleted.""" + group = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + volume_type_ids=[fake.VOLUME_TYPE_ID], + group_type_id=fake.GROUP_TYPE_ID, + host=CONF.host) + volume = tests_utils.create_volume( + self.context, + group_id=group.id, + host=group.host, + volume_type_id=fake.VOLUME_TYPE_ID) + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + + self.assert_notify_called(mock_notify, + (['INFO', 'volume.create.start'], + ['INFO', 'volume.create.end'])) + + group_snapshot_returns = self._create_group_snapshot(group.id, + [volume_id]) + group_snapshot = group_snapshot_returns[0] + self.volume.create_group_snapshot(self.context, group_snapshot) + self.assertEqual(group_snapshot.id, + objects.GroupSnapshot.get_by_id( + context.get_admin_context(), + group_snapshot.id).id) + + self.assert_notify_called(mock_notify, + (['INFO', 'volume.create.start'], + ['INFO', 'volume.create.end'], + ['INFO', 'group_snapshot.create.start'], + ['INFO', 'snapshot.create.start'], + ['INFO', 'group_snapshot.create.end'], + ['INFO', 'snapshot.create.end'])) + + self.volume.delete_group_snapshot(self.context, group_snapshot) + + self.assert_notify_called(mock_notify, + (['INFO', 'volume.create.start'], + ['INFO', 'volume.create.end'], + ['INFO', 'group_snapshot.create.start'], + ['INFO', 'snapshot.create.start'], + ['INFO', 'group_snapshot.create.end'], + ['INFO', 'snapshot.create.end'], + ['INFO', 'group_snapshot.delete.start'], + ['INFO', 'snapshot.delete.start'], + ['INFO', 'group_snapshot.delete.end'], + ['INFO', 'snapshot.delete.end'])) + + grpsnap = objects.GroupSnapshot.get_by_id( + context.get_admin_context(read_deleted='yes'), + group_snapshot.id) + self.assertEqual('deleted', grpsnap.status) + self.assertRaises(exception.NotFound, + objects.GroupSnapshot.get_by_id, + self.context, + group_snapshot.id) + + self.volume.delete_group(self.context, group) + + self.assertTrue(mock_create_grpsnap.called) + self.assertTrue(mock_del_grpsnap.called) + self.assertTrue(mock_del_grp.called) + + @mock.patch('cinder.volume.driver.VolumeDriver.create_group', + return_value={'status': 'available'}) + @mock.patch('cinder.volume.driver.VolumeDriver.delete_group', + return_value=({'status': 'deleted'}, [])) + def test_delete_group_correct_host(self, + mock_del_grp, + _mock_create_grp): + """Test group can be deleted. + + Test group can be deleted when volumes are on + the correct volume node. 
+ """ + group = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + volume_type_ids=[fake.VOLUME_TYPE_ID], + group_type_id=fake.GROUP_TYPE_ID) + volume = tests_utils.create_volume( + self.context, + group_id=group.id, + host='host1@backend1#pool1', + status='creating', + volume_type_id=fake.VOLUME_TYPE_ID, + size=1) + self.volume.host = 'host1@backend1' + self.volume.create_volume(self.context, volume.id, volume=volume) + + self.volume.delete_group(self.context, group) + grp = objects.Group.get_by_id( + context.get_admin_context(read_deleted='yes'), + group.id) + self.assertEqual(fields.GroupStatus.DELETED, grp.status) + self.assertRaises(exception.NotFound, + objects.Group.get_by_id, + self.context, + group.id) + + self.assertTrue(mock_del_grp.called) + + @mock.patch('cinder.volume.driver.VolumeDriver.create_group', + return_value={'status': 'available'}) + def test_delete_group_wrong_host(self, *_mock_create_grp): + """Test group cannot be deleted. + + Test group cannot be deleted when volumes in the + group are not local to the volume node. + """ + group = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + volume_type_ids=[fake.VOLUME_TYPE_ID], + group_type_id=fake.GROUP_TYPE_ID) + volume = tests_utils.create_volume( + self.context, + group_id=group.id, + host='host1@backend1#pool1', + status='creating', + volume_type_id=fake.VOLUME_TYPE_ID, + size=1) + self.volume.host = 'host1@backend2' + self.volume.create_volume(self.context, volume.id, volume=volume) + + self.assertRaises(exception.InvalidVolume, + self.volume.delete_group, + self.context, + group) + grp = objects.Group.get_by_id(self.context, group.id) + # Group is not deleted + self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status) + + def test_create_volume_with_group_invalid_type(self): + """Test volume creation with group & invalid volume type.""" + vol_type = db.volume_type_create( + context.get_admin_context(), + dict(name=conf_fixture.def_vol_type, extra_specs={}) + ) + db_vol_type = db.volume_type_get(context.get_admin_context(), + vol_type.id) + + grp = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + status=fields.GroupStatus.AVAILABLE, + volume_type_ids=[db_vol_type['id']], + group_type_id=fake.GROUP_TYPE_ID, + host=CONF.host) + + fake_type = { + 'id': '9999', + 'name': 'fake', + } + vol_api = cinder.volume.api.API() + + # Volume type must be provided when creating a volume in a + # group. + self.assertRaises(exception.InvalidInput, + vol_api.create, + self.context, 1, 'vol1', 'volume 1', + group=grp) + + # Volume type must be valid. 
+ self.assertRaises(exception.InvalidInput, + vol_api.create, + self.context, 1, 'vol1', 'volume 1', + volume_type=fake_type, + group=grp) + + @mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot', + autospec=True, + return_value=({'status': 'available'}, [])) + def test_create_group_snapshot_with_bootable_volumes(self, + mock_create_grpsnap): + """Test group_snapshot can be created and deleted.""" + group = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + volume_type_ids=[fake.VOLUME_TYPE_ID], + group_type_id=fake.GROUP_TYPE_ID, + host=CONF.host) + volume = tests_utils.create_volume( + self.context, + group_id=group.id, + host=group.host, + volume_type_id=fake.VOLUME_TYPE_ID) + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + # Create a bootable volume + bootable_vol_params = {'status': 'creating', 'host': CONF.host, + 'size': 1, 'bootable': True} + bootable_vol = tests_utils.create_volume(self.context, + group_id=group.id, + **bootable_vol_params) + # Create a common volume + bootable_vol_id = bootable_vol['id'] + self.volume.create_volume(self.context, bootable_vol_id) + + volume_ids = [volume_id, bootable_vol_id] + group_snapshot_returns = self._create_group_snapshot(group.id, + volume_ids) + group_snapshot = group_snapshot_returns[0] + self.volume.create_group_snapshot(self.context, group_snapshot) + self.assertEqual(group_snapshot.id, + objects.GroupSnapshot.get_by_id( + context.get_admin_context(), + group_snapshot.id).id) + self.assertTrue(mock_create_grpsnap.called) diff --git a/cinder/tests/unit/image/fake.py b/cinder/tests/unit/image/fake.py index c9aa4a999..b89d34cec 100644 --- a/cinder/tests/unit/image/fake.py +++ b/cinder/tests/unit/image/fake.py @@ -237,8 +237,8 @@ def FakeImageService_reset(): _fakeImageService = _FakeImageService() -def stub_out_image_service(stubs): - stubs.Set(cinder.image.glance, 'get_remote_image_service', - lambda x, y: (FakeImageService(), y)) - stubs.Set(cinder.image.glance, 'get_default_image_service', - lambda: FakeImageService()) +def mock_image_service(testcase): + testcase.mock_object(cinder.image.glance, 'get_remote_image_service', + lambda x, y: (FakeImageService(), y)) + testcase.mock_object(cinder.image.glance, 'get_default_image_service', + mock.Mock(side_effect=FakeImageService)) diff --git a/cinder/tests/unit/image/test_glance.py b/cinder/tests/unit/image/test_glance.py index 34f0a0bb7..a1f66b58f 100644 --- a/cinder/tests/unit/image/test_glance.py +++ b/cinder/tests/unit/image/test_glance.py @@ -79,7 +79,7 @@ class TestGlanceImageService(test.TestCase): 1. Glance -> ImageService - This is needed so we can support multiple ImageServices (Glance, Local, etc) - 2. ImageService -> API - This is needed so we can support multple + 2. 
ImageService -> API - This is needed so we can support multiple APIs (OpenStack, EC2) """ @@ -103,15 +103,14 @@ class TestGlanceImageService(test.TestCase): self.service = self._create_image_service(client) self.context = context.RequestContext('fake', 'fake', auth_token=True) self.context.service_catalog = service_catalog - self.stubs.Set(glance.time, 'sleep', lambda s: None) + self.mock_object(glance.time, 'sleep', mock.Mock(return_value=None)) def _create_image_service(self, client): def _fake_create_glance_client(context, netloc, use_ssl, version): return client - self.stubs.Set(glance, - '_create_glance_client', - _fake_create_glance_client) + self.mock_object(glance, '_create_glance_client', + _fake_create_glance_client) client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292) return glance.GlanceImageService(client=client_wrapper) @@ -875,7 +874,7 @@ class TestGlanceImageServiceClient(test.TestCase): def setUp(self): super(TestGlanceImageServiceClient, self).setUp() self.context = context.RequestContext('fake', 'fake', auth_token=True) - self.stubs.Set(glance.time, 'sleep', lambda s: None) + self.mock_object(glance.time, 'sleep', mock.Mock(return_value=None)) def test_create_glance_client(self): self.flags(auth_strategy='keystone') @@ -888,7 +887,7 @@ class TestGlanceImageServiceClient(test.TestCase): self.assertTrue(kwargs['token']) self.assertEqual(60, kwargs['timeout']) - self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) + self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', False) self.assertIsInstance(client, MyGlanceStubClient) @@ -904,7 +903,7 @@ class TestGlanceImageServiceClient(test.TestCase): self.assertNotIn('token', kwargs) self.assertEqual(60, kwargs['timeout']) - self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) + self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', False) self.assertIsInstance(client, MyGlanceStubClient) @@ -920,7 +919,7 @@ class TestGlanceImageServiceClient(test.TestCase): self.assertTrue(kwargs['token']) self.assertNotIn('timeout', kwargs) - self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) + self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', False) self.assertIsInstance(client, MyGlanceStubClient) diff --git a/cinder/tests/unit/keymgr/fake.py b/cinder/tests/unit/keymgr/fake.py index 7612ebafb..000add23b 100644 --- a/cinder/tests/unit/keymgr/fake.py +++ b/cinder/tests/unit/keymgr/fake.py @@ -17,8 +17,8 @@ """Implementation of a fake key manager.""" -from cinder.tests.unit.keymgr import mock_key_mgr +from castellan.tests.unit.key_manager import mock_key_manager -def fake_api(): - return mock_key_mgr.MockKeyManager() +def fake_api(configuration=None): + return mock_key_manager.MockKeyManager(configuration) diff --git a/cinder/tests/unit/keymgr/mock_key_mgr.py b/cinder/tests/unit/keymgr/mock_key_mgr.py deleted file mode 100644 index 39f2caa64..000000000 --- a/cinder/tests/unit/keymgr/mock_key_mgr.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A mock implementation of a key manager that stores keys in a dictionary. - -This key manager implementation is primarily intended for testing. In -particular, it does not store keys persistently. Lack of a centralized key -store also makes this implementation unsuitable for use among different -services. - -Note: Instantiating this class multiple times will create separate key stores. -Keys created in one instance will not be accessible from other instances of -this class. -""" - -import array -import binascii -import uuid - -from cinder import exception -from cinder.keymgr import key -from cinder.keymgr import key_mgr -from cinder.volume import utils - - -class MockKeyManager(key_mgr.KeyManager): - - """Mocking manager for integration tests. - - This mock key manager implementation supports all the methods specified - by the key manager interface. This implementation stores keys within a - dictionary, and as a result, it is not acceptable for use across different - services. Side effects (e.g., raising exceptions) for each method are - handled as specified by the key manager interface. - - This key manager is not suitable for use in production deployments. - """ - - def __init__(self): - self.keys = {} - - def _generate_hex_key(self, **kwargs): - key_length = kwargs.get('key_length', 256) - # hex digit => 4 bits - hex_encoded = utils.generate_password(length=key_length // 4, - symbolgroups='0123456789ABCDEF') - return hex_encoded - - def _generate_key(self, **kwargs): - _hex = self._generate_hex_key(**kwargs) - key_bytes = array.array('B', binascii.unhexlify(_hex)).tolist() - return key.SymmetricKey('AES', key_bytes) - - def create_key(self, ctxt, **kwargs): - """Creates a key. - - This implementation returns a UUID for the created key. A - NotAuthorized exception is raised if the specified context is None. - """ - if ctxt is None: - raise exception.NotAuthorized() - - key = self._generate_key(**kwargs) - return self.store_key(ctxt, key) - - def _generate_key_id(self): - key_id = str(uuid.uuid4()) - while key_id in self.keys: - key_id = str(uuid.uuid4()) - - return key_id - - def store_key(self, ctxt, key, **kwargs): - """Stores (i.e., registers) a key with the key manager.""" - if ctxt is None: - raise exception.NotAuthorized() - - key_id = self._generate_key_id() - self.keys[key_id] = key - - return key_id - - def copy_key(self, ctxt, key_id, **kwargs): - if ctxt is None: - raise exception.NotAuthorized() - - copied_key_id = self._generate_key_id() - self.keys[copied_key_id] = self.keys[key_id] - - return copied_key_id - - def get_key(self, ctxt, key_id, **kwargs): - """Retrieves the key identified by the specified id. - - This implementation returns the key that is associated with the - specified UUID. A NotAuthorized exception is raised if the specified - context is None; a KeyError is raised if the UUID is invalid. - """ - if ctxt is None: - raise exception.NotAuthorized() - - return self.keys[key_id] - - def delete_key(self, ctxt, key_id, **kwargs): - """Deletes the key identified by the specified id. 
- - A NotAuthorized exception is raised if the context is None and a - KeyError is raised if the UUID is invalid. - """ - if ctxt is None: - raise exception.NotAuthorized() - - del self.keys[key_id] diff --git a/cinder/tests/unit/keymgr/test_barbican.py b/cinder/tests/unit/keymgr/test_barbican.py deleted file mode 100644 index 75dcc1fc2..000000000 --- a/cinder/tests/unit/keymgr/test_barbican.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test cases for the barbican key manager. -""" - -import array -import base64 -import binascii - -import mock -from oslo_config import cfg - -from cinder import exception -from cinder.keymgr import barbican -from cinder.keymgr import key as keymgr_key -from cinder.tests.unit.keymgr import test_key_mgr - -CONF = cfg.CONF -CONF.import_opt('encryption_auth_url', 'cinder.keymgr.key_mgr', group='keymgr') -CONF.import_opt('encryption_api_url', 'cinder.keymgr.key_mgr', group='keymgr') - - -class BarbicanKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): - - def _create_key_manager(self): - return barbican.BarbicanKeyManager() - - def setUp(self): - super(BarbicanKeyManagerTestCase, self).setUp() - - # Create fake auth_token - self.ctxt = mock.Mock() - self.ctxt.auth_token = "fake_token" - self.ctxt.project_id = "fake_project_id" - - # Create mock barbican client - self._build_mock_barbican() - - # Create a key_id, secret_ref, pre_hex, and hex to use - self.key_id = "d152fa13-2b41-42ca-a934-6c21566c0f40" - self.secret_ref = self.key_mgr._create_secret_ref(self.key_id, - self.mock_barbican) - self.pre_hex = "AIDxQp2++uAbKaTVDMXFYIu8PIugJGqkK0JLqkU0rhY=" - self.hex = ("0080f1429dbefae01b29a4d50cc5c5608bbc3c8ba0246aa42b424baa4" - "534ae16") - self.original_api_url = CONF.keymgr.encryption_api_url - self.addCleanup(self._restore) - - def _restore(self): - if hasattr(self, 'original_key'): - keymgr_key.SymmetricKey = self.original_key - if hasattr(self, 'original_base64'): - base64.b64encode = self.original_base64 - if hasattr(self, 'original_api_url'): - CONF.keymgr.encryption_api_url = self.original_api_url - - def _build_mock_barbican(self): - self.mock_barbican = mock.MagicMock(name='mock_barbican') - - # Set commonly used methods - self.get = self.mock_barbican.secrets.get - self.delete = self.mock_barbican.secrets.delete - self.store = self.mock_barbican.secrets.store - self.create = self.mock_barbican.secrets.create - - self.key_mgr._barbican_client = self.mock_barbican - self.key_mgr._current_context = self.ctxt - - def _build_mock_symKey(self): - self.mock_symKey = mock.Mock() - - def fake_sym_key(alg, key): - self.mock_symKey.get_encoded.return_value = key - self.mock_symKey.get_algorithm.return_value = alg - return self.mock_symKey - self.original_key = keymgr_key.SymmetricKey - keymgr_key.SymmetricKey = fake_sym_key - - def _build_mock_base64(self): - - def fake_base64_b64encode(string): - return self.pre_hex - - 
self.original_base64 = base64.b64encode - base64.b64encode = fake_base64_b64encode - - def test_copy_key(self): - # Create metadata for original secret - original_secret_metadata = mock.Mock() - original_secret_metadata.algorithm = 'fake_algorithm' - original_secret_metadata.bit_length = 'fake_bit_length' - original_secret_metadata.name = 'original_name' - original_secret_metadata.expiration = 'fake_expiration' - original_secret_metadata.mode = 'fake_mode' - content_types = {'default': 'fake_type'} - original_secret_metadata.content_types = content_types - original_secret_data = mock.Mock() - original_secret_metadata.payload = original_secret_data - self.get.return_value = original_secret_metadata - - # Create the mock key - self._build_mock_symKey() - - # Copy the original - self.key_mgr.copy_key(self.ctxt, self.key_id) - - # Assert proper methods were called - self.get.assert_called_once_with(self.secret_ref) - self.create.assert_called_once_with( - original_secret_metadata.name, - self.mock_symKey.get_encoded(), - content_types['default'], - 'base64', - original_secret_metadata.algorithm, - original_secret_metadata.bit_length, - None, - original_secret_metadata.mode, - original_secret_metadata.expiration) - self.create.return_value.store.assert_called_once_with() - - def test_copy_null_context(self): - self.key_mgr._barbican_client = None - self.assertRaises(exception.NotAuthorized, - self.key_mgr.copy_key, None, self.key_id) - - def test_create_key(self): - # Create order_ref_url and assign return value - order_ref_url = ("http://localhost:9311/v1/None/orders/" - "4fe939b7-72bc-49aa-bd1e-e979589858af") - key_order = mock.Mock() - self.mock_barbican.orders.create_key.return_value = key_order - key_order.submit.return_value = order_ref_url - - # Create order and assign return value - order = mock.Mock() - order.secret_ref = self.secret_ref - self.mock_barbican.orders.get.return_value = order - - # Create the key, get the UUID - returned_uuid = self.key_mgr.create_key(self.ctxt) - - self.mock_barbican.orders.get.assert_called_once_with(order_ref_url) - self.assertEqual(self.key_id, returned_uuid) - - def test_create_null_context(self): - self.key_mgr._barbican_client = None - self.assertRaises(exception.NotAuthorized, - self.key_mgr.create_key, None) - - def test_delete_null_context(self): - self.key_mgr._barbican_client = None - self.assertRaises(exception.NotAuthorized, - self.key_mgr.delete_key, None, self.key_id) - - def test_delete_key(self): - self.key_mgr.delete_key(self.ctxt, self.key_id) - self.delete.assert_called_once_with(self.secret_ref) - - def test_delete_unknown_key(self): - self.assertRaises(exception.KeyManagerError, - self.key_mgr.delete_key, self.ctxt, None) - - def test_get_key(self): - self._build_mock_base64() - content_type = 'application/octet-stream' - - key = self.key_mgr.get_key(self.ctxt, self.key_id, content_type) - - self.get.assert_called_once_with(self.secret_ref) - encoded = array.array('B', binascii.unhexlify(self.hex)).tolist() - self.assertEqual(encoded, key.get_encoded()) - - def test_get_null_context(self): - self.key_mgr._barbican_client = None - self.assertRaises(exception.NotAuthorized, - self.key_mgr.get_key, None, self.key_id) - - def test_get_unknown_key(self): - self.assertRaises(exception.KeyManagerError, - self.key_mgr.get_key, self.ctxt, None) - - def test_store_key_base64(self): - # Create Key to store - secret_key = array.array('B', [0x01, 0x02, 0xA0, 0xB3]).tolist() - _key = keymgr_key.SymmetricKey('AES', secret_key) - - # Define the 
return values - secret = mock.Mock() - self.create.return_value = secret - secret.store.return_value = self.secret_ref - - # Store the Key - returned_uuid = self.key_mgr.store_key(self.ctxt, _key, bit_length=32) - - self.create.assert_called_once_with('Cinder Volume Key', - b'AQKgsw==', - 'application/octet-stream', - 'base64', - 'AES', 32, None, 'CBC', - None) - self.assertEqual(self.key_id, returned_uuid) - - def test_store_key_plaintext(self): - # Create the plaintext key - secret_key_text = "This is a test text key." - _key = keymgr_key.SymmetricKey('AES', secret_key_text) - - # Store the Key - self.key_mgr.store_key(self.ctxt, _key, - payload_content_type='text/plain', - payload_content_encoding=None) - self.create.assert_called_once_with('Cinder Volume Key', - secret_key_text, - 'text/plain', - None, - 'AES', 256, None, 'CBC', - None) - self.create.return_value.store.assert_called_once_with() - - def test_store_null_context(self): - self.key_mgr._barbican_client = None - self.assertRaises(exception.NotAuthorized, - self.key_mgr.store_key, None, None) - - def test_null_project_id(self): - self.key_mgr._barbican_client = None - self.ctxt.project_id = None - self.assertRaises(exception.KeyManagerError, - self.key_mgr.create_key, self.ctxt) - - def test_ctxt_without_project_id(self): - self.key_mgr._barbican_client = None - del self.ctxt.project_id - self.assertRaises(exception.KeyManagerError, - self.key_mgr.create_key, self.ctxt) - - @mock.patch('cinder.keymgr.barbican.identity.v3.Token') - @mock.patch('cinder.keymgr.barbican.session.Session') - @mock.patch('cinder.keymgr.barbican.barbican_client.Client') - def test_ctxt_with_project_id(self, mock_client, mock_session, - mock_token): - # set client to None so that client creation will occur - self.key_mgr._barbican_client = None - - # mock the return values - mock_auth = mock.Mock() - mock_token.return_value = mock_auth - mock_sess = mock.Mock() - mock_session.return_value = mock_sess - - # mock the endpoint - mock_endpoint = mock.Mock() - self.key_mgr._barbican_endpoint = mock_endpoint - - self.key_mgr.create_key(self.ctxt) - - # assert proper calls occurred, including with project_id - mock_token.assert_called_once_with( - auth_url=CONF.keymgr.encryption_auth_url, - token=self.ctxt.auth_token, - project_id=self.ctxt.project_id) - mock_session.assert_called_once_with(auth=mock_auth) - mock_client.assert_called_once_with(session=mock_sess, - endpoint=mock_endpoint) - - def test_parse_barbican_api_url(self): - # assert that the correct format is handled correctly - CONF.keymgr.encryption_api_url = "http://host:port/v1/" - dummy = barbican.BarbicanKeyManager() - self.assertEqual(dummy._barbican_endpoint, "http://host:port") - - # assert that invalid api url formats will raise an exception - CONF.keymgr.encryption_api_url = "http://host:port/" - self.assertRaises(exception.KeyManagerError, - barbican.BarbicanKeyManager) - CONF.keymgr.encryption_api_url = "http://host:port/secrets" - self.assertRaises(exception.KeyManagerError, - barbican.BarbicanKeyManager) diff --git a/cinder/tests/unit/keymgr/test_conf_key_mgr.py b/cinder/tests/unit/keymgr/test_conf_key_mgr.py index 06e3b1f4f..f9669940b 100644 --- a/cinder/tests/unit/keymgr/test_conf_key_mgr.py +++ b/cinder/tests/unit/keymgr/test_conf_key_mgr.py @@ -17,40 +17,40 @@ Test cases for the conf key manager. 
""" -import array import binascii +from castellan.common.objects import symmetric_key as key from oslo_config import cfg from cinder import context from cinder import exception from cinder.keymgr import conf_key_mgr -from cinder.keymgr import key -from cinder.tests.unit.keymgr import test_key_mgr - +from cinder import test CONF = cfg.CONF -CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='keymgr') +CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='key_manager') -class ConfKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): +class ConfKeyManagerTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs) self._hex_key = '1' * 64 def _create_key_manager(self): - CONF.set_default('fixed_key', default=self._hex_key, group='keymgr') - return conf_key_mgr.ConfKeyManager() + CONF.set_default('fixed_key', default=self._hex_key, + group='key_manager') + return conf_key_mgr.ConfKeyManager(CONF) def setUp(self): super(ConfKeyManagerTestCase, self).setUp() + self.key_mgr = self._create_key_manager() self.ctxt = context.RequestContext('fake', 'fake') self.key_id = '00000000-0000-0000-0000-000000000000' - encoded = array.array('B', binascii.unhexlify(self._hex_key)).tolist() - self.key = key.SymmetricKey('AES', encoded) + encoded = bytes(binascii.unhexlify(self._hex_key)) + self.key = key.SymmetricKey('AES', len(encoded) * 8, encoded) def test___init__(self): self.assertEqual(self.key_id, self.key_mgr.key_id) @@ -65,60 +65,54 @@ class ConfKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): self.assertRaises(exception.NotAuthorized, self.key_mgr.create_key, None) - def test_store_key(self): - key_id = self.key_mgr.store_key(self.ctxt, self.key) + def test_create_key_pair(self): + self.assertRaises(NotImplementedError, + self.key_mgr.create_key_pair, self.ctxt) - actual_key = self.key_mgr.get_key(self.ctxt, key_id) + def test_create_key_pair_null_context(self): + self.assertRaises(NotImplementedError, + self.key_mgr.create_key_pair, None) + + def test_store_key(self): + key_id = self.key_mgr.store(self.ctxt, self.key) + + actual_key = self.key_mgr.get(self.ctxt, key_id) self.assertEqual(self.key, actual_key) def test_store_null_context(self): self.assertRaises(exception.NotAuthorized, - self.key_mgr.store_key, None, self.key) + self.key_mgr.store, None, self.key) def test_store_key_invalid(self): - encoded = self.key.get_encoded() - inverse_key = key.SymmetricKey('AES', [~b for b in encoded]) + encoded = bytes(binascii.unhexlify('0' * 64)) + inverse_key = key.SymmetricKey('AES', len(encoded) * 8, encoded) self.assertRaises(exception.KeyManagerError, - self.key_mgr.store_key, self.ctxt, inverse_key) - - def test_copy_key(self): - key_id = self.key_mgr.create_key(self.ctxt) - key = self.key_mgr.get_key(self.ctxt, key_id) - - copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) - copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) - - self.assertEqual(key_id, copied_key_id) - self.assertEqual(key, copied_key) - - def test_copy_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.copy_key, None, None) + self.key_mgr.store, self.ctxt, inverse_key) def test_delete_key(self): key_id = self.key_mgr.create_key(self.ctxt) - self.key_mgr.delete_key(self.ctxt, key_id) + self.key_mgr.delete(self.ctxt, key_id) # cannot delete key -- might have lingering references self.assertEqual(self.key, - self.key_mgr.get_key(self.ctxt, self.key_id)) + self.key_mgr.get(self.ctxt, self.key_id)) def 
test_delete_null_context(self): self.assertRaises(exception.NotAuthorized, - self.key_mgr.delete_key, None, None) + self.key_mgr.delete, None, None) def test_delete_unknown_key(self): self.assertRaises(exception.KeyManagerError, - self.key_mgr.delete_key, self.ctxt, None) + self.key_mgr.delete, self.ctxt, None) def test_get_key(self): self.assertEqual(self.key, - self.key_mgr.get_key(self.ctxt, self.key_id)) + self.key_mgr.get(self.ctxt, self.key_id)) def test_get_null_context(self): self.assertRaises(exception.NotAuthorized, - self.key_mgr.get_key, None, None) + self.key_mgr.get, None, None) def test_get_unknown_key(self): - self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None) + self.assertRaises(KeyError, self.key_mgr.get, self.ctxt, None) diff --git a/cinder/tests/unit/keymgr/test_key.py b/cinder/tests/unit/keymgr/test_key.py deleted file mode 100644 index 3430f05a2..000000000 --- a/cinder/tests/unit/keymgr/test_key.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test cases for the key classes. -""" - -from cinder.keymgr import key -from cinder import test - - -class KeyTestCase(test.TestCase): - - def _create_key(self): - raise NotImplementedError() - - def setUp(self): - super(KeyTestCase, self).setUp() - - self.key = self._create_key() - - -class SymmetricKeyTestCase(KeyTestCase): - - def _create_key(self): - return key.SymmetricKey(self.algorithm, self.encoded) - - def setUp(self): - self.algorithm = 'AES' - self.encoded = [0] * 32 - - super(SymmetricKeyTestCase, self).setUp() - - def test_get_algorithm(self): - self.assertEqual(self.algorithm, self.key.get_algorithm()) - - def test_get_format(self): - self.assertEqual('RAW', self.key.get_format()) - - def test_get_encoded(self): - self.assertEqual(self.encoded, self.key.get_encoded()) - - def test___eq__(self): - self.assertTrue(self.key == self.key) - - self.assertFalse(self.key is None) - self.assertFalse(None == self.key) - - def test___ne__(self): - self.assertFalse(self.key != self.key) - - self.assertTrue(self.key is not None) - self.assertTrue(None != self.key) diff --git a/cinder/tests/unit/keymgr/test_mock_key_mgr.py b/cinder/tests/unit/keymgr/test_mock_key_mgr.py deleted file mode 100644 index dfcd8ba15..000000000 --- a/cinder/tests/unit/keymgr/test_mock_key_mgr.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test cases for the mock key manager. -""" - -import array - -from cinder import context -from cinder import exception -from cinder.keymgr import key as keymgr_key -from cinder.tests.unit.keymgr import mock_key_mgr -from cinder.tests.unit.keymgr import test_key_mgr - - -class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): - - def _create_key_manager(self): - return mock_key_mgr.MockKeyManager() - - def setUp(self): - super(MockKeyManagerTestCase, self).setUp() - - self.ctxt = context.RequestContext('fake', 'fake') - - def test_create_key(self): - key_id_1 = self.key_mgr.create_key(self.ctxt) - key_id_2 = self.key_mgr.create_key(self.ctxt) - # ensure that the UUIDs are unique - self.assertNotEqual(key_id_1, key_id_2) - - def test_create_key_with_length(self): - for length in [64, 128, 256]: - key_id = self.key_mgr.create_key(self.ctxt, key_length=length) - key = self.key_mgr.get_key(self.ctxt, key_id) - self.assertEqual(length // 8, len(key.get_encoded())) - - def test_create_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.create_key, None) - - def test_store_key(self): - secret_key = array.array('B', b'\x00' * 32).tolist() - _key = keymgr_key.SymmetricKey('AES', secret_key) - key_id = self.key_mgr.store_key(self.ctxt, _key) - - actual_key = self.key_mgr.get_key(self.ctxt, key_id) - self.assertEqual(_key, actual_key) - - def test_store_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.store_key, None, None) - - def test_copy_key(self): - key_id = self.key_mgr.create_key(self.ctxt) - key = self.key_mgr.get_key(self.ctxt, key_id) - - copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) - copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) - - self.assertNotEqual(key_id, copied_key_id) - self.assertEqual(key, copied_key) - - def test_copy_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.copy_key, None, None) - - def test_get_key(self): - pass - - def test_get_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.get_key, None, None) - - def test_get_unknown_key(self): - self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None) - - def test_delete_key(self): - key_id = self.key_mgr.create_key(self.ctxt) - self.key_mgr.delete_key(self.ctxt, key_id) - - self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id) - - def test_delete_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.delete_key, None, None) - - def test_delete_unknown_key(self): - self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None) diff --git a/cinder/tests/unit/keymgr/test_not_implemented_key_mgr.py b/cinder/tests/unit/keymgr/test_not_implemented_key_mgr.py deleted file mode 100644 index 6f2896b13..000000000 --- a/cinder/tests/unit/keymgr/test_not_implemented_key_mgr.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test cases for the not implemented key manager. -""" - -from cinder.keymgr import not_implemented_key_mgr -from cinder.tests.unit.keymgr import test_key_mgr - - -class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): - - def _create_key_manager(self): - return not_implemented_key_mgr.NotImplementedKeyManager() - - def setUp(self): - super(NotImplementedKeyManagerTestCase, self).setUp() - - def test_create_key(self): - self.assertRaises(NotImplementedError, - self.key_mgr.create_key, None) - - def test_store_key(self): - self.assertRaises(NotImplementedError, - self.key_mgr.store_key, None, None) - - def test_copy_key(self): - self.assertRaises(NotImplementedError, - self.key_mgr.copy_key, None, None) - - def test_get_key(self): - self.assertRaises(NotImplementedError, - self.key_mgr.get_key, None, None) - - def test_delete_key(self): - self.assertRaises(NotImplementedError, - self.key_mgr.delete_key, None, None) diff --git a/cinder/tests/unit/objects/__init__.py b/cinder/tests/unit/objects/__init__.py index 7a5066ca9..3e1886d30 100644 --- a/cinder/tests/unit/objects/__init__.py +++ b/cinder/tests/unit/objects/__init__.py @@ -45,11 +45,12 @@ class BaseObjectsTestCase(test.TestCase): # base class" error continue - if field in ('modified_at', 'created_at', - 'updated_at', 'deleted_at') and db[field]: + obj_field = getattr(obj, field) + if field in ('modified_at', 'created_at', 'updated_at', + 'deleted_at', 'last_heartbeat') and db[field]: test.assertEqual(db[field], - timeutils.normalize_time(obj[field])) - elif isinstance(obj[field], obj_base.ObjectListBase): - test.assertEqual(db[field], obj[field].objects) + timeutils.normalize_time(obj_field)) + elif isinstance(obj_field, obj_base.ObjectListBase): + test.assertEqual(db[field], obj_field.objects) else: - test.assertEqual(db[field], obj[field]) + test.assertEqual(db[field], obj_field) diff --git a/cinder/tests/unit/objects/test_backup.py b/cinder/tests/unit/objects/test_backup.py index 037bbf24c..12c633ef5 100644 --- a/cinder/tests/unit/objects/test_backup.py +++ b/cinder/tests/unit/objects/test_backup.py @@ -13,6 +13,8 @@ # under the License. 
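Note: the removals above complete the switch of Cinder's key manager test doubles to castellan. A minimal sketch of the castellan-style key construction the surviving tests rely on, assuming castellan is installed (it mirrors the imports in the test_conf_key_mgr.py hunk below and is not part of the patch):

    import binascii

    from castellan.common.objects import symmetric_key as key

    hex_key = '1' * 64  # the fixed_key value the tests configure
    encoded = bytes(binascii.unhexlify(hex_key))
    # castellan's SymmetricKey carries (algorithm, bit_length, raw bytes),
    # unlike the removed cinder.keymgr.key.SymmetricKey(alg, [int, ...]).
    fixed_key = key.SymmetricKey('AES', len(encoded) * 8, encoded)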
import mock +from oslo_utils import timeutils +import pytz import six from cinder.db.sqlalchemy import models @@ -77,13 +79,22 @@ class TestBackup(test_objects.BaseObjectsTestCase): backup_update.assert_called_once_with(self.context, backup.id, {'display_name': 'foobar'}) - @mock.patch('cinder.db.backup_destroy') - def test_destroy(self, backup_destroy): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.db.sqlalchemy.api.backup_destroy') + def test_destroy(self, backup_destroy, utcnow_mock): + backup_destroy.return_value = { + 'status': fields.BackupStatus.DELETED, + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} backup = objects.Backup(context=self.context, id=fake.BACKUP_ID) backup.destroy() self.assertTrue(backup_destroy.called) admin_context = backup_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) + self.assertTrue(backup.deleted) + self.assertEqual(fields.BackupStatus.DELETED, backup.status) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + backup.deleted_at) def test_obj_field_temp_volume_snapshot_id(self): backup = objects.Backup(context=self.context, diff --git a/cinder/tests/unit/objects/test_base.py b/cinder/tests/unit/objects/test_base.py index 072acaf20..993983081 100644 --- a/cinder/tests/unit/objects/test_base.py +++ b/cinder/tests/unit/objects/test_base.py @@ -31,6 +31,32 @@ from cinder.tests.unit import fake_objects from cinder.tests.unit import objects as test_objects +class TestCinderObjectVersionHistory(test_objects.BaseObjectsTestCase): + def test_add(self): + history = test_objects.obj_base.CinderObjectVersionsHistory() + v10 = {'Backup': '2.0'} + v11 = {'Backup': '2.1'} + history.add('1.0', v10) + history.add('1.1', v11) + # We have 3 elements because we have the liberty version by default + self.assertEqual(2 + 1, len(history)) + + expected_v10 = history['liberty'].copy() + expected_v10.update(v10) + expected_v11 = history['liberty'].copy() + expected_v11.update(v11) + + self.assertEqual('1.1', history.get_current()) + self.assertEqual(expected_v11, history.get_current_versions()) + self.assertEqual(expected_v10, history['1.0']) + + def test_add_existing(self): + history = test_objects.obj_base.CinderObjectVersionsHistory() + history.add('1.0', {'Backup': '1.0'}) + self.assertRaises(exception.ProgrammingError, + history.add, '1.0', {'Backup': '1.0'}) + + class TestCinderObject(test_objects.BaseObjectsTestCase): """Tests methods from CinderObject.""" @@ -94,6 +120,23 @@ class TestCinderObject(test_objects.BaseObjectsTestCase): test_obj.refresh() self._compare(self, refresh_obj, test_obj) + @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id') + def test_refresh_readonly(self, get_by_id_mock): + @objects.base.CinderObjectRegistry.register_if(False) + class MyTestObject(objects.base.CinderObject, + objects.base.CinderObjectDictCompat, + objects.base.CinderComparableObject, + objects.base.CinderPersistentObject): + fields = {'id': fields.UUIDField(), + 'name': fields.StringField(read_only=True)} + + test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo') + refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar') + get_by_id_mock.return_value = refresh_obj + + test_obj.refresh() + self._compare(self, refresh_obj, test_obj) + def test_refresh_no_id_field(self): @objects.base.CinderObjectRegistry.register_if(False) class MyTestObjectNoId(objects.base.CinderObject, @@ -655,7 +698,7 @@ class TestCinderObjectConditionalUpdate(test.TestCase): # is not relevant). 
self.assertEqual(1, update.call_count) arg = update.call_args[0][0] - self.assertTrue(isinstance(arg, dict)) + self.assertIsInstance(arg, dict) self.assertEqual(set(values.keys()), set(arg.keys())) def test_conditional_update_multitable_fail(self): @@ -700,9 +743,9 @@ class TestCinderDictObject(test_objects.BaseObjectsTestCase): def test_dict_objects(self): obj = self.TestDictObject() - self.assertIsNone(obj.get('non_existing')) + self.assertNotIn('non_existing', obj) self.assertEqual('val', obj.get('abc', 'val')) - self.assertIsNone(obj.get('abc')) + self.assertNotIn('abc', obj) obj.abc = 'val2' self.assertEqual('val2', obj.get('abc', 'val')) self.assertEqual(42, obj.get('foo')) diff --git a/cinder/tests/unit/objects/test_cgsnapshot.py b/cinder/tests/unit/objects/test_cgsnapshot.py index 7d7d09dbc..42696122b 100644 --- a/cinder/tests/unit/objects/test_cgsnapshot.py +++ b/cinder/tests/unit/objects/test_cgsnapshot.py @@ -13,6 +13,8 @@ # under the License. import mock +from oslo_utils import timeutils +import pytz import six from cinder import exception @@ -81,14 +83,23 @@ class TestCGSnapshot(test_objects.BaseObjectsTestCase): cgsnapshot.obj_get_changes()) self.assertRaises(exception.ObjectActionError, cgsnapshot.save) - @mock.patch('cinder.db.cgsnapshot_destroy') - def test_destroy(self, cgsnapshot_destroy): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_destroy') + def test_destroy(self, cgsnapshot_destroy, utcnow_mock): + cgsnapshot_destroy.return_value = { + 'status': 'deleted', + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} cgsnapshot = objects.CGSnapshot(context=self.context, id=fake.CGSNAPSHOT_ID) cgsnapshot.destroy() self.assertTrue(cgsnapshot_destroy.called) admin_context = cgsnapshot_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) + self.assertTrue(cgsnapshot.deleted) + self.assertEqual('deleted', cgsnapshot.status) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + cgsnapshot.deleted_at) @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') diff --git a/cinder/tests/unit/objects/test_cluster.py b/cinder/tests/unit/objects/test_cluster.py new file mode 100644 index 000000000..ca0bb3225 --- /dev/null +++ b/cinder/tests/unit/objects/test_cluster.py @@ -0,0 +1,135 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
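Note: the new destroy() tests for Backup and CGSnapshot above share one pattern: freeze timeutils.utcnow(), have the stubbed db destroy call return the soft-delete columns, and assert that the versioned object absorbs them with a timezone-aware deleted_at. A condensed, hypothetical form of that pattern (the check_destroy helper below is illustrative, not from the patch):

    import mock
    import pytz
    from oslo_utils import timeutils

    frozen_now = timeutils.utcnow()

    @mock.patch('oslo_utils.timeutils.utcnow', return_value=frozen_now)
    @mock.patch('cinder.db.sqlalchemy.api.backup_destroy')
    def check_destroy(backup_destroy, utcnow_mock, backup):
        # The db layer reports the columns it changed on soft delete...
        backup_destroy.return_value = {'status': 'deleted',
                                       'deleted': True,
                                       'deleted_at': utcnow_mock.return_value}
        backup.destroy()
        # ...and destroy() copies them onto the object, with deleted_at
        # normalized to an aware UTC datetime.
        assert backup.deleted
        assert backup.deleted_at == frozen_now.replace(tzinfo=pytz.UTC)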
+
+import mock
+from oslo_utils import timeutils
+
+from cinder import objects
+from cinder.tests.unit import fake_cluster
+from cinder.tests.unit import objects as test_objects
+from cinder import utils
+
+
+def _get_filters_sentinel():
+    return {'session': mock.sentinel.session,
+            'name_match_level': mock.sentinel.name_match_level,
+            'read_deleted': mock.sentinel.read_deleted,
+            'get_services': mock.sentinel.get_services,
+            'services_summary': mock.sentinel.services_summary,
+            'name': mock.sentinel.name,
+            'binary': mock.sentinel.binary,
+            'is_up': mock.sentinel.is_up,
+            'disabled': mock.sentinel.disabled,
+            'disabled_reason': mock.sentinel.disabled_reason,
+            'race_preventer': mock.sentinel.race_preventer,
+            'last_heartbeat': mock.sentinel.last_heartbeat,
+            'num_hosts': mock.sentinel.num_hosts,
+            'num_down_hosts': mock.sentinel.num_down_hosts}
+
+
+class TestCluster(test_objects.BaseObjectsTestCase):
+    """Test Cluster Versioned Object methods."""
+    cluster = fake_cluster.fake_cluster_orm()
+
+    @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster)
+    def test_get_by_id(self, cluster_get_mock):
+        filters = _get_filters_sentinel()
+        cluster = objects.Cluster.get_by_id(self.context,
+                                            mock.sentinel.cluster_id,
+                                            **filters)
+        self.assertIsInstance(cluster, objects.Cluster)
+        self._compare(self, self.cluster, cluster)
+        cluster_get_mock.assert_called_once_with(self.context,
+                                                 mock.sentinel.cluster_id,
+                                                 **filters)
+
+    @mock.patch('cinder.db.sqlalchemy.api.cluster_create',
+                return_value=cluster)
+    def test_create(self, cluster_create_mock):
+        cluster = objects.Cluster(context=self.context, name='cluster_name')
+        cluster.create()
+        self.assertEqual(self.cluster.id, cluster.id)
+        cluster_create_mock.assert_called_once_with(self.context,
+                                                    {'name': 'cluster_name'})
+
+    @mock.patch('cinder.db.sqlalchemy.api.cluster_update',
+                return_value=cluster)
+    def test_save(self, cluster_update_mock):
+        cluster = fake_cluster.fake_cluster_ovo(self.context)
+        cluster.disabled = True
+        cluster.save()
+        cluster_update_mock.assert_called_once_with(self.context, cluster.id,
+                                                    {'disabled': True})
+
+    @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy')
+    def test_destroy(self, cluster_destroy_mock):
+        cluster = fake_cluster.fake_cluster_ovo(self.context)
+        cluster.destroy()
+        cluster_destroy_mock.assert_called_once_with(mock.ANY, cluster.id)
+
+    @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster)
+    def test_refresh(self, cluster_get_mock):
+        cluster = fake_cluster.fake_cluster_ovo(self.context)
+        cluster.refresh()
+        cluster_get_mock.assert_called_once_with(self.context, cluster.id)
+
+    def test_is_up_no_last_heartbeat(self):
+        cluster = fake_cluster.fake_cluster_ovo(self.context,
+                                                last_heartbeat=None)
+        self.assertFalse(cluster.is_up())
+
+    def test_is_up(self):
+        cluster = fake_cluster.fake_cluster_ovo(
+            self.context,
+            last_heartbeat=timeutils.utcnow(with_timezone=True))
+        self.assertTrue(cluster.is_up())
+
+    def test_is_up_limit(self):
+        limit_expired = (utils.service_expired_time(True) +
+                         timeutils.datetime.timedelta(seconds=1))
+        cluster = fake_cluster.fake_cluster_ovo(self.context,
+                                                last_heartbeat=limit_expired)
+        self.assertTrue(cluster.is_up())
+
+    def test_is_up_down(self):
+        expired_time = (utils.service_expired_time(True) -
+                        timeutils.datetime.timedelta(seconds=1))
+        cluster = fake_cluster.fake_cluster_ovo(self.context,
+                                                last_heartbeat=expired_time)
+        self.assertFalse(cluster.is_up())
+
+
+class TestClusterList(test_objects.BaseObjectsTestCase):
+    """Test
ClusterList Versioned Object methods.""" + + @mock.patch('cinder.db.sqlalchemy.api.cluster_get_all') + def test_cluster_get_all(self, cluster_get_all_mock): + orm_values = [ + fake_cluster.fake_cluster_orm(), + fake_cluster.fake_cluster_orm(id=2, name='cluster_name2'), + ] + cluster_get_all_mock.return_value = orm_values + filters = _get_filters_sentinel() + + result = objects.ClusterList.get_all(self.context, **filters) + + cluster_get_all_mock.assert_called_once_with( + self.context, filters.pop('is_up'), filters.pop('get_services'), + filters.pop('services_summary'), filters.pop('read_deleted'), + filters.pop('name_match_level'), **filters) + self.assertEqual(2, len(result)) + for i in range(len(result)): + self.assertIsInstance(result[i], objects.Cluster) + self._compare(self, orm_values[i], result[i]) diff --git a/cinder/tests/unit/objects/test_consistencygroup.py b/cinder/tests/unit/objects/test_consistencygroup.py index b5577aa9f..6e7e7ff07 100644 --- a/cinder/tests/unit/objects/test_consistencygroup.py +++ b/cinder/tests/unit/objects/test_consistencygroup.py @@ -13,6 +13,8 @@ # under the License. import mock +from oslo_utils import timeutils +import pytz import six from cinder import exception @@ -146,14 +148,24 @@ class TestConsistencyGroup(test_objects.BaseObjectsTestCase): mock_vol_get_all_by_group.assert_called_once_with(self.context, consistencygroup.id) - @mock.patch('cinder.db.consistencygroup_destroy') - def test_destroy(self, consistencygroup_destroy): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_destroy') + def test_destroy(self, consistencygroup_destroy, utcnow_mock): + consistencygroup_destroy.return_value = { + 'status': fields.ConsistencyGroupStatus.DELETED, + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} consistencygroup = objects.ConsistencyGroup( context=self.context, id=fake.CONSISTENCY_GROUP_ID) consistencygroup.destroy() self.assertTrue(consistencygroup_destroy.called) admin_context = consistencygroup_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) + self.assertTrue(consistencygroup.deleted) + self.assertEqual(fields.ConsistencyGroupStatus.DELETED, + consistencygroup.status) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + consistencygroup.deleted_at) @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get') def test_refresh(self, consistencygroup_get): @@ -245,3 +257,23 @@ class TestConsistencyGroupList(test_objects.BaseObjectsTestCase): limit=1, offset=None, sort_keys='id', sort_dirs='asc') TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) + + @mock.patch('cinder.db.consistencygroup_include_in_cluster') + def test_include_in_cluster(self, include_mock): + filters = {'host': mock.sentinel.host, + 'cluster_name': mock.sentinel.cluster_name} + cluster = 'new_cluster' + objects.ConsistencyGroupList.include_in_cluster(self.context, cluster, + **filters) + include_mock.assert_called_once_with(self.context, cluster, True, + **filters) + + @mock.patch('cinder.db.consistencygroup_include_in_cluster') + def test_include_in_cluster_specify_partial(self, include_mock): + filters = {'host': mock.sentinel.host, + 'cluster_name': mock.sentinel.cluster_name} + cluster = 'new_cluster' + objects.ConsistencyGroupList.include_in_cluster( + self.context, cluster, mock.sentinel.partial_rename, **filters) + include_mock.assert_called_once_with( + self.context, cluster, mock.sentinel.partial_rename, **filters) 
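Note: the two include_in_cluster tests above pin down a thin delegation; the list class forwards straight to the db API and defaults partial renaming to on. Roughly, with the method shape inferred from the asserted calls (not copied from the cinder source):

    from cinder import db

    class ConsistencyGroupList(object):
        @classmethod
        def include_in_cluster(cls, context, cluster,
                               partial_rename=True, **filters):
            # partial_rename defaults to True; filters pass through
            # unchanged to the db layer.
            return db.consistencygroup_include_in_cluster(
                context, cluster, partial_rename, **filters)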
diff --git a/cinder/tests/unit/objects/test_group.py b/cinder/tests/unit/objects/test_group.py new file mode 100644 index 000000000..fff36e207 --- /dev/null +++ b/cinder/tests/unit/objects/test_group.py @@ -0,0 +1,207 @@ +# Copyright 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +import six + +from cinder import exception +from cinder import objects +from cinder.objects import fields +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_volume +from cinder.tests.unit import objects as test_objects + +fake_group = { + 'id': fake.GROUP_ID, + 'user_id': fake.USER_ID, + 'project_id': fake.PROJECT_ID, + 'host': 'fake_host', + 'availability_zone': 'fake_az', + 'name': 'fake_name', + 'description': 'fake_description', + 'group_type_id': fake.GROUP_TYPE_ID, + 'status': fields.GroupStatus.CREATING, +} + + +class TestGroup(test_objects.BaseObjectsTestCase): + + @mock.patch('cinder.db.sqlalchemy.api.group_get', + return_value=fake_group) + def test_get_by_id(self, group_get): + group = objects.Group.get_by_id( + self.context, fake.GROUP_ID) + self._compare(self, fake_group, group) + group_get.assert_called_once_with( + self.context, fake.GROUP_ID) + + @mock.patch('cinder.db.sqlalchemy.api.model_query') + def test_get_by_id_no_existing_id(self, model_query): + model_query().filter_by().first.return_value = None + self.assertRaises(exception.GroupNotFound, + objects.Group.get_by_id, self.context, + 123) + + @mock.patch('cinder.db.group_create', + return_value=fake_group) + def test_create(self, group_create): + fake_grp = fake_group.copy() + del fake_grp['id'] + group = objects.Group(context=self.context, + **fake_grp) + group.create() + self._compare(self, fake_group, group) + + def test_create_with_id_except_exception(self, ): + group = objects.Group( + context=self.context, **{'id': fake.GROUP_ID}) + self.assertRaises(exception.ObjectActionError, group.create) + + @mock.patch('cinder.db.group_update') + def test_save(self, group_update): + group = objects.Group._from_db_object( + self.context, objects.Group(), fake_group) + group.status = fields.GroupStatus.AVAILABLE + group.save() + group_update.assert_called_once_with( + self.context, + group.id, + {'status': fields.GroupStatus.AVAILABLE}) + + def test_save_with_volumes(self): + group = objects.Group._from_db_object( + self.context, objects.Group(), fake_group) + volumes_objs = [objects.Volume(context=self.context, id=i) + for i in [fake.VOLUME_ID, fake.VOLUME2_ID, + fake.VOLUME3_ID]] + volumes = objects.VolumeList(objects=volumes_objs) + group.name = 'foobar' + group.volumes = volumes + self.assertEqual({'name': 'foobar', + 'volumes': volumes}, + group.obj_get_changes()) + self.assertRaises(exception.ObjectActionError, group.save) + + @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') + @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') + def test_obj_load_attr(self, mock_vol_get_all_by_group, + mock_vol_type_get_all_by_group): + 
group = objects.Group._from_db_object( + self.context, objects.Group(), fake_group) + + # Test volumes lazy-loaded field + volume_objs = [objects.Volume(context=self.context, id=i) + for i in [fake.VOLUME_ID, fake.VOLUME2_ID, + fake.VOLUME3_ID]] + volumes = objects.VolumeList(context=self.context, objects=volume_objs) + mock_vol_get_all_by_group.return_value = volumes + self.assertEqual(volumes, group.volumes) + mock_vol_get_all_by_group.assert_called_once_with(self.context, + group.id) + + @mock.patch('cinder.db.group_destroy') + def test_destroy(self, group_destroy): + group = objects.Group( + context=self.context, id=fake.GROUP_ID) + group.destroy() + self.assertTrue(group_destroy.called) + admin_context = group_destroy.call_args[0][0] + self.assertTrue(admin_context.is_admin) + + @mock.patch('cinder.db.sqlalchemy.api.group_get') + def test_refresh(self, group_get): + db_group1 = fake_group.copy() + db_group2 = db_group1.copy() + db_group2['description'] = 'foobar' + + # On the second group_get, return the Group with + # an updated description + group_get.side_effect = [db_group1, db_group2] + group = objects.Group.get_by_id(self.context, + fake.GROUP_ID) + self._compare(self, db_group1, group) + + # description was updated, so a Group refresh should have a + # new value for that field + group.refresh() + self._compare(self, db_group2, group) + if six.PY3: + call_bool = mock.call.__bool__() + else: + call_bool = mock.call.__nonzero__() + group_get.assert_has_calls([ + mock.call( + self.context, + fake.GROUP_ID), + call_bool, + mock.call( + self.context, + fake.GROUP_ID)]) + + def test_from_db_object_with_all_expected_attributes(self): + expected_attrs = ['volumes'] + db_volumes = [fake_volume.fake_db_volume(admin_metadata={}, + volume_metadata={})] + db_group = fake_group.copy() + db_group['volumes'] = db_volumes + group = objects.Group._from_db_object( + self.context, objects.Group(), db_group, expected_attrs) + self.assertEqual(len(db_volumes), len(group.volumes)) + self._compare(self, db_volumes[0], group.volumes[0]) + + +class TestGroupList(test_objects.BaseObjectsTestCase): + @mock.patch('cinder.db.group_get_all', + return_value=[fake_group]) + def test_get_all(self, group_get_all): + groups = objects.GroupList.get_all(self.context) + self.assertEqual(1, len(groups)) + TestGroup._compare(self, fake_group, + groups[0]) + + @mock.patch('cinder.db.group_get_all_by_project', + return_value=[fake_group]) + def test_get_all_by_project(self, group_get_all_by_project): + groups = objects.GroupList.get_all_by_project( + self.context, self.project_id) + self.assertEqual(1, len(groups)) + TestGroup._compare(self, fake_group, + groups[0]) + + @mock.patch('cinder.db.group_get_all', + return_value=[fake_group]) + def test_get_all_with_pagination(self, group_get_all): + groups = objects.GroupList.get_all( + self.context, filters={'id': 'fake'}, marker=None, limit=1, + offset=None, sort_keys='id', sort_dirs='asc') + self.assertEqual(1, len(groups)) + group_get_all.assert_called_once_with( + self.context, filters={'id': 'fake'}, marker=None, limit=1, + offset=None, sort_keys='id', sort_dirs='asc') + TestGroup._compare(self, fake_group, + groups[0]) + + @mock.patch('cinder.db.group_get_all_by_project', + return_value=[fake_group]) + def test_get_all_by_project_with_pagination( + self, group_get_all_by_project): + groups = objects.GroupList.get_all_by_project( + self.context, self.project_id, filters={'id': 'fake'}, marker=None, + limit=1, offset=None, sort_keys='id', sort_dirs='asc') + 
self.assertEqual(1, len(groups)) + group_get_all_by_project.assert_called_once_with( + self.context, self.project_id, filters={'id': 'fake'}, marker=None, + limit=1, offset=None, sort_keys='id', sort_dirs='asc') + TestGroup._compare(self, fake_group, + groups[0]) diff --git a/cinder/tests/unit/objects/test_group_snapshot.py b/cinder/tests/unit/objects/test_group_snapshot.py new file mode 100644 index 000000000..1f1416e1e --- /dev/null +++ b/cinder/tests/unit/objects/test_group_snapshot.py @@ -0,0 +1,187 @@ +# Copyright 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_utils import timeutils +import pytz +import six + +from cinder import exception +from cinder import objects +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import objects as test_objects +from cinder.tests.unit.objects.test_group import fake_group + +fake_group_snapshot = { + 'id': fake.GROUP_SNAPSHOT_ID, + 'user_id': fake.USER_ID, + 'project_id': fake.PROJECT_ID, + 'name': 'fake_name', + 'description': 'fake_description', + 'status': 'creating', + 'group_id': fake.GROUP_ID, +} + + +class TestGroupSnapshot(test_objects.BaseObjectsTestCase): + + @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_get', + return_value=fake_group_snapshot) + def test_get_by_id(self, group_snapshot_get): + group_snapshot = objects.GroupSnapshot.get_by_id( + self.context, + fake.GROUP_SNAPSHOT_ID) + self._compare(self, fake_group_snapshot, group_snapshot) + + @mock.patch('cinder.db.group_snapshot_create', + return_value=fake_group_snapshot) + def test_create(self, group_snapshot_create): + fake_group_snap = fake_group_snapshot.copy() + del fake_group_snap['id'] + group_snapshot = objects.GroupSnapshot(context=self.context, + **fake_group_snap) + group_snapshot.create() + self._compare(self, fake_group_snapshot, group_snapshot) + + def test_create_with_id_except_exception(self): + group_snapshot = objects.GroupSnapshot( + context=self.context, + **{'id': fake.GROUP_ID}) + self.assertRaises(exception.ObjectActionError, group_snapshot.create) + + @mock.patch('cinder.db.group_snapshot_update') + def test_save(self, group_snapshot_update): + group_snapshot = objects.GroupSnapshot._from_db_object( + self.context, objects.GroupSnapshot(), fake_group_snapshot) + group_snapshot.status = 'active' + group_snapshot.save() + group_snapshot_update.assert_called_once_with(self.context, + group_snapshot.id, + {'status': 'active'}) + + @mock.patch('cinder.db.group_update', + return_value=fake_group) + @mock.patch('cinder.db.group_snapshot_update') + def test_save_with_group(self, group_snapshot_update, + group_snapshot_cg_update): + group = objects.Group._from_db_object( + self.context, objects.Group(), fake_group) + group_snapshot = objects.GroupSnapshot._from_db_object( + self.context, objects.GroupSnapshot(), fake_group_snapshot) + group_snapshot.name = 'foobar' + group_snapshot.group = group + self.assertEqual({'name': 'foobar', + 'group': group}, + group_snapshot.obj_get_changes()) + 
self.assertRaises(exception.ObjectActionError, group_snapshot.save) + + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_destroy') + def test_destroy(self, group_snapshot_destroy, utcnow_mock): + group_snapshot_destroy.return_value = { + 'status': 'deleted', + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} + group_snapshot = objects.GroupSnapshot(context=self.context, + id=fake.GROUP_SNAPSHOT_ID) + group_snapshot.destroy() + self.assertTrue(group_snapshot_destroy.called) + admin_context = group_snapshot_destroy.call_args[0][0] + self.assertTrue(admin_context.is_admin) + self.assertTrue(group_snapshot.deleted) + self.assertEqual('deleted', group_snapshot.status) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + group_snapshot.deleted_at) + + @mock.patch('cinder.objects.group.Group.get_by_id') + @mock.patch( + 'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') + def test_obj_load_attr(self, snapshotlist_get_for_cgs, + group_get_by_id): + group_snapshot = objects.GroupSnapshot._from_db_object( + self.context, objects.GroupSnapshot(), fake_group_snapshot) + # Test group lazy-loaded field + group = objects.Group( + context=self.context, id=fake.GROUP_ID) + group_get_by_id.return_value = group + self.assertEqual(group, group_snapshot.group) + group_get_by_id.assert_called_once_with( + self.context, group_snapshot.group_id) + # Test snapshots lazy-loaded field + snapshots_objs = [objects.Snapshot(context=self.context, id=i) + for i in [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, + fake.SNAPSHOT3_ID]] + snapshots = objects.SnapshotList(context=self.context, + objects=snapshots_objs) + snapshotlist_get_for_cgs.return_value = snapshots + self.assertEqual(snapshots, group_snapshot.snapshots) + snapshotlist_get_for_cgs.assert_called_once_with( + self.context, group_snapshot.id) + + @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_get') + def test_refresh(self, group_snapshot_get): + db_group_snapshot1 = fake_group_snapshot.copy() + db_group_snapshot2 = db_group_snapshot1.copy() + db_group_snapshot2['description'] = 'foobar' + + # On the second group_snapshot_get, return the GroupSnapshot with an + # updated description + group_snapshot_get.side_effect = [db_group_snapshot1, + db_group_snapshot2] + group_snapshot = objects.GroupSnapshot.get_by_id( + self.context, fake.GROUP_SNAPSHOT_ID) + self._compare(self, db_group_snapshot1, group_snapshot) + + # description was updated, so a GroupSnapshot refresh should have a new + # value for that field + group_snapshot.refresh() + self._compare(self, db_group_snapshot2, group_snapshot) + if six.PY3: + call_bool = mock.call.__bool__() + else: + call_bool = mock.call.__nonzero__() + group_snapshot_get.assert_has_calls( + [mock.call(self.context, + fake.GROUP_SNAPSHOT_ID), + call_bool, + mock.call(self.context, + fake.GROUP_SNAPSHOT_ID)]) + + +class TestGroupSnapshotList(test_objects.BaseObjectsTestCase): + @mock.patch('cinder.db.group_snapshot_get_all', + return_value=[fake_group_snapshot]) + def test_get_all(self, group_snapshot_get_all): + group_snapshots = objects.GroupSnapshotList.get_all(self.context) + self.assertEqual(1, len(group_snapshots)) + TestGroupSnapshot._compare(self, fake_group_snapshot, + group_snapshots[0]) + + @mock.patch('cinder.db.group_snapshot_get_all_by_project', + return_value=[fake_group_snapshot]) + def test_get_all_by_project(self, group_snapshot_get_all_by_project): + group_snapshots = 
objects.GroupSnapshotList.get_all_by_project( + self.context, self.project_id) + self.assertEqual(1, len(group_snapshots)) + TestGroupSnapshot._compare(self, fake_group_snapshot, + group_snapshots[0]) + + @mock.patch('cinder.db.group_snapshot_get_all_by_group', + return_value=[fake_group_snapshot]) + def test_get_all_by_group(self, group_snapshot_get_all_by_group): + group_snapshots = objects.GroupSnapshotList.get_all_by_group( + self.context, self.project_id) + self.assertEqual(1, len(group_snapshots)) + TestGroupSnapshot._compare(self, fake_group_snapshot, + group_snapshots[0]) diff --git a/cinder/tests/unit/objects/test_group_type.py b/cinder/tests/unit/objects/test_group_type.py new file mode 100644 index 000000000..71c1877e9 --- /dev/null +++ b/cinder/tests/unit/objects/test_group_type.py @@ -0,0 +1,127 @@ +# Copyright 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +import six + +from cinder import objects +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_group +from cinder.tests.unit import objects as test_objects + + +class TestGroupType(test_objects.BaseObjectsTestCase): + + @mock.patch('cinder.db.sqlalchemy.api._group_type_get_full') + def test_get_by_id(self, group_type_get): + db_group_type = fake_group.fake_db_group_type() + group_type_get.return_value = db_group_type + group_type = objects.GroupType.get_by_id(self.context, + fake.GROUP_TYPE_ID) + self._compare(self, db_group_type, group_type) + + @mock.patch('cinder.volume.group_types.create') + def test_create(self, group_type_create): + db_group_type = fake_group.fake_db_group_type() + group_type_create.return_value = db_group_type + + group_type = objects.GroupType(context=self.context) + group_type.name = db_group_type['name'] + group_type.group_specs = db_group_type['group_specs'] + group_type.is_public = db_group_type['is_public'] + group_type.projects = db_group_type['projects'] + group_type.description = db_group_type['description'] + group_type.create() + + group_type_create.assert_called_once_with( + self.context, db_group_type['name'], + db_group_type['group_specs'], db_group_type['is_public'], + db_group_type['projects'], db_group_type['description']) + + @mock.patch('cinder.volume.group_types.update') + def test_save(self, group_type_update): + db_group_type = fake_group.fake_db_group_type() + group_type = objects.GroupType._from_db_object(self.context, + objects.GroupType(), + db_group_type) + group_type.description = 'foobar' + group_type.save() + group_type_update.assert_called_once_with(self.context, + group_type.id, + group_type.name, + group_type.description) + + @mock.patch('cinder.volume.group_types.destroy') + def test_destroy(self, group_type_destroy): + db_group_type = fake_group.fake_db_group_type() + group_type = objects.GroupType._from_db_object(self.context, + objects.GroupType(), + db_group_type) + group_type.destroy() + self.assertTrue(group_type_destroy.called) + admin_context = group_type_destroy.call_args[0][0] + 
self.assertTrue(admin_context.is_admin) + + @mock.patch('cinder.db.sqlalchemy.api._group_type_get_full') + def test_refresh(self, group_type_get): + db_type1 = fake_group.fake_db_group_type() + db_type2 = db_type1.copy() + db_type2['description'] = 'foobar' + + # updated description + group_type_get.side_effect = [db_type1, db_type2] + group_type = objects.GroupType.get_by_id(self.context, + fake.GROUP_TYPE_ID) + self._compare(self, db_type1, group_type) + + # description was updated, so a group type refresh should have a new + # value for that field + group_type.refresh() + self._compare(self, db_type2, group_type) + if six.PY3: + call_bool = mock.call.__bool__() + else: + call_bool = mock.call.__nonzero__() + group_type_get.assert_has_calls([mock.call(self.context, + fake.GROUP_TYPE_ID), + call_bool, + mock.call(self.context, + fake.GROUP_TYPE_ID)]) + + +class TestGroupTypeList(test_objects.BaseObjectsTestCase): + @mock.patch('cinder.volume.group_types.get_all_group_types') + def test_get_all(self, get_all_types): + db_group_type = fake_group.fake_db_group_type() + get_all_types.return_value = {db_group_type['name']: db_group_type} + + group_types = objects.GroupTypeList.get_all(self.context) + self.assertEqual(1, len(group_types)) + TestGroupType._compare(self, db_group_type, group_types[0]) + + @mock.patch('cinder.volume.group_types.get_all_group_types') + def test_get_all_with_pagination(self, get_all_types): + db_group_type = fake_group.fake_db_group_type() + get_all_types.return_value = {db_group_type['name']: db_group_type} + + group_types = objects.GroupTypeList.get_all(self.context, + filters={'is_public': + True}, + marker=None, + limit=1, + sort_keys='id', + sort_dirs='desc', + offset=None) + self.assertEqual(1, len(group_types)) + TestGroupType._compare(self, db_group_type, group_types[0]) diff --git a/cinder/tests/unit/objects/test_objects.py b/cinder/tests/unit/objects/test_objects.py index 2272176a0..cebe21896 100644 --- a/cinder/tests/unit/objects/test_objects.py +++ b/cinder/tests/unit/objects/test_objects.py @@ -26,20 +26,32 @@ object_data = { 'Backup': '1.4-c50f7a68bb4c400dd53dd219685b3992', 'BackupImport': '1.4-c50f7a68bb4c400dd53dd219685b3992', 'BackupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', + 'Cluster': '1.0-6f06e867c073e9d31722c53b0a9329b8', + 'ClusterList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'CGSnapshot': '1.0-3212ac2b4c2811b7134fb9ba2c49ff74', 'CGSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'ConsistencyGroup': '1.2-ff7638e03ae7a3bb7a43a6c5c4d0c94a', + 'ConsistencyGroup': '1.3-7bf01a79b82516639fc03cd3ab6d9c01', 'ConsistencyGroupList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'Service': '1.3-d7c1e133791c9d766596a0528fc9a12f', + 'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8', + 'QualityOfServiceSpecsList': '1.0-1b54e51ad0fc1f3a8878f5010e7e16dc', + 'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733', + 'Service': '1.4-c7d011989d1718ca0496ccf640b42712', 'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'Snapshot': '1.1-37966f7141646eb29e9ad5298ff2ca8a', + 'Snapshot': '1.1-d6a9d58f627bb2a5cf804b0dd7a12bc7', 'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'Volume': '1.3-15ff1f42d4e8eb321aa8217dd46aa1e1', + 'Volume': '1.5-19919d8086d6a38ab9d3ab88139e70e0', 'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'VolumeAttachment': '1.0-b30dacf62b2030dd83d8a1603f1064ff', 'VolumeAttachmentList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'VolumeType': '1.1-6673dd9ce7c27e9c85279afb20833877', + 'VolumeProperties': 
'1.1-cadac86b2bdc11eb79d1dcea988ff9e8', + 'VolumeType': '1.2-02ecb0baac87528d041f4ddd95b95579', 'VolumeTypeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', + 'GroupType': '1.0-d4a7b272199d0b0d6fc3ceed58539d30', + 'GroupTypeList': '1.0-1b54e51ad0fc1f3a8878f5010e7e16dc', + 'Group': '1.1-bd853b1d1ee05949d9ce4b33f80ac1a0', + 'GroupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', + 'GroupSnapshot': '1.0-9af3e994e889cbeae4427c3e351fa91d', + 'GroupSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', } @@ -76,7 +88,12 @@ class TestObjectVersions(test.TestCase): # db model and object match. def _check_table_matched(db_model, cls): for column in db_model.__table__.columns: - if column.name in cls.fields: + # NOTE(xyang): Skip the comparison of the column name + # group_type_id in table Group because group_type_id + # is in the object Group but it is stored in a different + # table in the database, not in the Group table. + if (column.name in cls.fields and not + (column.name == 'group_type_id' and name == 'Group')): self.assertEqual( column.nullable, cls.fields[column.name].nullable, @@ -86,7 +103,7 @@ class TestObjectVersions(test.TestCase): classes = base.CinderObjectRegistry.obj_classes() for name, cls in classes.items(): - if not issubclass(cls[0], base.ObjectListBase): + if issubclass(cls[0], base.CinderPersistentObject): db_model = db.get_model_for_versioned_object(cls[0]) _check_table_matched(db_model, cls[0]) diff --git a/cinder/tests/unit/objects/test_qos.py b/cinder/tests/unit/objects/test_qos.py new file mode 100644 index 000000000..cb88b2f4c --- /dev/null +++ b/cinder/tests/unit/objects/test_qos.py @@ -0,0 +1,126 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
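The object_data map updated above pins a version hash for every registered versioned object, so any silent change to an object's fields fails the check until the version is bumped deliberately. A minimal sketch of how such a comparison is typically wired up with oslo.versionedobjects (the helper name check_pinned_versions is illustrative, not part of this change):

    # Sketch, assuming oslo.versionedobjects' fixture API: the checker
    # fingerprints each registered object's fields and remotable methods,
    # and test_hashes() reports only the entries that diverge from the
    # pinned map.
    from oslo_versionedobjects import fixture

    def check_pinned_versions(expected_hashes):
        checker = fixture.ObjectVersionChecker()
        expected, actual = checker.test_hashes(expected_hashes)
        # Empty results mean every object still matches its pinned hash;
        # anything else calls for a version bump in the object itself.
        return expected, actual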
+ +import mock +from oslo_utils import timeutils +import pytz + +from cinder.db.sqlalchemy import models +from cinder import exception +from cinder import objects +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import objects as test_objects + +fake_qos = {'consumer': 'front-end', + 'id': fake.OBJECT_ID, + 'name': 'qos_name', + 'specs': {'key1': 'val1', 'key2': 'val2'}} + +fake_qos_no_id = fake_qos.copy() +del fake_qos_no_id['id'] + + +class TestQos(test_objects.BaseObjectsTestCase): + @mock.patch('cinder.db.get_by_id', return_value=fake_qos) + def test_get_by_id(self, qos_get): + qos_object = objects.QualityOfServiceSpecs.get_by_id( + self.context, fake.OBJECT_ID) + self._compare(self, fake_qos, qos_object) + qos_get.assert_called_once_with( + self.context, models.QualityOfServiceSpecs, fake.OBJECT_ID) + + @mock.patch('cinder.db.qos_specs_create', + return_value={'name': 'qos_name', 'id': fake.OBJECT_ID}) + def test_create(self, qos_fake_create): + qos_object = objects.QualityOfServiceSpecs( + self.context, **fake_qos_no_id) + qos_object.create() + self._compare(self, fake_qos, qos_object) + + # Fail to create a second time + self.assertRaises(exception.ObjectActionError, qos_object.create) + + self.assertEqual(1, len(qos_fake_create.mock_calls)) + + @mock.patch('cinder.db.qos_specs_item_delete') + @mock.patch('cinder.db.qos_specs_update') + def test_save(self, qos_fake_update, qos_fake_delete): + qos_dict = fake_qos.copy() + qos_dict['specs']['key_to_remove1'] = 'val' + qos_dict['specs']['key_to_remove2'] = 'val' + qos_object = objects.QualityOfServiceSpecs._from_db_object( + self.context, objects.QualityOfServiceSpecs(), qos_dict) + + qos_object.specs['key1'] = 'val1' + qos_object.save() + # No values have changed so no updates should be made + self.assertFalse(qos_fake_update.called) + + qos_object.consumer = 'back-end' + qos_object.specs['key1'] = 'val2' + qos_object.specs['new_key'] = 'val3' + + del qos_object.specs['key_to_remove1'] + del qos_object.specs['key_to_remove2'] + qos_object.save() + qos_fake_update.assert_called_once_with( + self.context, fake.OBJECT_ID, + {'specs': {'key1': 'val2', 'new_key': 'val3'}, + 'consumer': 'back-end'}) + qos_fake_delete.assert_has_calls([ + mock.call(self.context, fake.OBJECT_ID, 'key_to_remove1'), + mock.call(self.context, fake.OBJECT_ID, 'key_to_remove2')]) + + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos', + return_value=None) + @mock.patch('cinder.db.sqlalchemy.api.qos_specs_delete') + def test_destroy_no_vol_types(self, qos_fake_delete, fake_get_vol_types, + utcnow_mock): + qos_fake_delete.return_value = { + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} + qos_object = objects.QualityOfServiceSpecs._from_db_object( + self.context, objects.QualityOfServiceSpecs(), fake_qos) + qos_object.destroy() + + qos_fake_delete.assert_called_once_with(mock.ANY, fake_qos['id']) + self.assertTrue(qos_object.deleted) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + qos_object.deleted_at) + + @mock.patch('cinder.db.sqlalchemy.api.qos_specs_delete') + @mock.patch('cinder.db.qos_specs_disassociate_all') + @mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos') + def test_destroy_with_vol_types(self, fake_get_vol_types, + qos_fake_disassociate, qos_fake_delete): + qos_object = objects.QualityOfServiceSpecs._from_db_object( + self.context, objects.QualityOfServiceSpecs(), fake_qos) + 
fake_get_vol_types.return_value = objects.VolumeTypeList( + objects=[objects.VolumeType(id=fake.VOLUME_TYPE_ID)]) + self.assertRaises(exception.QoSSpecsInUse, qos_object.destroy) + + qos_object.destroy(force=True) + qos_fake_delete.assert_called_once_with(mock.ANY, fake_qos['id']) + qos_fake_disassociate.assert_called_once_with( + self.context, fake_qos['id']) + + @mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos', + return_value=None) + @mock.patch('cinder.db.get_by_id', return_value=fake_qos) + def test_get_volume_type(self, fake_get_by_id, fake_get_vol_types): + qos_object = objects.QualityOfServiceSpecs.get_by_id( + self.context, fake.OBJECT_ID) + self.assertFalse(fake_get_vol_types.called) + # Access lazy-loadable attribute + qos_object.volume_types + self.assertTrue(fake_get_vol_types.called) diff --git a/cinder/tests/unit/objects/test_service.py b/cinder/tests/unit/objects/test_service.py index 57586d8d1..0f485da01 100644 --- a/cinder/tests/unit/objects/test_service.py +++ b/cinder/tests/unit/objects/test_service.py @@ -13,8 +13,11 @@ # under the License. import mock +from oslo_utils import timeutils +import pytz import six +from cinder import exception from cinder import objects from cinder.tests.unit import fake_service from cinder.tests.unit import objects as test_objects @@ -30,25 +33,25 @@ class TestService(test_objects.BaseObjectsTestCase): self._compare(self, db_service, service) service_get.assert_called_once_with(self.context, 1) - @mock.patch('cinder.db.service_get_by_host_and_topic') - def test_get_by_host_and_topic(self, service_get_by_host_and_topic): + @mock.patch('cinder.db.service_get') + def test_get_by_host_and_topic(self, service_get): db_service = fake_service.fake_db_service() - service_get_by_host_and_topic.return_value = db_service + service_get.return_value = db_service service = objects.Service.get_by_host_and_topic( self.context, 'fake-host', 'fake-topic') self._compare(self, db_service, service) - service_get_by_host_and_topic.assert_called_once_with( - self.context, 'fake-host', 'fake-topic') + service_get.assert_called_once_with( + self.context, disabled=False, host='fake-host', topic='fake-topic') - @mock.patch('cinder.db.service_get_by_args') - def test_get_by_args(self, service_get_by_args): + @mock.patch('cinder.db.service_get') + def test_get_by_args(self, service_get): db_service = fake_service.fake_db_service() - service_get_by_args.return_value = db_service + service_get.return_value = db_service service = objects.Service.get_by_args( self.context, 'fake-host', 'fake-key') self._compare(self, db_service, service) - service_get_by_args.assert_called_once_with( - self.context, 'fake-host', 'fake-key') + service_get.assert_called_once_with( + self.context, host='fake-host', binary='fake-key') @mock.patch('cinder.db.service_create') def test_create(self, service_create): @@ -69,14 +72,21 @@ class TestService(test_objects.BaseObjectsTestCase): service_update.assert_called_once_with(self.context, service.id, {'topic': 'foobar'}) - @mock.patch('cinder.db.service_destroy') - def test_destroy(self, service_destroy): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.db.sqlalchemy.api.service_destroy') + def test_destroy(self, service_destroy, utcnow_mock): + service_destroy.return_value = { + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} db_service = fake_service.fake_db_service() service = objects.Service._from_db_object( self.context, objects.Service(), db_service) with 
mock.patch.object(service._context, 'elevated') as elevated_ctx: service.destroy() service_destroy.assert_called_once_with(elevated_ctx(), 123) + self.assertTrue(service.deleted) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + service.deleted_at) @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_refresh(self, service_get): @@ -102,38 +112,55 @@ class TestService(test_objects.BaseObjectsTestCase): call_bool, mock.call(self.context, 123)]) - @mock.patch('cinder.db.service_get_all_by_binary') - def _test_get_minimum_version(self, services_update, expected, - service_get_all_by_binary): - services = [fake_service.fake_db_service(**s) for s in services_update] - service_get_all_by_binary.return_value = services - - min_rpc = objects.Service.get_minimum_rpc_version(self.context, 'foo') - self.assertEqual(expected[0], min_rpc) - min_obj = objects.Service.get_minimum_obj_version(self.context, 'foo') - self.assertEqual(expected[1], min_obj) - service_get_all_by_binary.assert_has_calls( - [mock.call(self.context, 'foo', disabled=None)] * 2) - - @mock.patch('cinder.db.service_get_all_by_binary') - def test_get_minimum_version(self, service_get_all_by_binary): + @mock.patch('cinder.db.service_get_all') + def test_get_minimum_version(self, service_get_all): services_update = [ {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, {'rpc_current_version': '1.1', 'object_current_version': '1.2'}, {'rpc_current_version': '2.0', 'object_current_version': '2.5'}, ] expected = ('1.0', '1.2') - self._test_get_minimum_version(services_update, expected) + services = [fake_service.fake_db_service(**s) for s in services_update] + service_get_all.return_value = services - @mock.patch('cinder.db.service_get_all_by_binary') - def test_get_minimum_version_liberty(self, service_get_all_by_binary): + min_rpc = objects.Service.get_minimum_rpc_version(self.context, 'foo') + self.assertEqual(expected[0], min_rpc) + min_obj = objects.Service.get_minimum_obj_version(self.context, 'foo') + self.assertEqual(expected[1], min_obj) + service_get_all.assert_has_calls( + [mock.call(self.context, binary='foo', disabled=None)] * 2) + + @mock.patch('cinder.db.service_get_all') + def test_get_minimum_version_liberty(self, service_get_all): services_update = [ {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, {'rpc_current_version': '1.1', 'object_current_version': None}, {'rpc_current_version': None, 'object_current_version': '2.5'}, ] - expected = ('liberty', 'liberty') - self._test_get_minimum_version(services_update, expected) + services = [fake_service.fake_db_service(**s) for s in services_update] + service_get_all.return_value = services + + self.assertRaises(exception.ServiceTooOld, + objects.Service.get_minimum_rpc_version, + self.context, 'foo') + self.assertRaises(exception.ServiceTooOld, + objects.Service.get_minimum_obj_version, + self.context, 'foo') + + @mock.patch('cinder.db.service_get_all') + def test_get_minimum_version_no_binary(self, service_get_all): + services_update = [ + {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, + {'rpc_current_version': '1.1', 'object_current_version': '1.2'}, + {'rpc_current_version': '2.0', 'object_current_version': '2.5'}, + ] + services = [fake_service.fake_db_service(**s) for s in services_update] + service_get_all.return_value = services + + min_obj = objects.Service.get_minimum_obj_version(self.context) + self.assertEqual('1.2', min_obj) + service_get_all.assert_called_once_with(self.context, binary=None, 
+ disabled=None) class TestServiceList(test_objects.BaseObjectsTestCase): @@ -144,30 +171,30 @@ class TestServiceList(test_objects.BaseObjectsTestCase): filters = {'host': 'host', 'binary': 'foo', 'disabled': False} services = objects.ServiceList.get_all(self.context, filters) - service_get_all.assert_called_once_with(self.context, filters) + service_get_all.assert_called_once_with(self.context, **filters) self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) - @mock.patch('cinder.db.service_get_all_by_topic') - def test_get_all_by_topic(self, service_get_all_by_topic): + @mock.patch('cinder.db.service_get_all') + def test_get_all_by_topic(self, service_get_all): db_service = fake_service.fake_db_service() - service_get_all_by_topic.return_value = [db_service] + service_get_all.return_value = [db_service] services = objects.ServiceList.get_all_by_topic( self.context, 'foo', 'bar') - service_get_all_by_topic.assert_called_once_with( - self.context, 'foo', disabled='bar') + service_get_all.assert_called_once_with( + self.context, topic='foo', disabled='bar') self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) - @mock.patch('cinder.db.service_get_all_by_binary') - def test_get_all_by_binary(self, service_get_all_by_binary): + @mock.patch('cinder.db.service_get_all') + def test_get_all_by_binary(self, service_get_all): db_service = fake_service.fake_db_service() - service_get_all_by_binary.return_value = [db_service] + service_get_all.return_value = [db_service] services = objects.ServiceList.get_all_by_binary( self.context, 'foo', 'bar') - service_get_all_by_binary.assert_called_once_with( - self.context, 'foo', disabled='bar') + service_get_all.assert_called_once_with( + self.context, binary='foo', disabled='bar') self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) diff --git a/cinder/tests/unit/objects/test_snapshot.py b/cinder/tests/unit/objects/test_snapshot.py index 0e92a8964..3b2547a94 100644 --- a/cinder/tests/unit/objects/test_snapshot.py +++ b/cinder/tests/unit/objects/test_snapshot.py @@ -14,6 +14,8 @@ import copy import mock +from oslo_utils import timeutils +import pytz import six from cinder.db.sqlalchemy import models @@ -112,12 +114,21 @@ class TestSnapshot(test_objects.BaseObjectsTestCase): {'key1': 'value1'}, True) - @mock.patch('cinder.db.snapshot_destroy') - def test_destroy(self, snapshot_destroy): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.db.sqlalchemy.api.snapshot_destroy') + def test_destroy(self, snapshot_destroy, utcnow_mock): + snapshot_destroy.return_value = { + 'status': 'deleted', + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} snapshot = objects.Snapshot(context=self.context, id=fake.SNAPSHOT_ID) snapshot.destroy() snapshot_destroy.assert_called_once_with(self.context, fake.SNAPSHOT_ID) + self.assertTrue(snapshot.deleted) + self.assertEqual('deleted', snapshot.status) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + snapshot.deleted_at) @mock.patch('cinder.db.snapshot_metadata_delete') def test_delete_metadata_key(self, snapshot_metadata_delete): diff --git a/cinder/tests/unit/objects/test_volume.py b/cinder/tests/unit/objects/test_volume.py index 6c5d87557..4f00ad9c7 100644 --- a/cinder/tests/unit/objects/test_volume.py +++ b/cinder/tests/unit/objects/test_volume.py @@ -14,6 +14,8 @@ import ddt import mock +from oslo_utils import timeutils +import pytz import six from 
cinder import context @@ -44,7 +46,8 @@ class TestVolume(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): - pf = model_query().options().options().options().options().options() + pf = (model_query().options().options().options().options().options(). + options()) pf.filter_by().first.return_value = None self.assertRaises(exception.VolumeNotFound, objects.Volume.get_by_id, self.context, 123) @@ -132,8 +135,13 @@ class TestVolume(test_objects.BaseObjectsTestCase): volume.snapshots = objects.SnapshotList() self.assertRaises(exception.ObjectActionError, volume.save) - @mock.patch('cinder.db.volume_destroy') - def test_destroy(self, volume_destroy): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.db.sqlalchemy.api.volume_destroy') + def test_destroy(self, volume_destroy, utcnow_mock): + volume_destroy.return_value = { + 'status': 'deleted', + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) @@ -141,6 +149,11 @@ class TestVolume(test_objects.BaseObjectsTestCase): self.assertTrue(volume_destroy.called) admin_context = volume_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) + self.assertTrue(volume.deleted) + self.assertEqual('deleted', volume.status) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + volume.deleted_at) + self.assertIsNone(volume.migration_status) def test_obj_fields(self): volume = objects.Volume(context=self.context, id=fake.VOLUME_ID, @@ -368,6 +381,9 @@ class TestVolume(test_objects.BaseObjectsTestCase): updated_dest_volume.display_description) self.assertEqual(src_volume.id, updated_dest_volume._name_id) self.assertTrue(volume_update.called) + volume_update.assert_has_calls([ + mock.call(self.context, src_volume.id, mock.ANY), + mock.call(self.context, dest_volume.id, mock.ANY)]) ctxt, vol_id, updates = volume_update.call_args[0] self.assertNotIn('volume_type', updates) @@ -441,3 +457,24 @@ class TestVolumeList(test_objects.BaseObjectsTestCase): mock.sentinel.sorted_dirs, mock.sentinel.filters) self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) + + @mock.patch('cinder.db.volume_include_in_cluster') + def test_include_in_cluster(self, include_mock): + filters = {'host': mock.sentinel.host, + 'cluster_name': mock.sentinel.cluster_name} + cluster = 'new_cluster' + objects.VolumeList.include_in_cluster(self.context, cluster, **filters) + include_mock.assert_called_once_with(self.context, cluster, True, + **filters) + + @mock.patch('cinder.db.volume_include_in_cluster') + def test_include_in_cluster_specify_partial(self, include_mock): + filters = {'host': mock.sentinel.host, + 'cluster_name': mock.sentinel.cluster_name} + cluster = 'new_cluster' + objects.VolumeList.include_in_cluster(self.context, cluster, + mock.sentinel.partial_rename, + **filters) + include_mock.assert_called_once_with(self.context, cluster, + mock.sentinel.partial_rename, + **filters) diff --git a/cinder/tests/unit/objects/test_volume_type.py b/cinder/tests/unit/objects/test_volume_type.py index 4a4a94542..7f837acfc 100644 --- a/cinder/tests/unit/objects/test_volume_type.py +++ b/cinder/tests/unit/objects/test_volume_type.py @@ -13,6 +13,8 @@ # under the License. 
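The destroy() tests above for Volume (and for Snapshot, Service and QoS specs earlier, plus VolumeType just below) all assert the same contract: the DB layer reports deleted/deleted_at, and the object layer exposes deleted_at as a tz-aware UTC datetime even though sqlalchemy hands back naive timestamps. A small sketch of that normalization, assuming the naive values are UTC (the helper name is illustrative):

    import pytz

    def attach_utc_tzinfo(deleted_at):
        # DB rows carry naive UTC datetimes; re-attaching tzinfo is what
        # makes assertions like
        #   assertEqual(utcnow().replace(tzinfo=pytz.UTC), obj.deleted_at)
        # hold after destroy().
        if deleted_at is not None and deleted_at.tzinfo is None:
            return deleted_at.replace(tzinfo=pytz.UTC)
        return deleted_at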
import mock +from oslo_utils import timeutils +import pytz import six from cinder import objects @@ -79,8 +81,12 @@ class TestVolumeType(test_objects.BaseObjectsTestCase): volume_type.name, volume_type.description) - @mock.patch('cinder.volume.volume_types.destroy') - def test_destroy(self, volume_type_destroy): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) + @mock.patch('cinder.db.sqlalchemy.api.volume_type_destroy') + def test_destroy(self, volume_type_destroy, utcnow_mock): + volume_type_destroy.return_value = { + 'deleted': True, + 'deleted_at': utcnow_mock.return_value} db_volume_type = fake_volume.fake_db_volume_type() volume_type = objects.VolumeType._from_db_object(self.context, objects.VolumeType(), @@ -89,6 +95,9 @@ class TestVolumeType(test_objects.BaseObjectsTestCase): self.assertTrue(volume_type_destroy.called) admin_context = volume_type_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) + self.assertTrue(volume_type.deleted) + self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), + volume_type.deleted_at) @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') def test_refresh(self, volume_type_get): diff --git a/cinder/tests/unit/policy.json b/cinder/tests/unit/policy.json index c0616b791..f5533dd8a 100644 --- a/cinder/tests/unit/policy.json +++ b/cinder/tests/unit/policy.json @@ -8,6 +8,7 @@ "volume:get_all": "", "volume:get_volume_metadata": "", "volume:get_volume_image_metadata": "", + "volume:create_volume_metadata": "", "volume:delete_volume_metadata": "", "volume:update_volume_metadata": "", "volume:get_volume_admin_metadata": "rule:admin_api", @@ -97,6 +98,7 @@ "backup:restore": "", "backup:backup-import": "rule:admin_api", "backup:backup-export": "rule:admin_api", + "backup:update": "rule:admin_or_owner", "volume_extension:replication:promote": "rule:admin_api", "volume_extension:replication:reenable": "rule:admin_api", @@ -112,9 +114,30 @@ "consistencygroup:get_cgsnapshot": "", "consistencygroup:get_all_cgsnapshots": "", + "group:group_types_manage": "rule:admin_api", + "group:group_types_specs": "rule:admin_api", + "group:access_group_types_specs": "rule:admin_api", + "group:group_type_access": "rule:admin_or_owner", + + "group:create" : "", + "group:delete": "", + "group:update": "", + "group:get": "", + "group:get_all": "", + + "group:create_group_snapshot": "", + "group:delete_group_snapshot": "", + "group:update_group_snapshot": "", + "group:get_group_snapshot": "", + "group:get_all_group_snapshots": "", + "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api", "message:delete": "rule:admin_or_owner", "message:get": "rule:admin_or_owner", - "message:get_all": "rule:admin_or_owner" + "message:get_all": "rule:admin_or_owner", + + "clusters:get": "rule:admin_api", + "clusters:get_all": "rule:admin_api", + "clusters:update": "rule:admin_api" } diff --git a/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py b/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py index f01c9e8bb..cf92154ee 100644 --- a/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py +++ b/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py @@ -18,22 +18,20 @@ Tests For Allocated Capacity Weigher. 
""" import mock -from oslo_config import cfg +from cinder.common import constants from cinder import context from cinder.scheduler import weights from cinder import test from cinder.tests.unit.scheduler import fakes from cinder.volume import utils -CONF = cfg.CONF - class AllocatedCapacityWeigherTestCase(test.TestCase): def setUp(self): super(AllocatedCapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() - self.weight_handler = weights.HostWeightHandler( + self.weight_handler = weights.OrderedHostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_host(self, hosts, weight_properties=None): @@ -43,14 +41,16 @@ class AllocatedCapacityWeigherTestCase(test.TestCase): [weights.capacity.AllocatedCapacityWeigher], hosts, weight_properties)[0] - @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') - def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): + @mock.patch('cinder.db.sqlalchemy.api.service_get_all') + def _get_all_hosts(self, _mock_service_get_all, disabled=False): ctxt = context.get_admin_context() - fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, + fakes.mock_host_manager_db_calls(_mock_service_get_all, disabled=disabled) host_states = self.host_manager.get_all_host_states(ctxt) - _mock_service_get_all_by_topic.assert_called_once_with( - ctxt, CONF.volume_topic, disabled=disabled) + _mock_service_get_all.assert_called_once_with( + ctxt, + None, # backend_match_level + topic=constants.VOLUME_TOPIC, disabled=disabled) return host_states def test_default_of_spreading_first(self): diff --git a/cinder/tests/unit/scheduler/test_capacity_weigher.py b/cinder/tests/unit/scheduler/test_capacity_weigher.py index fb6826a65..1c5dff624 100644 --- a/cinder/tests/unit/scheduler/test_capacity_weigher.py +++ b/cinder/tests/unit/scheduler/test_capacity_weigher.py @@ -16,23 +16,23 @@ Tests For Capacity Weigher. 
""" +import ddt import mock -from oslo_config import cfg +from cinder.common import constants from cinder import context from cinder.scheduler import weights from cinder import test from cinder.tests.unit.scheduler import fakes from cinder.volume import utils -CONF = cfg.CONF - +@ddt.ddt class CapacityWeigherTestCase(test.TestCase): def setUp(self): super(CapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() - self.weight_handler = weights.HostWeightHandler( + self.weight_handler = weights.OrderedHostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_hosts(self, hosts, weight_properties=None): @@ -43,25 +43,50 @@ class CapacityWeigherTestCase(test.TestCase): hosts, weight_properties) - @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') - def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): + @mock.patch('cinder.db.sqlalchemy.api.service_get_all') + def _get_all_hosts(self, _mock_service_get_all, disabled=False): ctxt = context.get_admin_context() - fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, + fakes.mock_host_manager_db_calls(_mock_service_get_all, disabled=disabled) host_states = self.host_manager.get_all_host_states(ctxt) - _mock_service_get_all_by_topic.assert_called_once_with( - ctxt, CONF.volume_topic, disabled=disabled) + _mock_service_get_all.assert_called_once_with( + ctxt, + None, # backend_match_level + topic=constants.VOLUME_TOPIC, disabled=disabled) return host_states - # If thin_provisioning_support = False, use the following formula: - # free = free_space - math.floor(total * reserved) - # Otherwise, use the following formula: + # If thin and thin_provisioning_support are True, + # use the following formula: # free = (total * host_state.max_over_subscription_ratio # - host_state.provisioned_capacity_gb # - math.floor(total * reserved)) - def test_default_of_spreading_first(self): + # Otherwise, use the following formula: + # free = free_space - math.floor(total * reserved) + + @ddt.data( + {'type_key': 'provisioning:type', 'type_val': 'thin', + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host2'}, + {'type_key': 'provisioning:type', 'type_val': 'thick', + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host1'}, + {'type_key': None, 'type_val': None, + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host2'}, + {'type_key': None, 'type_val': None, + 'vol_type': 'volume_type', 'extra_specs': None, + 'winner': 'host2'}, + {'type_key': None, 'type_val': None, + 'vol_type': None, 'extra_specs': None, + 'winner': 'host2'}, + ) + @ddt.unpack + def test_default_of_spreading_first(self, type_key, type_val, + vol_type, extra_specs, winner): hostinfo_list = self._get_all_hosts() + # Results for the 1st test + # {'provisioning:type': 'thin'}: # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=1024-math.floor(1024*0.1)=922 @@ -81,14 +106,45 @@ class CapacityWeigherTestCase(test.TestCase): # Norm=0.0819000819001 # so, host2 should win: - weighed_host = self._get_weighed_hosts(hostinfo_list)[0] + weight_properties = { + 'size': 1, + vol_type: { + extra_specs: { + type_key: type_val, + } + } + } + weighed_host = self._get_weighed_hosts( + hostinfo_list, + weight_properties=weight_properties)[0] self.assertEqual(1.0, weighed_host.weight) - self.assertEqual('host2', utils.extract_host(weighed_host.obj.host)) + self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) - def 
test_capacity_weight_multiplier1(self): + @ddt.data( + {'type_key': 'provisioning:type', 'type_val': 'thin', + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host4'}, + {'type_key': 'provisioning:type', 'type_val': 'thick', + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host2'}, + {'type_key': None, 'type_val': None, + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host4'}, + {'type_key': None, 'type_val': None, + 'vol_type': 'volume_type', 'extra_specs': None, + 'winner': 'host4'}, + {'type_key': None, 'type_val': None, + 'vol_type': None, 'extra_specs': None, + 'winner': 'host4'}, + ) + @ddt.unpack + def test_capacity_weight_multiplier1(self, type_key, type_val, + vol_type, extra_specs, winner): self.flags(capacity_weight_multiplier=-1.0) hostinfo_list = self._get_all_hosts() + # Results for the 1st test + # {'provisioning:type': 'thin'}: # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=-(1024-math.floor(1024*0.1))=-922 @@ -108,14 +164,45 @@ class CapacityWeigherTestCase(test.TestCase): # Norm=-1.0 # so, host4 should win: - weighed_host = self._get_weighed_hosts(hostinfo_list)[0] + weight_properties = { + 'size': 1, + vol_type: { + extra_specs: { + type_key: type_val, + } + } + } + weighed_host = self._get_weighed_hosts( + hostinfo_list, + weight_properties=weight_properties)[0] self.assertEqual(0.0, weighed_host.weight) - self.assertEqual('host4', utils.extract_host(weighed_host.obj.host)) + self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) - def test_capacity_weight_multiplier2(self): + @ddt.data( + {'type_key': 'provisioning:type', 'type_val': 'thin', + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host2'}, + {'type_key': 'provisioning:type', 'type_val': 'thick', + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host1'}, + {'type_key': None, 'type_val': None, + 'vol_type': 'volume_type', 'extra_specs': 'extra_specs', + 'winner': 'host2'}, + {'type_key': None, 'type_val': None, + 'vol_type': 'volume_type', 'extra_specs': None, + 'winner': 'host2'}, + {'type_key': None, 'type_val': None, + 'vol_type': None, 'extra_specs': None, + 'winner': 'host2'}, + ) + @ddt.unpack + def test_capacity_weight_multiplier2(self, type_key, type_val, + vol_type, extra_specs, winner): self.flags(capacity_weight_multiplier=2.0) hostinfo_list = self._get_all_hosts() + # Results for the 1st test + # {'provisioning:type': 'thin'}: # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))*2=1844 @@ -135,9 +222,19 @@ class CapacityWeigherTestCase(test.TestCase): # Norm=0.1638001638 # so, host2 should win: - weighed_host = self._get_weighed_hosts(hostinfo_list)[0] + weight_properties = { + 'size': 1, + vol_type: { + extra_specs: { + type_key: type_val, + } + } + } + weighed_host = self._get_weighed_hosts( + hostinfo_list, + weight_properties=weight_properties)[0] self.assertEqual(1.0 * 2, weighed_host.weight) - self.assertEqual('host2', utils.extract_host(weighed_host.obj.host)) + self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_no_unknown_or_infinite(self): self.flags(capacity_weight_multiplier=-1.0) diff --git a/cinder/tests/unit/scheduler/test_filter_scheduler.py b/cinder/tests/unit/scheduler/test_filter_scheduler.py index c1e3cd3df..c9ed2f1ce 100644 --- a/cinder/tests/unit/scheduler/test_filter_scheduler.py +++ b/cinder/tests/unit/scheduler/test_filter_scheduler.py 
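The comment blocks in the capacity weigher tests above spell out the two free-space formulas under test; a short worked sketch, with reserved expressed as a fraction (reserved_percentage / 100.0) and the function name purely illustrative:

    import math

    def weigher_free_capacity(total, free_space, provisioned,
                              max_over_subscription_ratio, reserved,
                              thin_provisioning):
        if thin_provisioning:
            # thin: oversubscribed total minus already-provisioned capacity
            return (total * max_over_subscription_ratio
                    - provisioned
                    - math.floor(total * reserved))
        # thick: actual free space minus the reserved slice
        return free_space - math.floor(total * reserved)

    # host1 from the comments above (thick, 10% reserved):
    # 1024 - math.floor(1024 * 0.1) == 922
    assert weigher_free_capacity(1024, 1024, 0, 1.0, 0.1, False) == 922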
@@ -20,8 +20,10 @@ import mock from cinder import context from cinder import exception +from cinder import objects from cinder.scheduler import filter_scheduler from cinder.scheduler import host_manager +from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.scheduler import fakes from cinder.tests.unit.scheduler import test_scheduler from cinder.volume import utils @@ -32,6 +34,58 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): driver_cls = filter_scheduler.FilterScheduler + def test_create_group_no_hosts(self): + # Ensure empty hosts result in NoValidHosts exception. + sched = fakes.FakeFilterScheduler() + + fake_context = context.RequestContext('user', 'project') + request_spec = {'volume_properties': {'project_id': 1, + 'size': 0}, + 'volume_type': {'name': 'Type1', + 'extra_specs': {}}} + request_spec2 = {'volume_properties': {'project_id': 1, + 'size': 0}, + 'volume_type': {'name': 'Type2', + 'extra_specs': {}}} + request_spec_list = [request_spec, request_spec2] + group_spec = {'group_type': {'name': 'GrpType'}, + 'volume_properties': {'project_id': 1, + 'size': 0}} + self.assertRaises(exception.NoValidHost, + sched.schedule_create_group, + fake_context, 'faki-id1', group_spec, + request_spec_list, {}, []) + + @mock.patch('cinder.db.service_get_all') + def test_schedule_group(self, _mock_service_get_all): + # Make sure _schedule_group() can find host successfully. + sched = fakes.FakeFilterScheduler() + sched.host_manager = fakes.FakeHostManager() + fake_context = context.RequestContext('user', 'project', + is_admin=True) + + fakes.mock_host_manager_db_calls(_mock_service_get_all) + + specs = {'capabilities:consistencygroup_support': ' True'} + request_spec = {'volume_properties': {'project_id': 1, + 'size': 0}, + 'volume_type': {'name': 'Type1', + 'extra_specs': specs}} + request_spec2 = {'volume_properties': {'project_id': 1, + 'size': 0}, + 'volume_type': {'name': 'Type2', + 'extra_specs': specs}} + request_spec_list = [request_spec, request_spec2] + group_spec = {'group_type': {'name': 'GrpType'}, + 'volume_properties': {'project_id': 1, + 'size': 0}} + weighed_host = sched._schedule_generic_group(fake_context, + group_spec, + request_spec_list, + {}, []) + self.assertIsNotNone(weighed_host.obj) + self.assertTrue(_mock_service_get_all.called) + def test_create_consistencygroup_no_hosts(self): # Ensure empty hosts result in NoValidHosts exception. sched = fakes.FakeFilterScheduler() @@ -50,16 +104,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched.schedule_create_consistencygroup, fake_context, 'faki-id1', request_spec_list, {}) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_schedule_consistencygroup(self, - _mock_service_get_all_by_topic): + _mock_service_get_all): # Make sure _schedule_group() can find host successfully. 
sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) - fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) + fakes.mock_host_manager_db_calls(_mock_service_get_all) specs = {'capabilities:consistencygroup_support': ' True'} request_spec = {'volume_properties': {'project_id': 1, @@ -75,12 +129,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec_list, {}) self.assertIsNotNone(weighed_host.obj) - self.assertTrue(_mock_service_get_all_by_topic.called) + self.assertTrue(_mock_service_get_all.called) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_schedule_consistencygroup_no_cg_support_in_extra_specs( self, - _mock_service_get_all_by_topic): + _mock_service_get_all): # Make sure _schedule_group() can find host successfully even # when consistencygroup_support is not specified in volume type's # extra specs @@ -89,7 +143,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): fake_context = context.RequestContext('user', 'project', is_admin=True) - fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) + fakes.mock_host_manager_db_calls(_mock_service_get_all) request_spec = {'volume_properties': {'project_id': 1, 'size': 0}, @@ -104,7 +158,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec_list, {}) self.assertIsNotNone(weighed_host.obj) - self.assertTrue(_mock_service_get_all_by_topic.called) + self.assertTrue(_mock_service_get_all.called) def test_create_volume_no_hosts(self): # Ensure empty hosts/child_zones result in NoValidHosts exception. @@ -114,7 +168,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_id': ['fake-id1']} + 'volume_id': fake.VOLUME_ID} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, request_spec, {}) @@ -127,6 +182,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, @@ -141,8 +197,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): # request_spec is missing 'volume_type' request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, - 'volume_id': ['fake-id1']} - self.assertRaises(exception.InvalidVolumeType, + 'volume_id': fake.VOLUME_ID} + request_spec = objects.RequestSpec.from_primitives(request_spec) + self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, request_spec, @@ -169,13 +226,14 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_id': ['fake-id1']} + 'volume_id': fake.VOLUME_ID} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, request_spec, {}) self.assertTrue(self.was_admin) - @mock.patch('cinder.db.service_get_all_by_topic') - def test_schedule_happy_day(self, _mock_service_get_all_by_topic): + 
@mock.patch('cinder.db.service_get_all') + def test_schedule_happy_day(self, _mock_service_get_all): # Make sure there's nothing glaringly wrong with _schedule() # by doing a happy day pass through. sched = fakes.FakeFilterScheduler() @@ -183,16 +241,48 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): fake_context = context.RequestContext('user', 'project', is_admin=True) - fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) + fakes.mock_host_manager_db_calls(_mock_service_get_all) request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host.obj) - self.assertTrue(_mock_service_get_all_by_topic.called) + self.assertTrue(_mock_service_get_all.called) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') + def test_create_volume_clear_host_different_with_group( + self, _mock_service_get_all): + # Ensure we clear those hosts whose backend is not same as + # group's backend. + sched = fakes.FakeFilterScheduler() + sched.host_manager = fakes.FakeHostManager() + fakes.mock_host_manager_db_calls(_mock_service_get_all) + fake_context = context.RequestContext('user', 'project') + request_spec = {'volume_properties': {'project_id': 1, + 'size': 1}, + 'volume_type': {'name': 'LVM_iSCSI'}, + 'group_backend': 'host@lvmdriver'} + weighed_host = sched._schedule(fake_context, request_spec, {}) + self.assertIsNone(weighed_host) + + @mock.patch('cinder.db.service_get_all') + def test_create_volume_host_same_as_group(self, _mock_service_get_all): + # Ensure we don't clear the host whose backend is same as + # group's backend. + sched = fakes.FakeFilterScheduler() + sched.host_manager = fakes.FakeHostManager() + fakes.mock_host_manager_db_calls(_mock_service_get_all) + fake_context = context.RequestContext('user', 'project') + request_spec = {'volume_properties': {'project_id': 1, + 'size': 1}, + 'volume_type': {'name': 'LVM_iSCSI'}, + 'group_backend': 'host1'} + weighed_host = sched._schedule(fake_context, request_spec, {}) + self.assertEqual('host1#lvm1', weighed_host.obj.host) + + @mock.patch('cinder.db.service_get_all') def test_create_volume_clear_host_different_with_cg(self, _mock_service_get_all): # Ensure we clear those hosts whose backend is not same as @@ -205,10 +295,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'CG_backend': 'host@lvmdriver'} + request_spec = objects.RequestSpec.from_primitives(request_spec) weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIsNone(weighed_host) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_create_volume_host_same_as_cg(self, _mock_service_get_all): # Ensure we don't clear the host whose backend is same as # consistencygroup's backend. 
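test_create_volume_clear_host_different_with_group and the CG variants around it pin down one behaviour: when the request spec names a group_backend (or CG_backend), every weighed host on a different backend is cleared before a winner is picked, so the volume either lands on its group's backend or scheduling returns no host. A hedged sketch of that clearing step (helper names are illustrative; the '#' split mirrors how these tests separate backend from pool):

    def extract_backend(host):
        # 'host1#lvm1' -> 'host1'; the pool name lives after '#'
        return host.partition('#')[0]

    def clear_hosts_not_matching_backend(weighed_hosts, group_backend):
        backend = extract_backend(group_backend)
        return [wh for wh in weighed_hosts
                if extract_backend(wh.obj.host) == backend]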
@@ -220,6 +311,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'CG_backend': 'host1'} + request_spec = objects.RequestSpec.from_primitives(request_spec) weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertEqual('host1#lvm1', weighed_host.obj.host) @@ -243,6 +335,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) filter_properties = {} sched._schedule(self.context, request_spec, @@ -259,6 +352,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) filter_properties = {} sched._schedule(self.context, request_spec, @@ -275,6 +369,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) retry = dict(num_attempts=1) filter_properties = dict(retry=retry) @@ -293,6 +388,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) retry = dict(num_attempts=2) filter_properties = dict(retry=retry) @@ -338,50 +434,53 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): return (sched, fake_context) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_host_passes_filters_happy_day(self, _mock_service_get_topic): """Do a successful pass through of with host_passes_filters().""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) ret_host = sched.host_passes_filters(ctx, 'host1#lvm1', request_spec, {}) self.assertEqual('host1', utils.extract_host(ret_host.host)) self.assertTrue(_mock_service_get_topic.called) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_host_passes_filters_default_pool_happy_day( self, _mock_service_get_topic): """Do a successful pass through of with host_passes_filters().""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) ret_host = sched.host_passes_filters(ctx, 'host5#_pool0', request_spec, {}) self.assertEqual('host5', utils.extract_host(ret_host.host)) self.assertTrue(_mock_service_get_topic.called) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_host_passes_filters_no_capacity(self, _mock_service_get_topic): """Fail the host due to insufficient capacity.""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) - request_spec = {'volume_id': 1, + request_spec 
= {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1024}} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.host_passes_filters, ctx, 'host1#lvm1', request_spec, {}) self.assertTrue(_mock_service_get_topic.called) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_retype_policy_never_migrate_pass(self, _mock_service_get_topic): # Retype should pass if current host passes filters and # policy=never. host4 doesn't have enough space to hold an additional @@ -390,18 +489,19 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm4'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4#lvm4'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='never') self.assertEqual('host4', utils.extract_host(host_state.host)) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_retype_with_pool_policy_never_migrate_pass( self, _mock_service_get_topic): # Retype should pass if current host passes filters and @@ -411,65 +511,69 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm3'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host3#lvm3'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='never') self.assertEqual('host3#lvm3', host_state.host) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic): # Retype should fail if current host doesn't pass filters and # policy=never. sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, request_spec, filter_properties={}, migration_policy='never') - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic): # Retype should pass if current host fails filters but another host # is suitable when policy=on-demand. 
sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='on-demand') self.assertEqual('host1', utils.extract_host(host_state.host)) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic): # Retype should fail if current host doesn't pass filters and # no other suitable candidates exist even if policy=on-demand. sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 2048, 'host': 'host4'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, request_spec, filter_properties={}, migration_policy='on-demand') diff --git a/cinder/tests/unit/scheduler/test_host_filters.py b/cinder/tests/unit/scheduler/test_host_filters.py index 996737275..17796208b 100644 --- a/cinder/tests/unit/scheduler/test_host_filters.py +++ b/cinder/tests/unit/scheduler/test_host_filters.py @@ -47,6 +47,7 @@ class HostFiltersTestCase(test.TestCase): self.class_map[cls.__name__] = cls +@ddt.ddt class CapacityFilterTestCase(HostFiltersTestCase): def setUp(self): super(CapacityFilterTestCase, self).setUp() @@ -510,6 +511,33 @@ class CapacityFilterTestCase(HostFiltersTestCase): 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) + @ddt.data( + {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}}, + {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}}, + {'volume_type': {'extra_specs': {}}}, + {'volume_type': {}}, + {'volume_type': None}, + ) + @ddt.unpack + @mock.patch('cinder.utils.service_is_up') + def test_filter_provisioning_type(self, _mock_serv_is_up, volume_type): + _mock_serv_is_up.return_value = True + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100, + 'volume_type': volume_type} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'total_capacity_gb': 500, + 'free_capacity_gb': 100, + 'provisioned_capacity_gb': 400, + 'max_over_subscription_ratio': 2.0, + 'reserved_percentage': 0, + 'thin_provisioning_support': True, + 'thick_provisioning_support': True, + 'updated_at': None, + 'service': service}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + class AffinityFilterTestCase(HostFiltersTestCase): @mock.patch('cinder.utils.service_is_up') diff --git a/cinder/tests/unit/scheduler/test_host_manager.py b/cinder/tests/unit/scheduler/test_host_manager.py index f1f99e8e2..ceb0e1e63 100644 --- a/cinder/tests/unit/scheduler/test_host_manager.py +++ b/cinder/tests/unit/scheduler/test_host_manager.py @@ -19,9 +19,9 @@ Tests For HostManager from datetime import datetime import mock -from oslo_config import cfg from oslo_utils import timeutils +from cinder.common import constants from cinder import exception from cinder import 
objects from cinder.scheduler import filters @@ -30,9 +30,6 @@ from cinder import test from cinder.tests.unit.objects import test_service -CONF = cfg.CONF - - class FakeFilterClass1(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass @@ -121,8 +118,8 @@ class HostManagerTestCase(test.TestCase): self.assertDictMatch(expected, service_states) @mock.patch('cinder.utils.service_is_up') - @mock.patch('cinder.db.service_get_all_by_topic') - def test_has_all_capabilities(self, _mock_service_get_all_by_topic, + @mock.patch('cinder.db.service_get_all') + def test_has_all_capabilities(self, _mock_service_get_all, _mock_service_is_up): _mock_service_is_up.return_value = True services = [ @@ -133,8 +130,8 @@ class HostManagerTestCase(test.TestCase): dict(id=3, host='host3', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), ] - _mock_service_get_all_by_topic.return_value = services - # Create host_manager again to let db.service_get_all_by_topic mock run + _mock_service_get_all.return_value = services + # Create host_manager again to let db.service_get_all mock run self.host_manager = host_manager.HostManager() self.assertFalse(self.host_manager.has_all_capabilities()) @@ -153,12 +150,12 @@ class HostManagerTestCase(test.TestCase): host3_volume_capabs) self.assertTrue(self.host_manager.has_all_capabilities()) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.utils.service_is_up') @mock.patch('oslo_utils.timeutils.utcnow') def test_update_and_get_pools(self, _mock_utcnow, _mock_service_is_up, - _mock_service_get_all_by_topic): + _mock_service_get_all): """Test interaction between update and get_pools This test verifies that each time that get_pools is called it gets the @@ -182,7 +179,7 @@ class HostManagerTestCase(test.TestCase): timestamp=None, reserved_percentage=0), } - _mock_service_get_all_by_topic.return_value = services + _mock_service_get_all.return_value = services _mock_service_is_up.return_value = True _mock_warning = mock.Mock() host_manager.LOG.warn = _mock_warning @@ -206,12 +203,12 @@ class HostManagerTestCase(test.TestCase): self.assertEqual(1, len(res)) self.assertEqual(dates[2], res[0]['capabilities']['timestamp']) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.utils.service_is_up') def test_get_all_host_states(self, _mock_service_is_up, - _mock_service_get_all_by_topic): + _mock_service_get_all): context = 'fake_context' - topic = CONF.volume_topic + topic = constants.VOLUME_TOPIC services = [ dict(id=1, host='host1', topic='volume', disabled=False, @@ -257,16 +254,16 @@ class HostManagerTestCase(test.TestCase): # First test: service_is_up is always True, host5 is disabled, # host4 has no capabilities self.host_manager.service_states = service_states - _mock_service_get_all_by_topic.return_value = services + _mock_service_get_all.return_value = services _mock_service_is_up.return_value = True _mock_warning = mock.Mock() host_manager.LOG.warning = _mock_warning # Get all states self.host_manager.get_all_host_states(context) - _mock_service_get_all_by_topic.assert_called_with(context, - topic, - disabled=False) + _mock_service_get_all.assert_called_with(context, + disabled=False, + topic=topic) expected = [] for service in service_objs: expected.append(mock.call(service)) @@ -284,14 +281,14 @@ class HostManagerTestCase(test.TestCase): # Second test: Now service_is_up returns False 
for host3 _mock_service_is_up.reset_mock() _mock_service_is_up.side_effect = [True, True, False, True] - _mock_service_get_all_by_topic.reset_mock() + _mock_service_get_all.reset_mock() _mock_warning.reset_mock() # Get all states, make sure host 3 is reported as down self.host_manager.get_all_host_states(context) - _mock_service_get_all_by_topic.assert_called_with(context, - topic, - disabled=False) + _mock_service_get_all.assert_called_with(context, + disabled=False, + topic=topic) self.assertEqual(expected, _mock_service_is_up.call_args_list) self.assertGreater(_mock_warning.call_count, 0) @@ -306,10 +303,10 @@ class HostManagerTestCase(test.TestCase): test_service.TestService._compare(self, volume_node, host_state_map[host].service) - @mock.patch('cinder.db.service_get_all_by_topic') + @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.utils.service_is_up') def test_get_pools(self, _mock_service_is_up, - _mock_service_get_all_by_topic): + _mock_service_get_all): context = 'fake_context' services = [ @@ -336,7 +333,7 @@ class HostManagerTestCase(test.TestCase): provisioned_capacity_gb=9300), } - _mock_service_get_all_by_topic.return_value = services + _mock_service_get_all.return_value = services _mock_service_is_up.return_value = True _mock_warning = mock.Mock() host_manager.LOG.warn = _mock_warning diff --git a/cinder/tests/unit/scheduler/test_rpcapi.py b/cinder/tests/unit/scheduler/test_rpcapi.py index 9a0159728..040d70cf4 100644 --- a/cinder/tests/unit/scheduler/test_rpcapi.py +++ b/cinder/tests/unit/scheduler/test_rpcapi.py @@ -86,7 +86,23 @@ class SchedulerRpcAPITestCase(test.TestCase): fanout=True, version='2.0') - def test_create_volume(self): + @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) + def test_create_volume(self, can_send_version): + self._test_scheduler_api('create_volume', + rpc_method='cast', + topic='topic', + volume_id='volume_id', + snapshot_id='snapshot_id', + image_id='image_id', + request_spec='fake_request_spec', + filter_properties='filter_properties', + volume='volume', + version='2.2') + can_send_version.assert_has_calls([mock.call('2.2')]) + + @mock.patch('oslo_messaging.RPCClient.can_send_version', + return_value=False) + def test_create_volume_serialization(self, can_send_version): self._test_scheduler_api('create_volume', rpc_method='cast', topic='topic', @@ -97,6 +113,7 @@ class SchedulerRpcAPITestCase(test.TestCase): filter_properties='filter_properties', volume='volume', version='2.0') + can_send_version.assert_has_calls([mock.call('2.2')]) def test_migrate_volume_to_host(self): self._test_scheduler_api('migrate_volume_to_host', @@ -139,3 +156,16 @@ class SchedulerRpcAPITestCase(test.TestCase): rpc_method='call', filters=None, version='2.0') + + def test_create_group(self): + self._test_scheduler_api('create_group', + rpc_method='cast', + topic='topic', + group='group', + group_spec='group_spec_p', + request_spec_list=['fake_request_spec_list'], + group_filter_properties= + 'fake_group_filter_properties', + filter_properties_list= + ['fake_filter_properties_list'], + version='2.3') diff --git a/cinder/tests/unit/scheduler/test_scheduler.py b/cinder/tests/unit/scheduler/test_scheduler.py index e1f5d1370..5e362f7eb 100644 --- a/cinder/tests/unit/scheduler/test_scheduler.py +++ b/cinder/tests/unit/scheduler/test_scheduler.py @@ -130,7 +130,11 @@ class SchedulerManagerTestCase(test.TestCase): _mock_sched_create.side_effect = exception.NoValidHost(reason="") volume = fake_volume.fake_volume_obj(self.context) topic = 
'fake_topic' - request_spec = {'volume_id': volume.id} + request_spec = {'volume_id': volume.id, + 'volume': {'id': volume.id, '_name_id': None, + 'metadata': {}, 'admin_metadata': {}, + 'glance_metadata': {}}} + request_spec_obj = objects.RequestSpec.from_primitives(request_spec) self.manager.create_volume(self.context, topic, volume.id, request_spec=request_spec, @@ -139,8 +143,8 @@ class SchedulerManagerTestCase(test.TestCase): _mock_volume_update.assert_called_once_with(self.context, volume.id, {'status': 'error'}) - _mock_sched_create.assert_called_once_with(self.context, request_spec, - {}) + _mock_sched_create.assert_called_once_with(self.context, + request_spec_obj, {}) _mock_message_create.assert_called_once_with( self.context, defined_messages.UNABLE_TO_ALLOCATE, @@ -154,13 +158,14 @@ class SchedulerManagerTestCase(test.TestCase): topic = 'fake_topic' request_spec = {'volume_id': volume.id} + request_spec_obj = objects.RequestSpec.from_primitives(request_spec) self.manager.create_volume(self.context, topic, volume.id, request_spec=request_spec, filter_properties={}, volume=volume) - _mock_sched_create.assert_called_once_with(self.context, request_spec, - {}) + _mock_sched_create.assert_called_once_with(self.context, + request_spec_obj, {}) self.assertFalse(_mock_sleep.called) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @@ -174,6 +179,7 @@ class SchedulerManagerTestCase(test.TestCase): topic = 'fake_topic' request_spec = {'volume_id': volume.id} + request_spec_obj = objects.RequestSpec.from_primitives(request_spec) _mock_is_ready.side_effect = [False, False, True] @@ -181,8 +187,8 @@ class SchedulerManagerTestCase(test.TestCase): request_spec=request_spec, filter_properties={}, volume=volume) - _mock_sched_create.assert_called_once_with(self.context, request_spec, - {}) + _mock_sched_create.assert_called_once_with(self.context, + request_spec_obj, {}) calls = [mock.call(1)] * 2 _mock_sleep.assert_has_calls(calls) self.assertEqual(2, _mock_sleep.call_count) @@ -198,6 +204,7 @@ class SchedulerManagerTestCase(test.TestCase): topic = 'fake_topic' request_spec = {'volume_id': volume.id} + request_spec_obj = objects.RequestSpec.from_primitives(request_spec) _mock_is_ready.return_value = True @@ -205,8 +212,8 @@ class SchedulerManagerTestCase(test.TestCase): request_spec=request_spec, filter_properties={}, volume=volume) - _mock_sched_create.assert_called_once_with(self.context, request_spec, - {}) + _mock_sched_create.assert_called_once_with(self.context, + request_spec_obj, {}) self.assertFalse(_mock_sleep.called) @mock.patch('cinder.db.volume_get') @@ -300,7 +307,7 @@ class SchedulerManagerTestCase(test.TestCase): fake_consistencygroup.fake_consistencyobject_obj(self.context) self.manager.driver = filter_scheduler.FilterScheduler LOG = self.mock_object(manager, 'LOG') - self.stubs.Set(db, 'consistencygroup_update', mock.Mock()) + self.mock_object(db, 'consistencygroup_update') ex = exception.CinderException('test') mock_cg.side_effect = ex diff --git a/cinder/tests/unit/scheduler/test_stochastic_weight_handler.py b/cinder/tests/unit/scheduler/test_stochastic_weight_handler.py new file mode 100644 index 000000000..fc66d1cda --- /dev/null +++ b/cinder/tests/unit/scheduler/test_stochastic_weight_handler.py @@ -0,0 +1,67 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
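These manager tests encode a dict-to-object promotion: the scheduler is expected to convert the legacy request_spec dict into a versioned RequestSpec before invoking the driver. A sketch of that step, with driver and context standing in for the real objects:

    # from_primitives builds the versioned object the driver now receives,
    # which is exactly what the assert_called_once_with checks verify.
    request_spec = {'volume_id': volume.id}
    request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
    driver.schedule_create_volume(context, request_spec_obj, {})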
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for stochastic weight handler +""" + +import ddt +import mock +import random + +from cinder.scheduler import base_weight +from cinder.scheduler.weights.stochastic import StochasticHostWeightHandler +from cinder import test + + +@ddt.ddt +class StochasticWeightHandlerTestCase(test.TestCase): + """Test case for StochasticHostWeightHandler.""" + + def setUp(self): + super(StochasticWeightHandlerTestCase, self).setUp() + + @ddt.data( + (0.0, 'A'), + (0.1, 'A'), + (0.2, 'B'), + (0.3, 'B'), + (0.4, 'B'), + (0.5, 'B'), + (0.6, 'B'), + (0.7, 'C'), + (0.8, 'C'), + (0.9, 'C'), + ) + @ddt.unpack + def test_get_weighed_objects_correct(self, rand_value, expected_obj): + self.mock_object(random, + 'random', + mock.Mock(return_value=rand_value)) + + class MapWeigher(base_weight.BaseWeigher): + minval = 0 + maxval = 100 + + def _weigh_object(self, obj, weight_map): + return weight_map[obj] + + weight_map = {'A': 1, 'B': 3, 'C': 2} + objs = sorted(weight_map.keys()) + + weigher_classes = [MapWeigher] + handler = StochasticHostWeightHandler('fake_namespace') + weighted_objs = handler.get_weighed_objects(weigher_classes, + objs, + weight_map) + winner = weighted_objs[0].obj + self.assertEqual(expected_obj, winner) diff --git a/cinder/tests/unit/scheduler/test_volume_number_weigher.py b/cinder/tests/unit/scheduler/test_volume_number_weigher.py index 5512470c7..1a0527fbb 100644 --- a/cinder/tests/unit/scheduler/test_volume_number_weigher.py +++ b/cinder/tests/unit/scheduler/test_volume_number_weigher.py @@ -17,8 +17,8 @@ Tests For Volume Number Weigher. 
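The ddt cases above trace out weighted random ("roulette wheel") selection: each host owns a slice of [0, 1) proportional to its share of the total weight, and a single random draw decides the winner. A self-contained sketch of that selection step (pick_weighted is a hypothetical helper, not cinder's implementation):

    import random

    def pick_weighted(weighed_objs):
        # weighed_objs: list of (obj, weight) pairs; each slice is
        # weight / sum(weights), so the map {'A': 1, 'B': 3, 'C': 2}
        # gives A the interval [0, 1/6), matching the cases above.
        total = float(sum(weight for _obj, weight in weighed_objs))
        draw = random.random()
        cumulative = 0.0
        for obj, weight in weighed_objs:
            cumulative += weight / total
            if draw < cumulative:
                return obj
        return weighed_objs[-1][0]  # guard against float round-off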
""" import mock -from oslo_config import cfg +from cinder.common import constants from cinder import context from cinder.db.sqlalchemy import api from cinder.scheduler import weights @@ -27,8 +27,6 @@ from cinder.tests.unit import fake_constants from cinder.tests.unit.scheduler import fakes from cinder.volume import utils -CONF = cfg.CONF - def fake_volume_data_get_for_host(context, host, count_only=False): host = utils.extract_host(host) @@ -58,7 +56,7 @@ class VolumeNumberWeigherTestCase(test.TestCase): read_deleted="no", overwrite=False) self.host_manager = fakes.FakeHostManager() - self.weight_handler = weights.HostWeightHandler( + self.weight_handler = weights.OrderedHostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_host(self, hosts, weight_properties=None): @@ -69,14 +67,17 @@ class VolumeNumberWeigherTestCase(test.TestCase): hosts, weight_properties)[0] - @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') - def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): + @mock.patch('cinder.db.sqlalchemy.api.service_get_all') + def _get_all_hosts(self, _mock_service_get_all, disabled=False): ctxt = context.get_admin_context() - fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, + fakes.mock_host_manager_db_calls(_mock_service_get_all, disabled=disabled) host_states = self.host_manager.get_all_host_states(ctxt) - _mock_service_get_all_by_topic.assert_called_once_with( - ctxt, CONF.volume_topic, disabled=disabled) + _mock_service_get_all.assert_called_once_with( + ctxt, + None, # backend_match_level + topic=constants.VOLUME_TOPIC, + disabled=disabled) return host_states def test_volume_number_weight_multiplier1(self): diff --git a/cinder/tests/unit/test_cmd.py b/cinder/tests/unit/test_cmd.py index 7de6375dd..cd3abf7a1 100644 --- a/cinder/tests/unit/test_cmd.py +++ b/cinder/tests/unit/test_cmd.py @@ -12,13 +12,14 @@ import datetime import sys +import time +import ddt import mock from oslo_config import cfg +from oslo_utils import timeutils import six -from cinder import rpc - try: import rtslib_fb except ImportError: @@ -33,11 +34,14 @@ from cinder.cmd import rtstool as cinder_rtstool from cinder.cmd import scheduler as cinder_scheduler from cinder.cmd import volume as cinder_volume from cinder.cmd import volume_usage_audit +from cinder.common import constants from cinder import context from cinder import exception from cinder.objects import fields from cinder import test +from cinder.tests.unit import fake_cluster from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_service from cinder.tests.unit import fake_volume from cinder import version @@ -50,7 +54,6 @@ class TestCinderApiCmd(test.TestCase): def setUp(self): super(TestCinderApiCmd, self).setUp() sys.argv = ['cinder-api'] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderApiCmd, self).tearDown() @@ -85,7 +88,6 @@ class TestCinderBackupCmd(test.TestCase): def setUp(self): super(TestCinderBackupCmd, self).setUp() sys.argv = ['cinder-backup'] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderBackupCmd, self).tearDown() @@ -115,7 +117,6 @@ class TestCinderAllCmd(test.TestCase): def setUp(self): super(TestCinderAllCmd, self).setUp() sys.argv = ['cinder-all'] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderAllCmd, self).tearDown() @@ -293,7 +294,6 @@ class 
TestCinderSchedulerCmd(test.TestCase): def setUp(self): super(TestCinderSchedulerCmd, self).setUp() sys.argv = ['cinder-scheduler'] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderSchedulerCmd, self).tearDown() @@ -323,7 +323,6 @@ class TestCinderVolumeCmd(test.TestCase): def setUp(self): super(TestCinderVolumeCmd, self).setUp() sys.argv = ['cinder-volume'] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderVolumeCmd, self).tearDown() @@ -346,7 +345,8 @@ class TestCinderVolumeCmd(test.TestCase): monkey_patch.assert_called_once_with() get_launcher.assert_called_once_with() service_create.assert_called_once_with(binary='cinder-volume', - coordination=True) + coordination=True, + cluster=None) launcher.launch_service.assert_called_once_with(server) launcher.wait.assert_called_once_with() @@ -369,24 +369,31 @@ class TestCinderVolumeCmd(test.TestCase): monkey_patch.assert_called_once_with() get_launcher.assert_called_once_with() c1 = mock.call(binary='cinder-volume', host='host@backend1', - service_name='backend1', coordination=True) + service_name='backend1', coordination=True, + cluster=None) c2 = mock.call(binary='cinder-volume', host='host@backend2', - service_name='backend2', coordination=True) + service_name='backend2', coordination=True, + cluster=None) service_create.assert_has_calls([c1, c2]) self.assertEqual(len(backends), launcher.launch_service.call_count) launcher.wait.assert_called_once_with() +@ddt.ddt class TestCinderManageCmd(test.TestCase): def setUp(self): super(TestCinderManageCmd, self).setUp() sys.argv = ['cinder-manage'] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderManageCmd, self).tearDown() + def _test_purge_invalid_age_in_days(self, age_in_days): + db_cmds = cinder_manage.DbCommands() + ex = self.assertRaises(SystemExit, db_cmds.purge, age_in_days) + self.assertEqual(1, ex.code) + @mock.patch('cinder.db.migration.db_sync') def test_db_commands_sync(self, db_sync): version = mock.MagicMock() @@ -422,6 +429,36 @@ class TestCinderManageCmd(test.TestCase): version_cmds.__call__() version_string.assert_called_once_with() + def test_purge_age_in_days_value_equal_to_zero(self): + age_in_days = 0 + self._test_purge_invalid_age_in_days(age_in_days) + + def test_purge_with_negative_age_in_days(self): + age_in_days = -1 + self._test_purge_invalid_age_in_days(age_in_days) + + def test_purge_exceeded_age_in_days_limit(self): + age_in_days = int(time.time() / 86400) + 1 + self._test_purge_invalid_age_in_days(age_in_days) + + @mock.patch('cinder.db.sqlalchemy.api.purge_deleted_rows') + @mock.patch('cinder.context.get_admin_context') + def test_purge_less_than_age_in_days_limit(self, get_admin_context, + purge_deleted_rows): + age_in_days = int(time.time() / 86400) - 1 + ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, + is_admin=True) + get_admin_context.return_value = ctxt + + purge_deleted_rows.return_value = None + + db_cmds = cinder_manage.DbCommands() + db_cmds.purge(age_in_days) + + get_admin_context.assert_called_once_with() + purge_deleted_rows.assert_called_once_with( + ctxt, age_in_days=age_in_days) + @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.context.get_admin_context') def test_host_commands_list(self, get_admin_context, service_get_all): @@ -439,7 +476,7 @@ class TestCinderManageCmd(test.TestCase): host_cmds.list() get_admin_context.assert_called_once_with() - 
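Taken together, the purge tests above fix a validation contract: the age must be a positive number of days and cannot reach back past the Unix epoch, and only then is the row purge attempted. A sketch of bounds checking consistent with those expectations (not the literal cinder-manage code; the exact boundary comparison is an assumption):

    import sys
    import time

    def purge(age_in_days):
        if age_in_days <= 0:
            print("Must supply a positive value for age.")
            sys.exit(1)
        if age_in_days >= int(time.time() / 86400):
            print("Age cannot exceed the number of days since the epoch.")
            sys.exit(1)
        # On valid input the command delegates to the DB layer, e.g.:
        # db.sqlalchemy.api.purge_deleted_rows(ctxt, age_in_days=age_in_days)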
service_get_all.assert_called_once_with(mock.sentinel.ctxt, None) + service_get_all.assert_called_once_with(mock.sentinel.ctxt) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.db.service_get_all') @@ -462,7 +499,7 @@ class TestCinderManageCmd(test.TestCase): host_cmds.list(zone='fake-az1') get_admin_context.assert_called_once_with() - service_get_all.assert_called_once_with(mock.sentinel.ctxt, None) + service_get_all.assert_called_once_with(mock.sentinel.ctxt) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.objects.base.CinderObjectSerializer') @@ -472,7 +509,6 @@ class TestCinderManageCmd(test.TestCase): @mock.patch('oslo_messaging.Target') def test_volume_commands_init(self, messaging_target, rpc_initialized, rpc_init, get_client, object_serializer): - CONF.set_override('volume_topic', 'fake-topic') mock_target = messaging_target.return_value mock_rpc_client = get_client.return_value @@ -481,7 +517,7 @@ class TestCinderManageCmd(test.TestCase): rpc_initialized.assert_called_once_with() rpc_init.assert_called_once_with(CONF) - messaging_target.assert_called_once_with(topic='fake-topic') + messaging_target.assert_called_once_with(topic=constants.VOLUME_TOPIC) get_client.assert_called_once_with(mock_target, serializer=object_serializer()) self.assertEqual(mock_rpc_client, rpc_client) @@ -717,7 +753,7 @@ class TestCinderManageCmd(test.TestCase): service_get_all.return_value = [service] service_is_up.return_value = True with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s" + format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s" print_format = format % ('Binary', 'Host', 'Zone', @@ -725,13 +761,11 @@ class TestCinderManageCmd(test.TestCase): 'State', 'Updated At', 'RPC Version', - 'Object Version') + 'Object Version', + 'Cluster') rpc_version = service['rpc_current_version'] - if not rpc_version: - rpc_version = rpc.LIBERTY_RPC_VERSIONS[service['binary']] object_version = service['object_current_version'] - if not object_version: - object_version = 'liberty' + cluster = service.get('cluster_name', '') service_format = format % (service['binary'], service['host'].partition('.')[0], service['availability_zone'], @@ -739,7 +773,8 @@ class TestCinderManageCmd(test.TestCase): ':-)', service['updated_at'], rpc_version, - object_version) + object_version, + cluster) expected_out = print_format + '\n' + service_format + '\n' service_cmds = cinder_manage.ServiceCommands() @@ -747,7 +782,7 @@ class TestCinderManageCmd(test.TestCase): self.assertEqual(expected_out, fake_out.getvalue()) get_admin_context.assert_called_with() - service_get_all.assert_called_with(ctxt, None) + service_get_all.assert_called_with(ctxt) def test_service_commands_list(self): service = {'binary': 'cinder-binary', @@ -756,31 +791,203 @@ class TestCinderManageCmd(test.TestCase): 'updated_at': '2014-06-30 11:22:33', 'disabled': False, 'rpc_current_version': '1.1', - 'object_current_version': '1.1'} + 'object_current_version': '1.1', + 'cluster_name': 'my_cluster'} for binary in ('volume', 'scheduler', 'backup'): service['binary'] = 'cinder-%s' % binary self._test_service_commands_list(service) - def test_service_commands_list_no_updated_at(self): + def test_service_commands_list_no_updated_at_or_cluster(self): service = {'binary': 'cinder-binary', 'host': 'fake-host.fake-domain', 'availability_zone': 'fake-zone', 'updated_at': None, 'disabled': False, - 'rpc_current_version': None, - 
'object_current_version': None} + 'rpc_current_version': '1.1', + 'object_current_version': '1.1'} for binary in ('volume', 'scheduler', 'backup'): service['binary'] = 'cinder-%s' % binary self._test_service_commands_list(service) - def test_get_arg_string(self): - args1 = "foobar" - args2 = "-foo bar" - args3 = "--foo bar" + @ddt.data(('foobar', 'foobar'), ('-foo bar', 'foo bar'), + ('--foo bar', 'foo bar'), ('--foo-bar', 'foo_bar'), + ('---foo-bar', '_foo_bar')) + @ddt.unpack + def test_get_arg_string(self, arg, expected): + self.assertEqual(expected, cinder_manage.get_arg_string(arg)) - self.assertEqual("foobar", cinder_manage.get_arg_string(args1)) - self.assertEqual("foo bar", cinder_manage.get_arg_string(args2)) - self.assertEqual("foo bar", cinder_manage.get_arg_string(args3)) + def test_fetch_func_args(self): + @cinder_manage.args('--full-rename') + @cinder_manage.args('--different-dest', dest='my_dest') + @cinder_manage.args('current') + def my_func(): + pass + + expected = {'full_rename': mock.sentinel.full_rename, + 'my_dest': mock.sentinel.my_dest, + 'current': mock.sentinel.current} + + with mock.patch.object(cinder_manage, 'CONF') as mock_conf: + mock_conf.category = mock.Mock(**expected) + self.assertDictEqual(expected, + cinder_manage.fetch_func_args(my_func)) + + @mock.patch('cinder.context.get_admin_context') + @mock.patch('cinder.db.cluster_get_all') + def tests_cluster_commands_list(self, get_all_mock, get_admin_mock, + ): + now = timeutils.utcnow() + cluster = fake_cluster.fake_cluster_orm(num_hosts=4, num_down_hosts=2, + created_at=now, + last_heartbeat=now) + get_all_mock.return_value = [cluster] + + ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) + get_admin_mock.return_value = ctxt + + with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: + format_ = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s" + print_format = format_ % ('Name', + 'Binary', + 'Status', + 'State', + 'Heartbeat', + 'Hosts', + 'Down Hosts', + 'Updated At') + cluster_format = format_ % (cluster.name, cluster.binary, + 'enabled', ':-)', + cluster.last_heartbeat, + cluster.num_hosts, + cluster.num_down_hosts, + None) + expected_out = print_format + '\n' + cluster_format + '\n' + + cluster_cmds = cinder_manage.ClusterCommands() + cluster_cmds.list() + + self.assertEqual(expected_out, fake_out.getvalue()) + get_admin_mock.assert_called_with() + get_all_mock.assert_called_with(ctxt, is_up=None, + get_services=False, + services_summary=True, + read_deleted='no') + + @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) + @mock.patch('cinder.context.get_admin_context') + def test_cluster_commands_remove_not_found(self, admin_ctxt_mock, + cluster_get_mock): + cluster_get_mock.side_effect = exception.ClusterNotFound(id=1) + cluster_commands = cinder_manage.ClusterCommands() + exit = cluster_commands.remove(False, 'abinary', 'acluster') + self.assertEqual(2, exit) + cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, + None, name='acluster', + binary='abinary', + get_services=False) + + @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) + @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) + @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) + @mock.patch('cinder.context.get_admin_context') + def test_cluster_commands_remove_fail_has_hosts(self, admin_ctxt_mock, + cluster_get_mock, + cluster_destroy_mock, + service_destroy_mock): + cluster = fake_cluster.fake_cluster_ovo(mock.Mock()) + 
cluster_get_mock.return_value = cluster + cluster_destroy_mock.side_effect = exception.ClusterHasHosts(id=1) + cluster_commands = cinder_manage.ClusterCommands() + exit = cluster_commands.remove(False, 'abinary', 'acluster') + self.assertEqual(2, exit) + cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, + None, name='acluster', + binary='abinary', + get_services=False) + cluster_destroy_mock.assert_called_once_with( + admin_ctxt_mock.return_value.elevated.return_value, cluster.id) + service_destroy_mock.assert_not_called() + + @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) + @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) + @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) + @mock.patch('cinder.context.get_admin_context') + def test_cluster_commands_remove_success_no_hosts(self, admin_ctxt_mock, + cluster_get_mock, + cluster_destroy_mock, + service_destroy_mock): + cluster = fake_cluster.fake_cluster_orm() + cluster_get_mock.return_value = cluster + cluster_commands = cinder_manage.ClusterCommands() + exit = cluster_commands.remove(False, 'abinary', 'acluster') + self.assertIsNone(exit) + cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, + None, name='acluster', + binary='abinary', + get_services=False) + cluster_destroy_mock.assert_called_once_with( + admin_ctxt_mock.return_value.elevated.return_value, cluster.id) + service_destroy_mock.assert_not_called() + + @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) + @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) + @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) + @mock.patch('cinder.context.get_admin_context') + def test_cluster_commands_remove_recursive(self, admin_ctxt_mock, + cluster_get_mock, + cluster_destroy_mock, + service_destroy_mock): + cluster = fake_cluster.fake_cluster_orm() + cluster.services = [fake_service.fake_service_orm()] + cluster_get_mock.return_value = cluster + cluster_commands = cinder_manage.ClusterCommands() + exit = cluster_commands.remove(True, 'abinary', 'acluster') + self.assertIsNone(exit) + cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, + None, name='acluster', + binary='abinary', + get_services=True) + cluster_destroy_mock.assert_called_once_with( + admin_ctxt_mock.return_value.elevated.return_value, cluster.id) + service_destroy_mock.assert_called_once_with( + admin_ctxt_mock.return_value.elevated.return_value, + cluster.services[0]['id']) + + @mock.patch('cinder.db.sqlalchemy.api.volume_include_in_cluster', + auto_specs=True, return_value=1) + @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_include_in_cluster', + auto_specs=True, return_value=2) + @mock.patch('cinder.context.get_admin_context') + def test_cluster_commands_rename(self, admin_ctxt_mock, + volume_include_mock, cg_include_mock): + """Test that cluster rename changes volumes and cgs.""" + current_cluster_name = mock.sentinel.old_cluster_name + new_cluster_name = mock.sentinel.new_cluster_name + partial = mock.sentinel.partial + cluster_commands = cinder_manage.ClusterCommands() + exit = cluster_commands.rename(partial, current_cluster_name, + new_cluster_name) + + self.assertIsNone(exit) + volume_include_mock.assert_called_once_with( + admin_ctxt_mock.return_value, new_cluster_name, partial, + cluster_name=current_cluster_name) + cg_include_mock.assert_called_once_with( + admin_ctxt_mock.return_value, new_cluster_name, partial, + 
cluster_name=current_cluster_name) + + @mock.patch('cinder.db.sqlalchemy.api.volume_include_in_cluster', + auto_specs=True, return_value=0) + @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_include_in_cluster', + auto_specs=True, return_value=0) + @mock.patch('cinder.context.get_admin_context') + def test_cluster_commands_rename_no_changes(self, admin_ctxt_mock, + volume_include_mock, + cg_include_mock): + """Test that we return an error when cluster rename has no effect.""" + cluster_commands = cinder_manage.ClusterCommands() + exit = cluster_commands.rename(False, 'cluster', 'new_cluster') + self.assertEqual(2, exit) @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_argv_lt_2(self, register_cli_opt): @@ -858,8 +1065,7 @@ class TestCinderManageCmd(test.TestCase): self.assertEqual(2, exit) @mock.patch('cinder.db.service_destroy') - @mock.patch('cinder.db.service_get_by_args', - return_value = {'id': '12'}) + @mock.patch('cinder.db.service_get', return_value = {'id': '12'}) def test_remove_service_success(self, mock_get_by_args, mock_service_destroy): service_commands = cinder_manage.ServiceCommands() @@ -871,7 +1077,6 @@ class TestCinderRtstoolCmd(test.TestCase): def setUp(self): super(TestCinderRtstoolCmd, self).setUp() sys.argv = ['cinder-rtstool'] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) self.INITIATOR_IQN = 'iqn.2015.12.com.example.openstack.i:UNIT1' self.TARGET_IQN = 'iqn.2015.12.com.example.openstack.i:TARGET1' @@ -1469,7 +1674,6 @@ class TestCinderVolumeUsageAuditCmd(test.TestCase): def setUp(self): super(TestCinderVolumeUsageAuditCmd, self).setUp() sys.argv = ['cinder-volume-usage-audit'] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderVolumeUsageAuditCmd, self).tearDown() diff --git a/cinder/tests/unit/test_context.py b/cinder/tests/unit/test_context.py index 3cd7742d6..7508f23bf 100644 --- a/cinder/tests/unit/test_context.py +++ b/cinder/tests/unit/test_context.py @@ -58,7 +58,7 @@ class ContextTestCase(test.TestCase): def test_request_context_elevated(self): user_context = context.RequestContext( - 'fake_user', 'fake_project', admin=False) + 'fake_user', 'fake_project', is_admin=False) self.assertFalse(user_context.is_admin) admin_context = user_context.elevated() self.assertFalse(user_context.is_admin) diff --git a/cinder/tests/unit/test_coprhd.py b/cinder/tests/unit/test_coprhd.py index 1ee3df223..5f3d08440 100644 --- a/cinder/tests/unit/test_coprhd.py +++ b/cinder/tests/unit/test_coprhd.py @@ -16,6 +16,7 @@ from mock import Mock from cinder import context +from cinder import exception from cinder.objects import fields from cinder import test from cinder.volume.drivers.coprhd import common as coprhd_common @@ -193,7 +194,9 @@ def get_test_volume_data(volume_type_id): 'project_id': 'project', 'display_name': 'test-vol1', 'display_description': 'test volume', - 'volume_type_id': volume_type_id} + 'volume_type_id': volume_type_id, + 'provider_id': '1', + } return test_volume @@ -261,7 +264,8 @@ def get_test_CG_snap_data(volume_type_id): 'consistencygroup_id': '123456789', 'status': fields.ConsistencyGroupStatus.AVAILABLE, 'snapshots': [], - 'consistencygroup': get_test_CG_data(volume_type_id) + 'consistencygroup': get_test_CG_data(volume_type_id), + 'cgsnapshot_id': '1', } return test_CG_snapshot @@ -351,7 +355,6 @@ class MockedEMCCoprHDDriverCommon(coprhd_common.EMCCoprHDDriverCommon): self.host_obj = Mock() self.host_obj.list_by_tenant.return_value = [] - 
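The rename tests above spell out the command's contract: the new cluster name is pushed to both volumes and consistency groups, and exit code 2 is reserved for the case where neither table changed. A sketch of that control flow (simplified, error handling elided; only the DB calls are taken from the assertions):

    def rename(ctxt, partial, current_name, new_name):
        changed = db.volume_include_in_cluster(
            ctxt, new_name, partial, cluster_name=current_name)
        changed += db.consistencygroup_include_in_cluster(
            ctxt, new_name, partial, cluster_name=current_name)
        if not changed:
            # Nothing matched the current cluster name.
            return 2
        # Falling through returns None, which the success tests expect.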
self.host_obj.search_by_name.return_value = [] self.host_obj.list_all.return_value = [{'id': "host1_id", 'name': "host1"}] self.host_obj.list_initiators.return_value = [ @@ -398,9 +401,9 @@ class EMCCoprHDISCSIDriverTest(test.TestCase): self.volume_type_id = self.create_coprhd_volume_type() - self.stubs.Set(coprhd_iscsi.EMCCoprHDISCSIDriver, - '_get_common_driver', - self._get_mocked_common_driver) + self.mock_object(coprhd_iscsi.EMCCoprHDISCSIDriver, + '_get_common_driver', + self._get_mocked_common_driver) self.driver = coprhd_iscsi.EMCCoprHDISCSIDriver( configuration=self.configuration) @@ -503,8 +506,8 @@ class EMCCoprHDISCSIDriverTest(test.TestCase): cg_data = get_test_CG_data(self.volume_type_id) ctx = context.get_admin_context() self.driver.create_consistencygroup(ctx, cg_data) - model_update, volumes_model_update = \ - self.driver.delete_consistencygroup(ctx, cg_data, []) + model_update, volumes_model_update = ( + self.driver.delete_consistencygroup(ctx, cg_data, [])) self.assertEqual([], volumes_model_update, 'Unexpected return data') def test_create_update_delete_CG(self): @@ -515,14 +518,14 @@ class EMCCoprHDISCSIDriverTest(test.TestCase): volume = get_test_volume_data(self.volume_type_id) self.driver.create_volume(volume) - model_update, ret1, ret2 = \ - self.driver.update_consistencygroup(ctx, cg_data, [volume], []) + model_update, ret1, ret2 = ( + self.driver.update_consistencygroup(ctx, cg_data, [volume], [])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) - model_update, volumes_model_update = \ - self.driver.delete_consistencygroup(ctx, cg_data, [volume]) + model_update, volumes_model_update = ( + self.driver.delete_consistencygroup(ctx, cg_data, [volume])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) self.assertEqual([{'status': 'deleted', 'id': '1'}], @@ -532,14 +535,14 @@ class EMCCoprHDISCSIDriverTest(test.TestCase): cg_snap_data = get_test_CG_snap_data(self.volume_type_id) ctx = context.get_admin_context() - model_update, snapshots_model_update = \ - self.driver.create_cgsnapshot(ctx, cg_snap_data, []) + model_update, snapshots_model_update = ( + self.driver.create_cgsnapshot(ctx, cg_snap_data, [])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) self.assertEqual([], snapshots_model_update, 'Unexpected return data') - model_update, snapshots_model_update = \ - self.driver.delete_cgsnapshot(ctx, cg_snap_data, []) + model_update, snapshots_model_update = ( + self.driver.delete_cgsnapshot(ctx, cg_snap_data, [])) self.assertEqual({}, model_update, 'Unexpected return data') self.assertEqual([], snapshots_model_update, 'Unexpected return data') @@ -565,9 +568,9 @@ class EMCCoprHDFCDriverTest(test.TestCase): self.volume_type_id = self.create_coprhd_volume_type() - self.stubs.Set(coprhd_fc.EMCCoprHDFCDriver, - '_get_common_driver', - self._get_mocked_common_driver) + self.mock_object(coprhd_fc.EMCCoprHDFCDriver, + '_get_common_driver', + self._get_mocked_common_driver) self.driver = coprhd_fc.EMCCoprHDFCDriver( configuration=self.configuration) @@ -640,6 +643,20 @@ class EMCCoprHDFCDriverTest(test.TestCase): self.driver.delete_volume(src_vol_data) self.driver.delete_volume(volume_data) + def test_create_volume_from_cg_snapshot(self): + ctx = context.get_admin_context() + + volume_data = get_test_volume_data(self.volume_type_id) + cg_snap_data = get_test_CG_snap_data(self.volume_type_id) + + self.driver.create_cgsnapshot(ctx, cg_snap_data, []) + 
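The CoprHD hunks replace mox-style stubbing with the test base class's mock_object helper, which registers each patch for automatic cleanup instead of relying on stubs teardown. The pattern, with cls/attr/replacement as placeholders:

    # Before: self.stubs.Set(cls, 'attr', replacement)  (manual teardown)
    # After: patched through mock and undone automatically at test cleanup.
    self.mock_object(cls, 'attr', replacement)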
self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + volume_data, cg_snap_data) + + self.driver.delete_cgsnapshot(ctx, cg_snap_data, []) + self.driver.delete_volume(volume_data) + def test_extend_volume(self): volume_data = get_test_volume_data(self.volume_type_id) self.driver.create_volume(volume_data) @@ -691,8 +708,8 @@ class EMCCoprHDFCDriverTest(test.TestCase): cg_data = get_test_CG_data(self.volume_type_id) ctx = context.get_admin_context() self.driver.create_consistencygroup(ctx, cg_data) - model_update, volumes_model_update = \ - self.driver.delete_consistencygroup(ctx, cg_data, []) + model_update, volumes_model_update = ( + self.driver.delete_consistencygroup(ctx, cg_data, [])) self.assertEqual([], volumes_model_update, 'Unexpected return data') def test_create_update_delete_CG(self): @@ -703,14 +720,14 @@ class EMCCoprHDFCDriverTest(test.TestCase): volume = get_test_volume_data(self.volume_type_id) self.driver.create_volume(volume) - model_update, ret1, ret2 = \ - self.driver.update_consistencygroup(ctx, cg_data, [volume], []) + model_update, ret1, ret2 = ( + self.driver.update_consistencygroup(ctx, cg_data, [volume], [])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) - model_update, volumes_model_update = \ - self.driver.delete_consistencygroup(ctx, cg_data, [volume]) + model_update, volumes_model_update = ( + self.driver.delete_consistencygroup(ctx, cg_data, [volume])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) self.assertEqual([{'status': 'deleted', 'id': '1'}], @@ -720,14 +737,14 @@ class EMCCoprHDFCDriverTest(test.TestCase): cg_snap_data = get_test_CG_snap_data(self.volume_type_id) ctx = context.get_admin_context() - model_update, snapshots_model_update = \ - self.driver.create_cgsnapshot(ctx, cg_snap_data, []) + model_update, snapshots_model_update = ( + self.driver.create_cgsnapshot(ctx, cg_snap_data, [])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) self.assertEqual([], snapshots_model_update, 'Unexpected return data') - model_update, snapshots_model_update = \ - self.driver.delete_cgsnapshot(ctx, cg_snap_data, []) + model_update, snapshots_model_update = ( + self.driver.delete_cgsnapshot(ctx, cg_snap_data, [])) self.assertEqual({}, model_update, 'Unexpected return data') self.assertEqual([], snapshots_model_update, 'Unexpected return data') @@ -749,7 +766,7 @@ class EMCCoprHDScaleIODriverTest(test.TestCase): self.configuration.coprhd_tenant = "tenant" self.configuration.coprhd_project = "project" self.configuration.coprhd_varray = "varray" - self.configuration.coprhd_scaleio_rest_gateway_ip = "10.10.10.11" + self.configuration.coprhd_scaleio_rest_gateway_host = "10.10.10.11" self.configuration.coprhd_scaleio_rest_gateway_port = 443 self.configuration.coprhd_scaleio_rest_server_username = ( "scaleio_username") @@ -761,12 +778,12 @@ class EMCCoprHDScaleIODriverTest(test.TestCase): self.volume_type_id = self.create_coprhd_volume_type() - self.stubs.Set(coprhd_scaleio.EMCCoprHDScaleIODriver, - '_get_common_driver', - self._get_mocked_common_driver) - self.stubs.Set(coprhd_scaleio.EMCCoprHDScaleIODriver, - '_get_client_id', - self._get_client_id) + self.mock_object(coprhd_scaleio.EMCCoprHDScaleIODriver, + '_get_common_driver', + self._get_mocked_common_driver) + self.mock_object(coprhd_scaleio.EMCCoprHDScaleIODriver, + '_get_client_id', + self._get_client_id) self.driver = coprhd_scaleio.EMCCoprHDScaleIODriver( 
configuration=self.configuration) @@ -861,6 +878,7 @@ class EMCCoprHDScaleIODriverTest(test.TestCase): 'hostIP': '10.0.0.2', 'iopsLimit': None, 'scaleIO_volname': 'test-vol1', + 'scaleIO_volume_id': '1', 'serverIP': '10.10.10.11', 'serverPassword': 'scaleio_password', 'serverPort': 443, @@ -878,8 +896,8 @@ class EMCCoprHDScaleIODriverTest(test.TestCase): cg_data = get_test_CG_data(self.volume_type_id) ctx = context.get_admin_context() self.driver.create_consistencygroup(ctx, cg_data) - model_update, volumes_model_update = \ - self.driver.delete_consistencygroup(ctx, cg_data, []) + model_update, volumes_model_update = ( + self.driver.delete_consistencygroup(ctx, cg_data, [])) self.assertEqual([], volumes_model_update, 'Unexpected return data') def test_create_update_delete_CG(self): @@ -890,14 +908,14 @@ class EMCCoprHDScaleIODriverTest(test.TestCase): volume = get_test_volume_data(self.volume_type_id) self.driver.create_volume(volume) - model_update, ret1, ret2 = \ - self.driver.update_consistencygroup(ctx, cg_data, [volume], []) + model_update, ret1, ret2 = ( + self.driver.update_consistencygroup(ctx, cg_data, [volume], [])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) - model_update, volumes_model_update = \ - self.driver.delete_consistencygroup(ctx, cg_data, [volume]) + model_update, volumes_model_update = ( + self.driver.delete_consistencygroup(ctx, cg_data, [volume])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) self.assertEqual([{'status': 'deleted', 'id': '1'}], @@ -907,13 +925,13 @@ class EMCCoprHDScaleIODriverTest(test.TestCase): cg_snap_data = get_test_CG_snap_data(self.volume_type_id) ctx = context.get_admin_context() - model_update, snapshots_model_update = \ - self.driver.create_cgsnapshot(ctx, cg_snap_data, []) + model_update, snapshots_model_update = ( + self.driver.create_cgsnapshot(ctx, cg_snap_data, [])) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) self.assertEqual([], snapshots_model_update, 'Unexpected return data') - model_update, snapshots_model_update = \ - self.driver.delete_cgsnapshot(ctx, cg_snap_data, []) + model_update, snapshots_model_update = ( + self.driver.delete_cgsnapshot(ctx, cg_snap_data, [])) self.assertEqual({}, model_update, 'Unexpected return data') self.assertEqual([], snapshots_model_update, 'Unexpected return data') diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py index 7e60bcad1..592fbcb87 100644 --- a/cinder/tests/unit/test_db_api.py +++ b/cinder/tests/unit/test_db_api.py @@ -16,10 +16,12 @@ import datetime +import ddt import enum import mock +from oslo_config import cfg +from oslo_utils import timeutils from oslo_utils import uuidutils -import six from cinder.api import common from cinder import context @@ -33,9 +35,11 @@ from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import utils +CONF = cfg.CONF THREE = 3 THREE_HUNDREDS = 300 ONE_HUNDREDS = 100 +UTC_NOW = timeutils.utcnow() def _quota_reserve(context, project_id): @@ -67,45 +71,7 @@ def _quota_reserve(context, project_id): ) -class ModelsObjectComparatorMixin(object): - def _dict_from_object(self, obj, ignored_keys): - if ignored_keys is None: - ignored_keys = [] - if isinstance(obj, dict): - items = obj.items() - else: - items = obj.iteritems() - return {k: v for k, v in items - if k not in ignored_keys} - - def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): - obj1 = 
self._dict_from_object(obj1, ignored_keys) - obj2 = self._dict_from_object(obj2, ignored_keys) - - self.assertEqual( - len(obj1), len(obj2), - "Keys mismatch: %s" % six.text_type( - set(obj1.keys()) ^ set(obj2.keys()))) - for key, value in obj1.items(): - self.assertEqual(value, obj2[key]) - - def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None): - obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) - sort_key = lambda d: [d[k] for k in sorted(d)] - conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key) - - self.assertListEqual(conv_and_sort(objs1), conv_and_sort(objs2)) - - def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): - self.assertEqual(len(primitives1), len(primitives2)) - for primitive in primitives1: - self.assertIn(primitive, primitives2) - - for primitive in primitives2: - self.assertIn(primitive, primitives1) - - -class BaseTest(test.TestCase, ModelsObjectComparatorMixin): +class BaseTest(test.TestCase, test.ModelsObjectComparatorMixin): def setUp(self): super(BaseTest, self).setUp() self.ctxt = context.get_admin_context() @@ -118,6 +84,7 @@ class DBAPIServiceTestCase(BaseTest): def _get_base_values(self): return { 'host': 'fake_host', + 'cluster_name': None, 'binary': 'fake_binary', 'topic': 'fake_topic', 'report_count': 3, @@ -127,23 +94,35 @@ class DBAPIServiceTestCase(BaseTest): def _create_service(self, values): v = self._get_base_values() v.update(values) - return db.service_create(self.ctxt, v) + service = db.service_create(self.ctxt, v) + # We need to read the contents from the DB if we have set updated_at + # or created_at fields + if 'updated_at' in values or 'created_at' in values: + service = db.service_get(self.ctxt, service.id) + return service def test_service_create(self): - service = self._create_service({}) + # Add a cluster value to the service + values = {'cluster_name': 'cluster'} + service = self._create_service(values) self.assertIsNotNone(service['id']) - for key, value in self._get_base_values().items(): + expected = self._get_base_values() + expected.update(values) + for key, value in expected.items(): self.assertEqual(value, service[key]) def test_service_destroy(self): service1 = self._create_service({}) service2 = self._create_service({'host': 'fake_host2'}) - db.service_destroy(self.ctxt, service1['id']) + self.assertDictEqual( + {'deleted': True, 'deleted_at': mock.ANY}, + db.service_destroy(self.ctxt, service1['id'])) self.assertRaises(exception.ServiceNotFound, db.service_get, self.ctxt, service1['id']) - self._assertEqualObjects(db.service_get(self.ctxt, service2['id']), - service2) + self._assertEqualObjects( + service2, + db.service_get(self.ctxt, service2['id'])) def test_service_update(self): service = self._create_service({}) @@ -168,6 +147,16 @@ class DBAPIServiceTestCase(BaseTest): real_service1 = db.service_get(self.ctxt, service1['id']) self._assertEqualObjects(service1, real_service1) + def test_service_get_by_cluster(self): + service = self._create_service({'cluster_name': 'cluster@backend'}) + # Search with an exact match + real_service = db.service_get(self.ctxt, + cluster_name='cluster@backend') + self._assertEqualObjects(service, real_service) + # Search without the backend + real_service = db.service_get(self.ctxt, cluster_name='cluster') + self._assertEqualObjects(service, real_service) + def test_service_get_not_found_exception(self): self.assertRaises(exception.ServiceNotFound, db.service_get, self.ctxt, 100500) @@ -175,36 +164,41 @@ class 
DBAPIServiceTestCase(BaseTest): def test_service_get_by_host_and_topic(self): service1 = self._create_service({'host': 'host1', 'topic': 'topic1'}) - real_service1 = db.service_get_by_host_and_topic(self.ctxt, - host='host1', - topic='topic1') + real_service1 = db.service_get(self.ctxt, host='host1', topic='topic1') self._assertEqualObjects(service1, real_service1) def test_service_get_all(self): + expired = (datetime.datetime.utcnow() + - datetime.timedelta(seconds=CONF.service_down_time + 1)) values = [ - {'host': 'host1', 'binary': 'b1'}, + # Now we are updating updated_at at creation as well so this one + # is up. + {'host': 'host1', 'binary': 'b1', 'created_at': expired}, {'host': 'host1@ceph', 'binary': 'b2'}, {'host': 'host2', 'binary': 'b2'}, - {'disabled': True} + {'disabled': True, 'created_at': expired, 'updated_at': expired}, ] services = [self._create_service(vals) for vals in values] - disabled_services = [services[-1]] + disabled_services = services[-1:] non_disabled_services = services[:-1] + up_services = services[0:3] + down_services = [services[3]] expected = services[:2] expected_bin = services[1:3] compares = [ - (services, db.service_get_all(self.ctxt, {})), (services, db.service_get_all(self.ctxt)), - (expected, db.service_get_all(self.ctxt, {'host': 'host1'})), - (expected_bin, db.service_get_all(self.ctxt, {'binary': 'b2'})), - (disabled_services, db.service_get_all(self.ctxt, - {'disabled': True})), + (expected, db.service_get_all(self.ctxt, host='host1')), + (expected_bin, db.service_get_all(self.ctxt, binary='b2')), + (disabled_services, db.service_get_all(self.ctxt, disabled=True)), (non_disabled_services, db.service_get_all(self.ctxt, - {'disabled': False})), + disabled=False)), + (up_services, db.service_get_all(self.ctxt, is_up=True)), + (down_services, db.service_get_all(self.ctxt, is_up=False)), ] - for comp in compares: - self._assertEqualListsOfObjects(*comp) + for i, comp in enumerate(compares): + self._assertEqualListsOfObjects(*comp, + msg='Error comparing %s' % i) def test_service_get_all_by_topic(self): values = [ @@ -215,7 +209,7 @@ class DBAPIServiceTestCase(BaseTest): ] services = [self._create_service(vals) for vals in values] expected = services[:3] - real = db.service_get_all_by_topic(self.ctxt, 't1') + real = db.service_get_all(self.ctxt, topic='t1') self._assertEqualListsOfObjects(expected, real) def test_service_get_all_by_binary(self): @@ -227,7 +221,7 @@ class DBAPIServiceTestCase(BaseTest): ] services = [self._create_service(vals) for vals in values] expected = services[:3] - real = db.service_get_all_by_binary(self.ctxt, 'b1') + real = db.service_get_all(self.ctxt, binary='b1') self._assertEqualListsOfObjects(expected, real) def test_service_get_by_args(self): @@ -237,60 +231,45 @@ class DBAPIServiceTestCase(BaseTest): ] services = [self._create_service(vals) for vals in values] - service1 = db.service_get_by_args(self.ctxt, 'host1', 'a') + service1 = db.service_get(self.ctxt, host='host1', binary='a') self._assertEqualObjects(services[0], service1) - service2 = db.service_get_by_args(self.ctxt, 'host2', 'b') + service2 = db.service_get(self.ctxt, host='host2', binary='b') self._assertEqualObjects(services[1], service2) + def test_service_get_all_by_cluster(self): + values = [ + {'host': 'host1', 'cluster_name': 'cluster'}, + {'host': 'host2', 'cluster_name': 'cluster'}, + {'host': 'host3', 'cluster_name': 'cluster@backend'}, + {'host': 'host4', 'cluster_name': 'cluster2'}, + ] + services = [self._create_service(vals) for vals in 
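The service lookups above succeed both for the exact cluster name and for the name with its backend suffix stripped, which implies host-style prefix matching roughly like the following (a pure-Python sketch; the real filter is built as a SQL expression):

    def _cluster_matches(cluster_name, requested):
        # 'cluster' matches itself and any backend-qualified form such as
        # 'cluster@backend'; 'cluster2' is not a match for 'cluster'.
        return (cluster_name == requested or
                cluster_name.startswith(requested + '@'))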
values] + expected = services[:3] + real = db.service_get_all(self.ctxt, cluster_name='cluster') + self._assertEqualListsOfObjects(expected, real) + def test_service_get_by_args_not_found_exception(self): self.assertRaises(exception.ServiceNotFound, - db.service_get_by_args, - self.ctxt, 'non-exists-host', 'a') + db.service_get, + self.ctxt, host='non-exists-host', binary='a') - @mock.patch('cinder.db.sqlalchemy.api.model_query') - def test_service_get_by_args_with_case_insensitive(self, model_query): - class case_insensitive_filter(object): - def __init__(self, records): - self.records = records + @mock.patch('sqlalchemy.orm.query.Query.filter_by') + def test_service_get_by_args_with_case_insensitive(self, filter_by): + CONF.set_default('connection', 'mysql://', 'database') + db.service_get(self.ctxt, host='host', binary='a') - def filter_by(self, **kwargs): - ret = mock.Mock() - ret.all = mock.Mock() - - results = [] - for record in self.records: - for key, value in kwargs.items(): - if record[key].lower() != value.lower(): - break - else: - results.append(record) - - ret.filter_by = case_insensitive_filter(results).filter_by - ret.all.return_value = results - return ret - - values = [ - {'host': 'host', 'binary': 'a'}, - {'host': 'HOST', 'binary': 'a'} - ] - services = [self._create_service(vals) for vals in values] - - query = mock.Mock() - query.filter_by = case_insensitive_filter(services).filter_by - model_query.return_value = query - - service1 = db.service_get_by_args(self.ctxt, 'host', 'a') - self._assertEqualObjects(services[0], service1) - - service2 = db.service_get_by_args(self.ctxt, 'HOST', 'a') - self._assertEqualObjects(services[1], service2) - - self.assertRaises(exception.ServiceNotFound, - db.service_get_by_args, - self.ctxt, 'Host', 'a') + self.assertNotEqual(0, filter_by.call_count) + self.assertEqual(1, filter_by.return_value.filter.call_count) + or_op = filter_by.return_value.filter.call_args[0][0].clauses[0] + self.assertIsInstance(or_op, + sqlalchemy_api.sql.elements.BinaryExpression) + binary_op = or_op.right + self.assertIsInstance(binary_op, sqlalchemy_api.sql.functions.Function) + self.assertEqual('binary', binary_op.name) +@ddt.ddt class DBAPIVolumeTestCase(BaseTest): """Unit tests for cinder.db.api.volume_*.""" @@ -411,9 +390,13 @@ class DBAPIVolumeTestCase(BaseTest): self._assertEqualObjects(volume, db.volume_get(self.ctxt, volume['id'])) - def test_volume_destroy(self): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) + def test_volume_destroy(self, utcnow_mock): volume = db.volume_create(self.ctxt, {}) - db.volume_destroy(self.ctxt, volume['id']) + self.assertDictEqual( + {'status': 'deleted', 'deleted': True, 'deleted_at': UTC_NOW, + 'migration_status': None}, + db.volume_destroy(self.ctxt, volume['id'])) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.ctxt, volume['id']) @@ -1220,6 +1203,79 @@ class DBAPIVolumeTestCase(BaseTest): ['desc'], filters=filters) self._assertEqualListsOfObjects([], volumes) + def _create_volumes_to_test_include_in(self): + """Helper method for test_volume_include_in_* tests.""" + return [ + db.volume_create(self.ctxt, + {'host': 'host1@backend1#pool1', + 'cluster_name': 'cluster1@backend1#pool1'}), + db.volume_create(self.ctxt, + {'host': 'host1@backend2#pool2', + 'cluster_name': 'cluster1@backend2#pool2'}), + db.volume_create(self.ctxt, + {'host': 'host2@backend#poo1', + 'cluster_name': 'cluster2@backend#pool'}), + ] + + @ddt.data('host1@backend1#pool1', 'host1@backend1') + def 
test_volume_include_in_cluster_by_host(self, host): + """Basic volume include test filtering by host and with full rename.""" + vol = self._create_volumes_to_test_include_in()[0] + + cluster_name = 'my_cluster' + result = db.volume_include_in_cluster(self.ctxt, cluster_name, + partial_rename=False, + host=host) + self.assertEqual(1, result) + db_vol = db.volume_get(self.ctxt, vol.id) + self.assertEqual(cluster_name, db_vol.cluster_name) + + def test_volume_include_in_cluster_by_host_multiple(self): + """Partial cluster rename filtering with host level info.""" + vols = self._create_volumes_to_test_include_in()[0:2] + + host = 'host1' + cluster_name = 'my_cluster' + result = db.volume_include_in_cluster(self.ctxt, cluster_name, + partial_rename=True, + host=host) + self.assertEqual(2, result) + db_vols = [db.volume_get(self.ctxt, vols[0].id), + db.volume_get(self.ctxt, vols[1].id)] + for i in range(2): + self.assertEqual(cluster_name + vols[i].host[len(host):], + db_vols[i].cluster_name) + + @ddt.data('cluster1@backend1#pool1', 'cluster1@backend1') + def test_volume_include_in_cluster_by_cluster_name(self, cluster_name): + """Basic volume include test filtering by cluster with full rename.""" + vol = self._create_volumes_to_test_include_in()[0] + + new_cluster_name = 'cluster_new@backend1#pool' + result = db.volume_include_in_cluster(self.ctxt, new_cluster_name, + partial_rename=False, + cluster_name=cluster_name) + self.assertEqual(1, result) + db_vol = db.volume_get(self.ctxt, vol.id) + self.assertEqual(new_cluster_name, db_vol.cluster_name) + + def test_volume_include_in_cluster_by_cluster_multiple(self): + """Partial rename filtering with cluster with host level info.""" + vols = self._create_volumes_to_test_include_in()[0:2] + + cluster_name = 'cluster1' + new_cluster_name = 'my_cluster' + result = db.volume_include_in_cluster(self.ctxt, new_cluster_name, + partial_rename=True, + cluster_name=cluster_name) + self.assertEqual(2, result) + db_vols = [db.volume_get(self.ctxt, vols[0].id), + db.volume_get(self.ctxt, vols[1].id)] + for i in range(2): + self.assertEqual( + new_cluster_name + vols[i].cluster_name[len(cluster_name):], + db_vols[i].cluster_name) + class DBAPISnapshotTestCase(BaseTest): @@ -1453,6 +1509,87 @@ class DBAPISnapshotTestCase(BaseTest): self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1)) +@ddt.ddt +class DBAPIConsistencygroupTestCase(BaseTest): + def _create_cgs_to_test_include_in(self): + """Helper method for test_consistencygroup_include_in_* tests.""" + return [ + db.consistencygroup_create( + self.ctxt, {'host': 'host1@backend1#pool1', + 'cluster_name': 'cluster1@backend1#pool1'}), + db.consistencygroup_create( + self.ctxt, {'host': 'host1@backend2#pool2', + 'cluster_name': 'cluster1@backend2#pool1'}), + db.consistencygroup_create( + self.ctxt, {'host': 'host2@backend#poo1', + 'cluster_name': 'cluster2@backend#pool'}), + ] + + @ddt.data('host1@backend1#pool1', 'host1@backend1') + def test_consistencygroup_include_in_cluster_by_host(self, host): + """Basic CG include test filtering by host and with full rename.""" + cg = self._create_cgs_to_test_include_in()[0] + + cluster_name = 'my_cluster' + result = db.consistencygroup_include_in_cluster(self.ctxt, + cluster_name, + partial_rename=False, + host=host) + self.assertEqual(1, result) + db_cg = db.consistencygroup_get(self.ctxt, cg.id) + self.assertEqual(cluster_name, db_cg.cluster_name) + + def test_consistencygroup_include_in_cluster_by_host_multiple(self): + """Partial cluster rename filtering 
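The multiple-match cases above define what a partial rename preserves: only the matched prefix is replaced and the backend/pool qualifiers ride along. The same computation in isolation (helper name assumed):

    def _partial_rename(current, matched_prefix, new_prefix):
        # 'host1@backend2#pool2' with prefix 'host1' and new name
        # 'my_cluster' becomes 'my_cluster@backend2#pool2', matching the
        # cluster_name values the assertions above compute.
        return new_prefix + current[len(matched_prefix):]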
with host level info.""" + cgs = self._create_cgs_to_test_include_in()[0:2] + + host = 'host1' + cluster_name = 'my_cluster' + result = db.consistencygroup_include_in_cluster(self.ctxt, + cluster_name, + partial_rename=True, + host=host) + self.assertEqual(2, result) + db_cgs = [db.consistencygroup_get(self.ctxt, cgs[0].id), + db.consistencygroup_get(self.ctxt, cgs[1].id)] + for i in range(2): + self.assertEqual(cluster_name + cgs[i].host[len(host):], + db_cgs[i].cluster_name) + + @ddt.data('cluster1@backend1#pool1', 'cluster1@backend1') + def test_consistencygroup_include_in_cluster_by_cluster_name(self, + cluster_name): + """Basic CG include test filtering by cluster with full rename.""" + cg = self._create_cgs_to_test_include_in()[0] + + new_cluster_name = 'cluster_new@backend1#pool' + result = db.consistencygroup_include_in_cluster( + self.ctxt, new_cluster_name, partial_rename=False, + cluster_name=cluster_name) + + self.assertEqual(1, result) + db_cg = db.consistencygroup_get(self.ctxt, cg.id) + self.assertEqual(new_cluster_name, db_cg.cluster_name) + + def test_consistencygroup_include_in_cluster_by_cluster_multiple(self): + """Partial rename filtering with cluster with host level info.""" + cgs = self._create_cgs_to_test_include_in()[0:2] + + cluster_name = 'cluster1' + new_cluster_name = 'my_cluster' + result = db.consistencygroup_include_in_cluster( + self.ctxt, new_cluster_name, partial_rename=True, + cluster_name=cluster_name) + + self.assertEqual(2, result) + db_cgs = [db.consistencygroup_get(self.ctxt, cgs[0].id), + db.consistencygroup_get(self.ctxt, cgs[1].id)] + for i in range(2): + self.assertEqual( + new_cluster_name + cgs[i].cluster_name[len(cluster_name):], + db_cgs[i].cluster_name) + + class DBAPICgsnapshotTestCase(BaseTest): """Tests for cinder.db.api.cgsnapshot_*.""" @@ -1821,8 +1958,11 @@ class DBAPIQuotaClassTestCase(BaseTest): qc = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') self._assertEqualObjects(self.sample_qc, qc) - def test_quota_class_destroy(self): - db.quota_class_destroy(self.ctxt, 'test_qc', 'test_resource') + @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) + def test_quota_class_destroy(self, utcnow_mock): + self.assertDictEqual( + {'deleted': True, 'deleted_at': UTC_NOW}, + db.quota_class_destroy(self.ctxt, 'test_qc', 'test_resource')) self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get, self.ctxt, 'test_qc', 'test_resource') @@ -1927,10 +2067,12 @@ class DBAPIQuotaTestCase(BaseTest): 'volumes': {'reserved': 1, 'in_use': 0}}, quota_usage) - def test_quota_destroy(self): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) + def test_quota_destroy(self, utcnow_mock): db.quota_create(self.ctxt, 'project1', 'resource1', 41) - self.assertIsNone(db.quota_destroy(self.ctxt, 'project1', - 'resource1')) + self.assertDictEqual( + {'deleted': True, 'deleted_at': UTC_NOW}, + db.quota_destroy(self.ctxt, 'project1', 'resource1')) self.assertRaises(exception.ProjectQuotaNotFound, db.quota_get, self.ctxt, 'project1', 'resource1') @@ -2152,9 +2294,13 @@ class DBAPIBackupTestCase(BaseTest): self._assertEqualObjects(updated_values, updated_backup, self._ignored_keys) - def test_backup_destroy(self): + @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) + def test_backup_destroy(self, utcnow_mock): for backup in self.created: - db.backup_destroy(self.ctxt, backup['id']) + self.assertDictEqual( + {'status': fields.BackupStatus.DELETED, 'deleted': True, + 'deleted_at': UTC_NOW}, + 
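Several destroy tests now assert on a returned dictionary, implying the soft-delete helpers hand back exactly the values they wrote. A sketch of that pattern (the model and query call are indicative only):

    from oslo_utils import timeutils

    def quota_class_destroy(context, class_name, resource):
        updated_values = {'deleted': True,
                          'deleted_at': timeutils.utcnow()}
        # Applied in the real code with something along the lines of:
        #   model_query(context, models.QuotaClass, ...).update(updated_values)
        return updated_values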
db.backup_destroy(self.ctxt, backup['id'])) self.assertFalse(db.backup_get_all(self.ctxt)) def test_backup_not_found(self): diff --git a/cinder/tests/unit/test_db_worker_api.py b/cinder/tests/unit/test_db_worker_api.py new file mode 100644 index 000000000..be15640e6 --- /dev/null +++ b/cinder/tests/unit/test_db_worker_api.py @@ -0,0 +1,249 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for cinder.db.api.Worker""" + +import time +import uuid + +from oslo_db import exception as db_exception +import six + +from cinder import context +from cinder import db +from cinder import exception +from cinder import test +from cinder.tests.unit import fake_constants as fake + + +class DBAPIWorkerTestCase(test.TestCase, test.ModelsObjectComparatorMixin): + worker_fields = {'resource_type': 'Volume', + 'resource_id': fake.VOLUME_ID, + 'status': 'creating'} + + def _uuid(self): + return six.text_type(uuid.uuid4()) + + def setUp(self): + super(DBAPIWorkerTestCase, self).setUp() + self.ctxt = context.get_admin_context() + + def test_worker_create_and_get(self): + """Test basic creation of a worker record.""" + worker = db.worker_create(self.ctxt, **self.worker_fields) + db_worker = db.worker_get(self.ctxt, id=worker.id) + self._assertEqualObjects(worker, db_worker) + + def test_worker_create_unique_constrains(self): + """Test when we use an already existing resource type and id.""" + db.worker_create(self.ctxt, **self.worker_fields) + self.assertRaises(exception.WorkerExists, db.worker_create, + self.ctxt, + resource_type=self.worker_fields['resource_type'], + resource_id=self.worker_fields['resource_id'], + status='not_' + self.worker_fields['status']) + + def test_worker_create_missing_required_field(self): + """Try creating a worker with a missing required field.""" + for field in self.worker_fields: + params = self.worker_fields.copy() + del params[field] + self.assertRaises(db_exception.DBError, db.worker_create, + self.ctxt, **params) + + def test_worker_create_invalid_field(self): + """Try creating a worker with a non existent db field.""" + self.assertRaises(TypeError, db.worker_create, self.ctxt, + myfield='123', **self.worker_fields) + + def test_worker_get_non_existent(self): + """Check basic non existent worker record get method.""" + db.worker_create(self.ctxt, **self.worker_fields) + self.assertRaises(exception.WorkerNotFound, db.worker_get, + self.ctxt, service_id='1', **self.worker_fields) + + def _create_workers(self, num, read_back=False, **fields): + workers = [] + base_params = self.worker_fields.copy() + base_params.update(fields) + + for i in range(num): + params = base_params.copy() + params['resource_id'] = self._uuid() + workers.append(db.worker_create(self.ctxt, **params)) + + if read_back: + for i in range(len(workers)): + workers[i] = db.worker_get(self.ctxt, id=workers[i].id) + + return workers + + def test_worker_get_all(self): + """Test basic get_all method.""" + self._create_workers(1) + service = 
+        service = db.service_create(self.ctxt, {})
+        workers = self._create_workers(3, service_id=service.id)
+
+        db_workers = db.worker_get_all(self.ctxt, service_id=service.id)
+        self._assertEqualListsOfObjects(workers, db_workers)
+
+    def test_worker_get_all_until(self):
+        """Test get_all until a specific time."""
+        workers = self._create_workers(3, read_back=True)
+        timestamp = workers[-1].updated_at
+        time.sleep(0.1)
+        self._create_workers(3)
+
+        db_workers = db.worker_get_all(self.ctxt, until=timestamp)
+        self._assertEqualListsOfObjects(workers, db_workers)
+
+    def test_worker_get_all_returns_empty(self):
+        """Test that get_all returns an empty list if there are no results."""
+        self._create_workers(3, deleted=True)
+        db_workers = db.worker_get_all(self.ctxt)
+        self.assertListEqual([], db_workers)
+
+    def test_worker_update_not_exists(self):
+        """Test worker update when the worker doesn't exist."""
+        self.assertRaises(exception.WorkerNotFound, db.worker_update,
+                          self.ctxt, 1)
+
+    def test_worker_update(self):
+        """Test basic worker update."""
+        worker = self._create_workers(1)[0]
+        worker = db.worker_get(self.ctxt, id=worker.id)
+        res = db.worker_update(self.ctxt, worker.id, service_id=1)
+        self.assertEqual(1, res)
+        worker.service_id = 1
+
+        db_worker = db.worker_get(self.ctxt, id=worker.id)
+        self._assertEqualObjects(worker, db_worker, ['updated_at'])
+
+    def test_worker_update_update_orm(self):
+        """Test worker update that also updates the given ORM object."""
+        worker = self._create_workers(1)[0]
+        res = db.worker_update(self.ctxt, worker.id, orm_worker=worker,
+                               service_id=1)
+        self.assertEqual(1, res)
+
+        db_worker = db.worker_get(self.ctxt, id=worker.id)
+        self._assertEqualObjects(worker, db_worker, ['updated_at'])
+
+    def test_worker_destroy(self):
+        """Test that worker destroy really deletes the DB entry."""
+        worker = self._create_workers(1)[0]
+        res = db.worker_destroy(self.ctxt, id=worker.id)
+        self.assertEqual(1, res)
+
+        db_workers = db.worker_get_all(self.ctxt, read_deleted='yes')
+        self.assertListEqual([], db_workers)
+
+    def test_worker_destroy_non_existent(self):
+        """Test that worker destroy returns 0 when the entry doesn't exist."""
+        res = db.worker_destroy(self.ctxt, id=1)
+        self.assertEqual(0, res)
+
+    def test_worker_claim(self):
+        """Test worker claim of normal DB entry."""
+        service_id = 1
+        worker = db.worker_create(self.ctxt, resource_type='Volume',
+                                  resource_id=fake.VOLUME_ID,
+                                  status='deleting')
+
+        res = db.worker_claim_for_cleanup(self.ctxt, service_id, worker)
+        self.assertEqual(1, res)
+
+        db_worker = db.worker_get(self.ctxt, id=worker.id)
+
+        self._assertEqualObjects(worker, db_worker, ['updated_at'])
+        self.assertEqual(service_id, db_worker.service_id)
+        self.assertEqual(worker.service_id, db_worker.service_id)
+
+    def test_worker_claim_fails_status_change(self):
+        """Test that claim fails if the worker entry changed its status."""
+        worker = db.worker_create(self.ctxt, resource_type='Volume',
+                                  resource_id=fake.VOLUME_ID,
+                                  status='deleting')
+        worker.status = 'creating'
+
+        res = db.worker_claim_for_cleanup(self.ctxt, 1, worker)
+        self.assertEqual(0, res)
+
+        db_worker = db.worker_get(self.ctxt, id=worker.id)
+        self._assertEqualObjects(worker, db_worker, ['status'])
+        self.assertIsNone(db_worker.service_id)
+
+    def test_worker_claim_fails_service_change(self):
+        """Test that claim fails on worker service change."""
+        failed_service = 1
+        working_service = 2
+        this_service = 3
+        worker = db.worker_create(self.ctxt, resource_type='Volume',
+                                  resource_id=fake.VOLUME_ID,
+
status='deleting', + service_id=working_service) + + worker.service_id = failed_service + res = db.worker_claim_for_cleanup(self.ctxt, this_service, worker) + self.assertEqual(0, res) + db_worker = db.worker_get(self.ctxt, id=worker.id) + self.assertEqual(working_service, db_worker.service_id) + + def test_worker_claim_same_service(self): + """Test worker claim of a DB entry that has our service_id.""" + service_id = 1 + worker = db.worker_create(self.ctxt, resource_type='Volume', + resource_id=fake.VOLUME_ID, + status='deleting', service_id=service_id) + # Read from DB to get updated_at field + worker = db.worker_get(self.ctxt, id=worker.id) + claimed_worker = db.worker_get(self.ctxt, id=worker.id) + + res = db.worker_claim_for_cleanup(self.ctxt, + service_id, + claimed_worker) + self.assertEqual(1, res) + + db_worker = db.worker_get(self.ctxt, id=worker.id) + + self._assertEqualObjects(claimed_worker, db_worker) + self._assertEqualObjects(worker, db_worker, ['updated_at']) + self.assertNotEqual(worker.updated_at, db_worker.updated_at) + + def test_worker_claim_fails_this_service_claimed(self): + """Test claim fails when worker was already claimed by this service.""" + service_id = 1 + worker = db.worker_create(self.ctxt, resource_type='Volume', + resource_id=fake.VOLUME_ID, + status='creating', + service_id=service_id) + + # Read it back to have the updated_at value + worker = db.worker_get(self.ctxt, id=worker.id) + claimed_worker = db.worker_get(self.ctxt, id=worker.id) + + time.sleep(0.1) + # Simulate that this service starts processing this entry + res = db.worker_claim_for_cleanup(self.ctxt, + service_id, + claimed_worker) + self.assertEqual(1, res) + + res = db.worker_claim_for_cleanup(self.ctxt, service_id, worker) + self.assertEqual(0, res) + db_worker = db.worker_get(self.ctxt, id=worker.id) + self._assertEqualObjects(claimed_worker, db_worker) + self._assertEqualObjects(worker, db_worker, ['updated_at']) + self.assertNotEqual(worker.updated_at, db_worker.updated_at) diff --git a/cinder/tests/unit/test_exception.py b/cinder/tests/unit/test_exception.py index 76d75a027..5de24729f 100644 --- a/cinder/tests/unit/test_exception.py +++ b/cinder/tests/unit/test_exception.py @@ -23,6 +23,20 @@ import six import webob.util +class ExceptionTestCase(test.TestCase): + @staticmethod + def _raise_exc(exc): + raise exc() + + def test_exceptions_raise(self): + # NOTE(dprince): disable format errors since we are not passing kwargs + self.flags(fatal_exception_format_errors=False) + for name in dir(exception): + exc = getattr(exception, name) + if isinstance(exc, type): + self.assertRaises(exc, self._raise_exc, exc) + + class CinderExceptionTestCase(test.TestCase): def test_default_error_msg(self): class FakeCinderException(exception.CinderException): diff --git a/cinder/tests/unit/test_falconstor_fss.py b/cinder/tests/unit/test_falconstor_fss.py new file mode 100644 index 000000000..7c04ca1f4 --- /dev/null +++ b/cinder/tests/unit/test_falconstor_fss.py @@ -0,0 +1,895 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from copy import deepcopy +import mock +import time + +from cinder import context +from cinder import exception +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.falconstor import fc +from cinder.volume.drivers.falconstor import iscsi +from cinder.volume.drivers.falconstor import rest_proxy as proxy + + +DRIVER_PATH = "cinder.volume.drivers.falconstor" +BASE_DRIVER = DRIVER_PATH + ".fss_common.FalconstorBaseDriver" +ISCSI_DRIVER = DRIVER_PATH + ".iscsi.FSSISCSIDriver" + +PRIMARY_IP = '10.0.0.1' +SECONDARY_IP = '10.0.0.2' +FAKE_ID = 123 +FAKE = 'fake' +FAKE_HOST = 'fakehost' +API_RESPONSE = {'rc': 0} +ISCSI_VOLUME_BACKEND_NAME = "FSSISCSIDriver" +SESSION_ID = "a76d506c-abcd-1234-efgh-710e1fd90527" +VOLUME_ID = '6068ea6d-f221-4213-bde9-f1b50aecdf36' +ADD_VOLUME_ID = '6068ed7f-f231-4283-bge9-f1b51aecdf36' +GROUP_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7' + +PORTAL_RESPONSE = {'rc': 0, 'ipaddress': FAKE} +VOLUME_METADATA = {'metadata': {'FSS-vid': 1}} +EXTENT_NEW_SIZE = 3 +DATA_SERVER_INFO = 0, {'metadata': {'vendor': 'FalconStor', 'version': '1.5'}} + +FSS_SINGLE_TYPE = 'single' +RAWTIMESTAMP = '1324975390' + +VOLUME = {'id': VOLUME_ID, + 'name': "volume-" + VOLUME_ID, + 'display_name': 'fake_volume', + 'display_description': '', + 'size': 1, + 'host': "hostname@backend#%s" % FAKE_ID, + 'volume_type': None, + 'volume_type_id': None, + 'consistencygroup_id': None, + 'volume_metadata': [], + 'metadata': {"Type": "work"}} + +SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" +SRC_VOL = { + "name": "volume-" + SRC_VOL_ID, + "id": SRC_VOL_ID, + "display_name": "fake_src_vol", + "size": 1, + "host": "hostname@backend#%s" % FAKE_ID, + "volume_type": None, + "volume_type_id": None, + "volume_size": 1 +} + +VOLUME_NAME = 'cinder-' + VOLUME['id'] +SRC_VOL_NAME = 'cinder-' + SRC_VOL['id'] +DATA_OUTPUT = VOLUME_NAME, VOLUME_METADATA +SNAPSHOT_METADATA = {'fss-tm-comment': None} + +ADD_VOLUME_IN_CG = { + 'id': ADD_VOLUME_ID, + 'display_name': 'abc123', + 'display_description': '', + 'size': 1, + 'consistencygroup_id': GROUP_ID, + 'status': 'available', + 'host': "hostname@backend#%s" % FAKE_ID} + +REMOVE_VOLUME_IN_CG = { + 'id': 'fe2dbc515810451dab2f8c8a48d15bee', + 'display_name': 'fe2dbc515810451dab2f8c8a48d15bee', + 'display_description': '', + 'size': 1, + 'consistencygroup_id': GROUP_ID, + 'status': 'available', + 'host': "hostname@backend#%s" % FAKE_ID} + +CONSISTGROUP = {'id': GROUP_ID, + 'name': 'fake_group', + 'description': 'fake_group_des', + 'status': ''} +CG_SNAPSHOT = { + 'consistencygroup_id': GROUP_ID, + 'id': '3c61b0f9-842e-46bf-b061-5e0031d8083f', + 'name': 'cgsnapshot1', + 'description': 'cgsnapshot1', + 'status': ''} + +SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb" +SNAPSHOT = {'name': "snapshot-" + SNAPSHOT_ID, + 'id': SNAPSHOT_ID, + 'volume_id': VOLUME_ID, + 'volume_name': "volume-" + VOLUME_ID, + 'volume_size': 2, + 'display_name': "fake_snapshot", + 'display_description': '', + 'volume': VOLUME, + 'metadata': SNAPSHOT_METADATA, + 'status': ''} + +INITIATOR_IQN = 'iqn.2015-08.org.falconstor:01:fss' +TARGET_IQN = "iqn.2015-06.com.falconstor:freestor.fss-12345abc" +TARGET_PORT = "3260" +ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"] +ISCSI_IPS = ["10.0.0." 
+ str(i + 1) for i in range(len(ISCSI_PORT_NAMES))]
+
+ISCSI_PORTS = {"iqn": TARGET_IQN, "lun": 1}
+ISCSI_CONNECTOR = {'initiator': INITIATOR_IQN,
+                   'host': "hostname@backend#%s" % FAKE_ID}
+ISCSI_INFO = {
+    'driver_volume_type': 'iscsi',
+    'data': {
+        'target_discovered': True,
+        'discard': True,
+        'encrypted': False,
+        'qos_specs': None,
+        'access_mode': 'rw',
+        'volume_id': VOLUME_ID,
+        'target_iqn': ISCSI_PORTS['iqn'],
+        'target_portal': ISCSI_IPS[0] + ':' + TARGET_PORT,
+        'target_lun': 1
+    },
+}
+
+ISCSI_MULTIPATH_INFO = {
+    'driver_volume_type': 'iscsi',
+    'data': {
+        'target_discovered': False,
+        'discard': True,
+        'encrypted': False,
+        'qos_specs': None,
+        'access_mode': 'rw',
+        'volume_id': VOLUME_ID,
+        'target_iqns': [ISCSI_PORTS['iqn']],
+        'target_portals': [ISCSI_IPS[0] + ':' + TARGET_PORT],
+        'target_luns': [1]
+    },
+}
+
+FC_INITIATOR_WWPNS = ['2100000d778301c3', '2101000d77a301c3']
+FC_TARGET_WWPNS = ['11000024ff2d2ca4', '11000024ff2d2ca5',
+                   '11000024ff2d2c23', '11000024ff2d2c24']
+FC_WWNS = ['20000024ff2d2ca4', '20000024ff2d2ca5',
+           '20000024ff2d2c23', '20000024ff2d2c24']
+FC_CONNECTOR = {'ip': '10.10.0.1',
+                'initiator': 'iqn.1988-08.org.oracle:568eb4ccbbcc',
+                'wwpns': FC_INITIATOR_WWPNS,
+                'wwnns': FC_WWNS,
+                'host': FAKE_HOST,
+                'multipath': False}
+FC_INITIATOR_TARGET_MAP = {
+    FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
+    FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
+}
+FC_DEVICE_MAPPING = {
+    "fabric": {
+        'initiator_port_wwn_list': FC_INITIATOR_WWPNS,
+        'target_port_wwn_list': FC_WWNS
+    }
+}
+
+FC_INFO = {
+    'driver_volume_type': 'fibre_channel',
+    'data': {
+        'target_discovered': True,
+        'volume_id': VOLUME_ID,
+        'target_lun': 1,
+        'target_wwn': FC_TARGET_WWPNS,
+        'initiator_target_map': FC_INITIATOR_TARGET_MAP
+    }
+}
+
+
+def Fake_sleep(time):
+    pass
+
+
+class FSSDriverTestCase(test.TestCase):
+
+    def setUp(self):
+        super(FSSDriverTestCase, self).setUp()
+        self.mock_config = mock.Mock()
+        self.mock_config.san_ip = PRIMARY_IP
+        self.mock_config.san_login = FAKE
+        self.mock_config.san_password = FAKE
+        self.mock_config.fss_pool = FAKE_ID
+        self.mock_config.san_is_local = False
+        self.mock_config.fss_debug = False
+        self.mock_config.additional_retry_list = False
+        self.mock_object(time, 'sleep', Fake_sleep)
+
+
+class TestFSSISCSIDriver(FSSDriverTestCase):
+    def __init__(self, method):
+        super(TestFSSISCSIDriver, self).__init__(method)
+
+    def setUp(self):
+        super(TestFSSISCSIDriver, self).setUp()
+        self.mock_config.use_chap_auth = False
+        self.mock_config.use_multipath_for_image_xfer = False
+        self.mock_config.volume_backend_name = ISCSI_VOLUME_BACKEND_NAME
+        self.driver = iscsi.FSSISCSIDriver(configuration=self.mock_config)
+        self.mock_utils = mock.Mock()
+        self.driver.driver_utils = self.mock_utils
+
+    def tearDown(self):
+        super(TestFSSISCSIDriver, self).tearDown()
+
+    def test_initialized_should_set_fss_info(self):
+        self.assertEqual(self.driver.proxy.fss_host,
+                         self.driver.configuration.san_ip)
+        self.assertEqual(self.driver.proxy.fss_username,
+                         self.driver.configuration.san_login)
+        self.assertEqual(self.driver.proxy.fss_password,
+                         self.driver.configuration.san_password)
+        self.assertEqual(self.driver.proxy.fss_defined_pool,
+                         self.driver.configuration.fss_pool)
+
+    def test_check_for_setup_error(self):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.check_for_setup_error)
+
+    @mock.patch.object(proxy.RESTProxy, 'create_vdev',
+                       return_value=DATA_OUTPUT)
+    def test_create_volume(self, mock_create_vdev):
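+        # create_volume is expected to delegate straight to the REST
+        # proxy's create_vdev with the volume object unchanged.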
+        self.driver.create_volume(VOLUME)
+        mock_create_vdev.assert_called_once_with(VOLUME)
+
+    @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name',
+                       return_value=VOLUME_NAME)
+    def test_extend_volume(self, mock__get_fss_volume_name):
+        """Test that a volume is extended successfully."""
+        self.driver.proxy.extend_vdev = mock.Mock()
+        result = self.driver.extend_volume(VOLUME, EXTENT_NEW_SIZE)
+        mock__get_fss_volume_name.assert_called_once_with(VOLUME)
+        self.driver.proxy.extend_vdev.assert_called_once_with(VOLUME_NAME,
+                                                              VOLUME["size"],
+                                                              EXTENT_NEW_SIZE)
+        self.assertIsNone(result)
+
+    @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name')
+    def test_clone_volume(self, mock__get_fss_volume_name):
+        mock__get_fss_volume_name.side_effect = [VOLUME_NAME, SRC_VOL_NAME]
+        self.driver.proxy.clone_volume = mock.Mock(
+            return_value=VOLUME_METADATA)
+        self.driver.proxy.extend_vdev = mock.Mock()
+
+        self.driver.create_cloned_volume(VOLUME, SRC_VOL)
+        self.driver.proxy.clone_volume.assert_called_with(VOLUME_NAME,
+                                                          SRC_VOL_NAME)
+
+        mock__get_fss_volume_name.assert_any_call(VOLUME)
+        mock__get_fss_volume_name.assert_any_call(SRC_VOL)
+        self.assertEqual(2, mock__get_fss_volume_name.call_count)
+
+        self.driver.proxy.extend_vdev(VOLUME_NAME, VOLUME["size"],
+                                      SRC_VOL["size"])
+        self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME,
+                                                         VOLUME["size"],
+                                                         SRC_VOL["size"])
+
+    @mock.patch.object(proxy.RESTProxy, 'delete_vdev')
+    def test_delete_volume(self, mock_delete_vdev):
+        result = self.driver.delete_volume(VOLUME)
+        mock_delete_vdev.assert_called_once_with(VOLUME)
+        self.assertIsNone(result)
+
+    @mock.patch.object(proxy.RESTProxy, 'create_snapshot',
+                       return_value=API_RESPONSE)
+    def test_create_snapshot(self, mock_create_snapshot):
+        snap_name = SNAPSHOT.get('display_name')
+        SNAPSHOT_METADATA["fss-tm-comment"] = snap_name
+        result = self.driver.create_snapshot(SNAPSHOT)
+        mock_create_snapshot.assert_called_once_with(SNAPSHOT)
+        self.assertEqual(result, {'metadata': SNAPSHOT_METADATA})
+
+    @mock.patch.object(proxy.RESTProxy, 'delete_snapshot',
+                       return_value=API_RESPONSE)
+    def test_delete_snapshot(self, mock_delete_snapshot):
+        result = self.driver.delete_snapshot(SNAPSHOT)
+        mock_delete_snapshot.assert_called_once_with(SNAPSHOT)
+        self.assertIsNone(result)
+
+    @mock.patch.object(proxy.RESTProxy, 'create_volume_from_snapshot',
+                       return_value=(VOLUME_NAME, VOLUME_METADATA))
+    @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name',
+                       return_value=VOLUME_NAME)
+    def test_create_volume_from_snapshot(self, mock__get_fss_volume_name,
+                                         mock_create_volume_from_snapshot):
+        vol_size = VOLUME['size']
+        snap_size = SNAPSHOT['volume_size']
+        self.driver.proxy.extend_vdev = mock.Mock()
+
+        self.assertEqual(
+            self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT),
+            dict(metadata=VOLUME_METADATA))
+        mock_create_volume_from_snapshot.assert_called_once_with(VOLUME,
+                                                                 SNAPSHOT)
+
+        if vol_size != snap_size:
+            mock__get_fss_volume_name.assert_called_once_with(VOLUME)
+            self.driver.proxy.extend_vdev(VOLUME_NAME, snap_size, vol_size)
+            self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME,
+                                                             snap_size,
+                                                             vol_size)
+
+    @mock.patch.object(proxy.RESTProxy, 'create_group')
+    def test_create_consistency_group(self, mock_create_group):
+        ctxt = context.get_admin_context()
+        model_update = self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
+        mock_create_group.assert_called_once_with(CONSISTGROUP)
+        self.assertDictMatch({'status': 'available'}, model_update)
+
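+    # delete_consistencygroup below is expected to cascade: the driver
+    # destroys the group on the backend and deletes every member volume,
+    # returning a per-volume status update list along with the model update.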
+    @mock.patch.object(proxy.RESTProxy, 'destroy_group')
+    @mock.patch(BASE_DRIVER + ".delete_volume", autospec=True)
+    def test_delete_consistency_group(self, mock_delete_vdev,
+                                      mock_destroy_group):
+        mock_cgroup = mock.MagicMock()
+        mock_cgroup.id = FAKE_ID
+        mock_cgroup['status'] = "deleted"
+        mock_context = mock.Mock()
+        mock_volume = mock.MagicMock()
+        expected_volume_updates = [{
+            'id': mock_volume.id,
+            'status': 'deleted'
+        }]
+        model_update, volumes = self.driver.delete_consistencygroup(
+            mock_context, mock_cgroup, [mock_volume])
+
+        mock_destroy_group.assert_called_with(mock_cgroup)
+        self.assertEqual(expected_volume_updates, volumes)
+        self.assertEqual(mock_cgroup['status'], model_update['status'])
+        mock_delete_vdev.assert_called_with(self.driver, mock_volume)
+
+    @mock.patch.object(proxy.RESTProxy, 'set_group')
+    def test_update_consistency_group(self, mock_set_group):
+        ctxt = context.get_admin_context()
+        add_vols = [
+            {'name': 'vol1', 'id': 'vol1', 'display_name': ''},
+            {'name': 'vol2', 'id': 'vol2', 'display_name': ''}
+        ]
+        remove_vols = [
+            {'name': 'vol3', 'id': 'vol3', 'display_name': ''},
+            {'name': 'vol4', 'id': 'vol4', 'display_name': ''}
+        ]
+
+        expected_addvollist = ["cinder-%s" % volume['id'] for volume in
+                               add_vols]
+        expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols]
+
+        self.driver.update_consistencygroup(ctxt, CONSISTGROUP,
+                                            add_volumes=add_vols,
+                                            remove_volumes=remove_vols)
+        mock_set_group.assert_called_with(GROUP_ID,
+                                          addvollist=expected_addvollist,
+                                          remvollist=expected_remvollist)
+
+    @mock.patch.object(proxy.RESTProxy, 'create_cgsnapshot')
+    def test_create_cgsnapshot(self, mock_create_cgsnapshot):
+        mock_cgsnap = CG_SNAPSHOT
+        mock_context = mock.Mock()
+        mock_snap = mock.MagicMock()
+        model_update, snapshots = self.driver.create_cgsnapshot(mock_context,
+                                                                mock_cgsnap,
+                                                                [mock_snap])
+        mock_create_cgsnapshot.assert_called_once_with(mock_cgsnap)
+        self.assertEqual({'status': 'available'}, model_update)
+        expected_snapshot_update = [{
+            'id': mock_snap.id,
+            'status': 'available'
+        }]
+        self.assertEqual(expected_snapshot_update, snapshots)
+
+    @mock.patch.object(proxy.RESTProxy, 'delete_cgsnapshot')
+    def test_delete_cgsnapshot(self, mock_delete_cgsnapshot):
+        mock_cgsnap = mock.Mock()
+        mock_cgsnap.id = FAKE_ID
+        mock_cgsnap.status = 'deleted'
+        mock_context = mock.Mock()
+        mock_snap = mock.MagicMock()
+
+        model_update, snapshots = self.driver.delete_cgsnapshot(mock_context,
+                                                                mock_cgsnap,
+                                                                [mock_snap])
+        mock_delete_cgsnapshot.assert_called_once_with(mock_cgsnap)
+        self.assertEqual({'status': mock_cgsnap.status}, model_update)
+
+        expected_snapshot_update = [dict(id=mock_snap.id, status='deleted')]
+        self.assertEqual(expected_snapshot_update, snapshots)
+
+    @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi',
+                       return_value=ISCSI_PORTS)
+    def test_initialize_connection(self, mock_initialize_connection_iscsi):
+        FSS_HOSTS = []
+        FSS_HOSTS.append(PRIMARY_IP)
+        ret = self.driver.initialize_connection(VOLUME, ISCSI_CONNECTOR)
+        mock_initialize_connection_iscsi.assert_called_once_with(
+            VOLUME,
+            ISCSI_CONNECTOR,
+            FSS_HOSTS)
+        result = deepcopy(ISCSI_INFO)
+        self.assertDictMatch(result, ret)
+
+    @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi')
+    @mock.patch(ISCSI_DRIVER + "._check_multipath", autospec=True)
+    def test_initialize_connection_multipath(self, mock__check_multipath,
+                                             mock_initialize_connection_iscsi):
+        fss_hosts = []
+        fss_hosts.append(self.mock_config.san_ip)
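+        # With a multipath connector, both the primary and secondary
+        # portals should be offered: fss_hosts is expected to contain
+        # san_ip plus san_secondary_ip when the proxy call is asserted.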
+        mock_initialize_connection_iscsi.return_value = ISCSI_PORTS
+        mock__check_multipath.return_value = True
+
+        self.mock_config.use_multipath_for_image_xfer = True
+        self.mock_config.san_secondary_ip = SECONDARY_IP
+        multipath_connector = deepcopy(ISCSI_CONNECTOR)
+        multipath_connector["multipath"] = True
+        fss_hosts.append(SECONDARY_IP)
+
+        self.driver.initialize_connection(VOLUME, multipath_connector)
+        mock_initialize_connection_iscsi.assert_called_once_with(
+            VOLUME,
+            multipath_connector,
+            fss_hosts)
+
+    @mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi')
+    def test_terminate_connection(self, mock_terminate_connection_iscsi):
+        self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
+        mock_terminate_connection_iscsi.assert_called_once_with(
+            VOLUME,
+            ISCSI_CONNECTOR)
+
+    @mock.patch.object(proxy.RESTProxy, '_manage_existing_volume')
+    @mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid')
+    def test_manage_existing(self, mock__get_existing_volume_ref_vid,
+                             mock__manage_existing_volume):
+        ref_vid = 1
+        volume_ref = {'source-id': ref_vid}
+        self.driver.manage_existing(VOLUME, volume_ref)
+        mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref)
+        mock__manage_existing_volume.assert_called_once_with(
+            volume_ref['source-id'], VOLUME)
+
+    @mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid',
+                       return_value=5120)
+    def test_manage_existing_get_size(self,
+                                      mock__get_existing_volume_ref_vid):
+        ref_vid = 1
+        volume_ref = {'source-id': ref_vid}
+        expected_size = 5
+        size = self.driver.manage_existing_get_size(VOLUME, volume_ref)
+        mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref)
+        self.assertEqual(expected_size, size)
+
+    @mock.patch.object(proxy.RESTProxy, 'unmanage')
+    def test_unmanage(self, mock_unmanage):
+        self.driver.unmanage(VOLUME)
+        mock_unmanage.assert_called_once_with(VOLUME)
+
+
+class TestFSSFCDriver(FSSDriverTestCase):
+
+    def setUp(self):
+        super(TestFSSFCDriver, self).setUp()
+        self.driver = fc.FSSFCDriver(configuration=self.mock_config)
+        self.driver._lookup_service = mock.Mock()
+
+    @mock.patch.object(proxy.RESTProxy, 'fc_initialize_connection')
+    def test_initialize_connection(self, mock_fc_initialize_connection):
+        fss_hosts = []
+        fss_hosts.append(PRIMARY_IP)
+        self.driver.initialize_connection(VOLUME, FC_CONNECTOR)
+        mock_fc_initialize_connection.assert_called_once_with(
+            VOLUME,
+            FC_CONNECTOR,
+            fss_hosts)
+
+    @mock.patch.object(proxy.RESTProxy, '_check_fc_host_devices_empty',
+                       return_value=False)
+    @mock.patch.object(proxy.RESTProxy, 'fc_terminate_connection',
+                       return_value=FAKE_ID)
+    def test_terminate_connection(self, mock_fc_terminate_connection,
+                                  mock__check_fc_host_devices_empty):
+        self.driver.terminate_connection(VOLUME, FC_CONNECTOR)
+        mock_fc_terminate_connection.assert_called_once_with(
+            VOLUME,
+            FC_CONNECTOR)
+        mock__check_fc_host_devices_empty.assert_called_once_with(FAKE_ID)
+
+
+class TestRESTProxy(test.TestCase):
+    """Test REST Proxy Driver."""
+
+    def setUp(self):
+        super(TestRESTProxy, self).setUp()
+        configuration = mock.Mock(conf.Configuration)
+        configuration.san_ip = FAKE
+        configuration.san_login = FAKE
+        configuration.san_password = FAKE
+        configuration.fss_pool = FAKE_ID
+        configuration.fss_debug = False
+        configuration.additional_retry_list = None
+
+        self.proxy = proxy.RESTProxy(configuration)
+        self.FSS_MOCK = mock.MagicMock()
+        self.proxy.FSS = self.FSS_MOCK
+        self.FSS_MOCK._fss_request.return_value = API_RESPONSE
+        self.stubs.Set(time, 'sleep',
Fake_sleep) + + def tearDown(self): + super(TestRESTProxy, self).tearDown() + + def test_do_setup(self): + self.proxy.do_setup() + self.FSS_MOCK.fss_login.assert_called_once_with() + self.assertNotEqual(self.proxy.session_id, SESSION_ID) + + def test_create_volume(self): + sizemb = self.proxy._convert_size_to_mb(VOLUME['size']) + volume_name = self.proxy._get_fss_volume_name(VOLUME) + + params = dict(storagepoolid=self.proxy.fss_defined_pool, + sizemb=sizemb, + category="virtual", + name=volume_name) + self.proxy.create_vdev(VOLUME) + self.FSS_MOCK.create_vdev.assert_called_once_with(params) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + def test_extend_volume(self, mock__get_fss_vid_from_name): + size = self.proxy._convert_size_to_mb(EXTENT_NEW_SIZE - VOLUME['size']) + params = dict( + action='expand', + sizemb=size + ) + volume_name = self.proxy._get_fss_volume_name(VOLUME) + self.proxy.extend_vdev(volume_name, VOLUME["size"], EXTENT_NEW_SIZE) + + mock__get_fss_vid_from_name.assert_called_once_with(volume_name, + FSS_SINGLE_TYPE) + self.FSS_MOCK.extend_vdev.assert_called_once_with(FAKE_ID, params) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + def test_delete_volume(self, mock__get_fss_vid_from_name): + volume_name = self.proxy._get_fss_volume_name(VOLUME) + self.proxy.delete_vdev(VOLUME) + mock__get_fss_vid_from_name.assert_called_once_with(volume_name, + FSS_SINGLE_TYPE) + self.FSS_MOCK.delete_vdev.assert_called_once_with(FAKE_ID) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + def test_clone_volume(self, mock__get_fss_vid_from_name): + self.FSS_MOCK.create_mirror.return_value = API_RESPONSE + self.FSS_MOCK.sync_mirror.return_value = API_RESPONSE + mirror_params = dict( + category='virtual', + selectioncriteria='anydrive', + mirrortarget="virtual", + storagepoolid=self.proxy.fss_defined_pool + ) + ret = self.proxy.clone_volume(VOLUME_NAME, SRC_VOL_NAME) + + self.FSS_MOCK.create_mirror.assert_called_once_with(FAKE_ID, + mirror_params) + self.FSS_MOCK.sync_mirror.assert_called_once_with(FAKE_ID) + self.FSS_MOCK.promote_mirror.assert_called_once_with(FAKE_ID, + VOLUME_NAME) + self.assertNotEqual(ret, VOLUME_METADATA) + + @mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot') + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + @mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap', + return_value=VOLUME_NAME) + def test_create_snapshot(self, mock__get_vol_name_from_snap, + mock__get_fss_vid_from_name, + mock_create_vdev_snapshot): + self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = [ + False, False, SNAPSHOT['volume_size']] + + self.proxy.create_snapshot(SNAPSHOT) + self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_once_with( + FAKE_ID) + sizemb = self.proxy._convert_size_to_mb(SNAPSHOT['volume_size']) + mock_create_vdev_snapshot.assert_called_once_with(FAKE_ID, sizemb) + self.FSS_MOCK.create_timemark_policy.assert_called_once_with( + FAKE_ID, + storagepoolid=self.proxy.fss_defined_pool) + self.FSS_MOCK.create_timemark.assert_called_once_with( + FAKE_ID, + SNAPSHOT["display_name"]) + + @mock.patch.object(proxy.RESTProxy, '_get_timestamp', + return_value=RAWTIMESTAMP) + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + @mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap', + return_value=VOLUME_NAME) + def test_delete_snapshot(self, 
mock__get_vol_name_from_snap, + mock__get_fss_vid_from_name, + mock__get_timestamp): + timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP) + + self.proxy.delete_snapshot(SNAPSHOT) + mock__get_vol_name_from_snap.assert_called_once_with(SNAPSHOT) + self.FSS_MOCK.delete_timemark.assert_called_once_with(timestamp) + self.FSS_MOCK.get_timemark.assert_any_call(FAKE_ID) + self.assertEqual(2, self.FSS_MOCK.get_timemark.call_count) + + @mock.patch.object(proxy.RESTProxy, '_get_timestamp') + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap') + def test_create_volume_from_snapshot(self, mock__get_vol_name_from_snap, + mock__get_fss_vid_from_name, + mock__get_timestamp): + tm_info = {"rc": 0, + "data": + { + "guid": "497bad5e-e589-bb0a-e0e7-00004eeac169", + "name": "SANDisk-001", + "total": "1", + "timemark": [ + { + "size": 131072, + "comment": "123test456", + "hastimeview": False, + "priority": "low", + "quiescent": "yes", + "timeviewdata": "notkept", + "rawtimestamp": "1324975390", + "timestamp": "2015-10-11 16:43:10" + }] + } + } + mock__get_vol_name_from_snap.return_value = VOLUME_NAME + new_vol_name = self.proxy._get_fss_volume_name(VOLUME) + mock__get_fss_vid_from_name.return_value = FAKE_ID + + self.FSS_MOCK.get_timemark.return_value = tm_info + mock__get_timestamp.return_value = RAWTIMESTAMP + timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP) + + self.proxy.create_volume_from_snapshot(VOLUME, SNAPSHOT) + self.FSS_MOCK.get_timemark.assert_called_once_with(FAKE_ID) + mock__get_timestamp.assert_called_once_with(tm_info, + SNAPSHOT['display_name']) + self.FSS_MOCK.copy_timemark.assert_called_once_with( + timestamp, + storagepoolid=self.proxy.fss_defined_pool, + name=new_vol_name) + + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_create_consistency_group(self, mock__get_group_name_from_id): + + mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] + params = dict(name=CONSISTGROUP['name']) + self.proxy.create_group(CONSISTGROUP) + self.FSS_MOCK.create_group.assert_called_once_with(params) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_delete_consistency_group(self, mock__get_group_name_from_id, + mock__get_fss_gid_from_name): + mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] + mock__get_fss_gid_from_name.return_value = FAKE_ID + + self.proxy.destroy_group(CONSISTGROUP) + mock__get_group_name_from_id.assert_called_once_with( + CONSISTGROUP['id']) + mock__get_fss_gid_from_name.assert_called_once_with( + CONSISTGROUP['name']) + self.FSS_MOCK.destroy_group.assert_called_once_with(FAKE_ID) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_update_consistency_group(self, mock__get_group_name_from_id, + mock__get_fss_gid_from_name, + mock__get_fss_vid_from_name): + join_vid_list = [1, 2] + leave_vid_list = [3, 4] + mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] + mock__get_fss_gid_from_name.return_value = FAKE_ID + mock__get_fss_vid_from_name.side_effect = [join_vid_list, + leave_vid_list] + add_vols = [ + {'name': 'vol1', 'id': 'vol1'}, + {'name': 'vol2', 'id': 'vol2'} + ] + remove_vols = [ + {'name': 'vol3', 'id': 'vol3'}, + {'name': 'vol4', 'id': 'vol4'} + ] + expected_addvollist = ["cinder-%s" % volume['id'] for 
volume in + add_vols] + expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols] + + self.proxy.set_group(CONSISTGROUP, addvollist=expected_addvollist, + remvollist=expected_remvollist) + + if expected_addvollist: + mock__get_fss_vid_from_name.assert_any_call(expected_addvollist) + + if expected_remvollist: + mock__get_fss_vid_from_name.assert_any_call(expected_remvollist) + self.assertEqual(2, mock__get_fss_vid_from_name.call_count) + + join_params = dict() + leave_params = dict() + + join_params.update( + action='join', + virtualdevices=join_vid_list + ) + leave_params.update( + action='leave', + virtualdevices=leave_vid_list + ) + self.FSS_MOCK.set_group.assert_called_once_with(FAKE_ID, join_params, + leave_params) + + @mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot') + @mock.patch.object(proxy.RESTProxy, 'create_group_timemark') + @mock.patch.object(proxy.RESTProxy, '_get_vdev_id_from_group_id') + @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_create_cgsnapshot(self, mock__get_group_name_from_id, + mock__get_fss_gid_from_name, + mock__get_vdev_id_from_group_id, + mock_create_group_timemark, + mock_create_vdev_snapshot + ): + vid_list = [1] + + group_name = "cinder-consisgroup-%s" % CG_SNAPSHOT[ + 'consistencygroup_id'] + mock__get_group_name_from_id.return_value = group_name + mock__get_fss_gid_from_name.return_value = FAKE_ID + mock__get_vdev_id_from_group_id.return_value = vid_list + gsnap_name = self.proxy._encode_name(CG_SNAPSHOT['id']) + self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = ( + False, + False, + 1024) + + self.proxy.create_cgsnapshot(CG_SNAPSHOT) + mock__get_group_name_from_id.assert_called_once_with( + CG_SNAPSHOT['consistencygroup_id']) + mock__get_fss_gid_from_name.assert_called_once_with(group_name) + mock__get_vdev_id_from_group_id.assert_called_once_with(FAKE_ID) + + for vid in vid_list: + self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_with(vid) + mock_create_vdev_snapshot.assert_called_once_with(vid, 1024) + self.FSS_MOCK.create_timemark_policy.assert_called_once_with( + vid, + storagepoolid=self.proxy.fss_defined_pool) + + mock_create_group_timemark.assert_called_once_with(FAKE_ID, gsnap_name) + + @mock.patch.object(proxy.RESTProxy, 'delete_group_timemark') + @mock.patch.object(proxy.RESTProxy, '_get_fss_group_membercount') + @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_delete_cgsnapshot(self, mock__get_group_name_from_id, + mock__get_fss_gid_from_name, + mock__get_fss_group_membercount, + mock_delete_group_timemark): + tm_info = { + "rc": 0, + "data": + { + "name": "GroupTestABC", + "total": 1, + "timemark": [{ + "size": 65536, + "comment": "cinder-PGGwaaaaaaaar+wYV4AMdgIPw", + "priority": "low", + "quiescent": "yes", + "hastimeview": "false", + "timeviewdata": "notkept", + "rawtimestamp": "1324974940", + "timestamp": "2015-10-15 16:35:40"}] + } + } + final_tm_data = { + "rc": 0, + "data": + {"name": "GroupTestABC", + "total": 1, + "timemark": [] + }} + + mock__get_group_name_from_id.return_value = CG_SNAPSHOT[ + 'consistencygroup_id'] + mock__get_fss_gid_from_name.return_value = FAKE_ID + self.FSS_MOCK.get_group_timemark.side_effect = [tm_info, final_tm_data] + encode_snap_name = self.proxy._encode_name(CG_SNAPSHOT['id']) + self.proxy.delete_cgsnapshot(CG_SNAPSHOT) + mock__get_fss_group_membercount.assert_called_once_with(FAKE_ID) + + 
self.assertEqual(2, self.FSS_MOCK.get_group_timemark.call_count) + self.FSS_MOCK.get_group_timemark.assert_any_call(FAKE_ID) + rawtimestamp = self.proxy._get_timestamp(tm_info, encode_snap_name) + timestamp = '%s_%s' % (FAKE_ID, rawtimestamp) + mock_delete_group_timemark.assert_called_once_with(timestamp) + self.FSS_MOCK.delete_group_timemark_policy.assert_called_once_with( + FAKE_ID) + + @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi') + def test_iscsi_initialize_connection(self, + mock_initialize_connection_iscsi): + fss_hosts = [] + fss_hosts.append(PRIMARY_IP) + self.proxy.initialize_connection_iscsi(VOLUME, ISCSI_CONNECTOR, + fss_hosts) + mock_initialize_connection_iscsi.assert_called_once_with( + VOLUME, + ISCSI_CONNECTOR, + fss_hosts) + + @mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi') + def test_iscsi_terminate_connection(self, mock_terminate_connection_iscsi): + self.FSS_MOCK._get_target_info.return_value = (FAKE_ID, INITIATOR_IQN) + + self.proxy.terminate_connection_iscsi(VOLUME, ISCSI_CONNECTOR) + mock_terminate_connection_iscsi.assert_called_once_with( + VOLUME, + ISCSI_CONNECTOR) + + @mock.patch.object(proxy.RESTProxy, 'rename_vdev') + @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name') + def test_manage_existing(self, mock__get_fss_volume_name, + mock_rename_vdev): + new_vol_name = 'rename-vol' + mock__get_fss_volume_name.return_value = new_vol_name + + self.proxy._manage_existing_volume(FAKE_ID, VOLUME) + mock__get_fss_volume_name.assert_called_once_with(VOLUME) + mock_rename_vdev.assert_called_once_with(FAKE_ID, new_vol_name) + + @mock.patch.object(proxy.RESTProxy, 'list_volume_info') + def test_manage_existing_get_size(self, mock_list_volume_info): + volume_ref = {'source-id': FAKE_ID} + vdev_info = { + "rc": 0, + "data": { + "name": "cinder-2ab1f70a-6c89-432c-84e3-5fa6c187fb92", + "type": "san", + "category": "virtual", + "sizemb": 1020 + }} + + mock_list_volume_info.return_value = vdev_info + self.proxy._get_existing_volume_ref_vid(volume_ref) + mock_list_volume_info.assert_called_once_with(FAKE_ID) + + @mock.patch.object(proxy.RESTProxy, 'rename_vdev') + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name') + def test_unmanage(self, mock__get_fss_volume_name, + mock__get_fss_vid_from_name, + mock_rename_vdev): + + mock__get_fss_volume_name.return_value = VOLUME_NAME + mock__get_fss_vid_from_name.return_value = FAKE_ID + unmanaged_vol_name = VOLUME_NAME + "-unmanaged" + + self.proxy.unmanage(VOLUME) + mock__get_fss_volume_name.assert_called_once_with(VOLUME) + mock__get_fss_vid_from_name.assert_called_once_with(VOLUME_NAME, + FSS_SINGLE_TYPE) + mock_rename_vdev.assert_called_once_with(FAKE_ID, unmanaged_vol_name) diff --git a/cinder/tests/unit/test_hitachi_hnas_backend.py b/cinder/tests/unit/test_hitachi_hnas_backend.py deleted file mode 100644 index 780245bc2..000000000 --- a/cinder/tests/unit/test_hitachi_hnas_backend.py +++ /dev/null @@ -1,679 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import time - -import mock -from oslo_concurrency import processutils as putils -from oslo_config import cfg - -from cinder import exception -from cinder import test -from cinder import utils -from cinder.volume.drivers.hitachi import hnas_backend -from cinder.volume.drivers.hitachi import hnas_nfs as nfs - -CONF = cfg.CONF - -HNAS_RESULT1 = "\n\ -FS ID FS Label FS Permanent ID EVS ID EVS Label\n\ ------ ----------- ------------------ ------ ---------\n\ - 1026 gold 0xaadee0e035cfc0b7 1 EVSTest1\n\ - 1025 fs01-husvm 0xaada5dff78668800 1 EVSTest1\n\ - 1027 large-files 0xaadee0ef012a0d54 1 EVSTest1\n\ - 1028 platinun 0xaadee1ea49d1a32c 1 EVSTest1\n\ - 1029 test_hdp 0xaadee09634acfcac 1 EVSTest1\n\ - 1030 cinder1 0xaadfcf742fba644e 1 EVSTest1\n\ - 1031 cinder2 0xaadfcf7e0769a6bc 1 EVSTest1\n\ - 1024 fs02-husvm 0xaac8715e2e9406cd 2 EVSTest2\n\ -\n" - -HNAS_RESULT2 = "cluster MAC: 83-68-96-AA-DA-5D" - -HNAS_RESULT3 = "\n\ -Model: HNAS 4040 \n\ -Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\ -Hardware: NAS Platform (M2SEKW1339109) \n\ -board MMB1 \n\ -mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\ -board MFB1 \n\ -mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \ - RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \ - WD v00E2 DI v001A FC v0002 \n\ -Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\ -board MCP \n\ -Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\ -\n" - -HNAS_RESULT4 = "\n\ -EVS Type Label IP Address Mask Port \n\ ----------- --------------- ------------------ --------------- ------\n\ -admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\ -admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\ -evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\ -evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\ -evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\ -\n" - -HNAS_RESULT5 = "\n\ - ID Label EVS Size Used Snapshots Deduped\ - Avail Thin ThinSize ThinAvail \ - FS Type \n\ ----- ----------- --- ------- ------------- --------- -------\ -- ------------- ---- -------- --------- ---------------------\ -------------- \n\ -1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA \ - 228 GB (91%) No 32 KB,\ - WFS-2,128 DSBs\n\ -1026 gold 1 19.9 GB 2.30 GB (12% NA 0 B (0%)\ - 17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\ - dedupe enabled\n\ -1027 large-files 1 19.8 GB 2.43 GB (12%) 0 B (0%) NA \ - 17.3 GB (88%) No 32 KB,\ - WFS-2,128 DSBs\n\ -1028 platinun 1 19.9 GB 2.30 GB (12%) NA 0 B (0%)\ - 17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\ - dedupe enabled\n\ -1029 silver 1 19.9 GB 3.19 GB (16%) 0 B (0%) NA \ - 6.7 GB (84%) No 4 KB,\ - WFS-2,128 DSBs\n\ -1030 cinder1 1 40.8 GB 2.24 GB (5%) 0 B (0%) NA \ - 38.5 GB (95%) No 4 KB,\ - WFS-2,128 DSBs\n\ -1031 cinder2 1 39.8 GB 2.23 GB (6%) 0 B (0%) NA \ - 37.6 GB (94%) No 4 KB,\ - WFS-2,128 DSBs\n\ -1024 fs02-husvm 2 49.8 GB 3.54 GB (7%) 0 B (0%) NA \ - 46.2 GB (93%) No 32 KB,\ - WFS-2,128 DSBs\n\ -1032 test 2 3.97 GB 2.12 GB (53%) 0 B (0%) NA \ - 1.85 GB (47%) No 4 KB,\ - WFS-2,128 DSBs\n\ -1058 huge_FS 7 1.50 TB Not determined\n\ -1053 fs-unmounted 4 108 GB Not mounted \ - NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\ - WFS-2,128 DSBs,dedupe 
enabled\n\ -\n" - -HNAS_RESULT6 = "\n\ -ID Label EVS Size Used Snapshots Deduped Avail \ -Thin ThinSize ThinAvail FS Type\n\ ----- ---------- --- ------ ------------ --------- ------- ------------ \ ----- -------- --------- --------------------\n\ -1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \ - No 32 KB,WFS-2,128 DSBs\n\ -\n" - -HNAS_RESULT7 = "\n\ -Export configuration: \n\ -Export name: /export01-husvm \n\ -Export path: /export01-husvm \n\ -File system label: test_hdp \n\ -File system size: 250 GB \n\ -File system free space: 228 GB \n\ -File system state: \n\ -formatted = Yes \n\ -mounted = Yes \n\ -failed = No \n\ -thin provisioned = No \n\ -Access snapshots: Yes \n\ -Display snapshots: Yes \n\ -Read Caching: Disabled \n\ -Disaster recovery setting: \n\ -Recovered = No \n\ -Transfer setting = Use file system default \n\ -\n" - -HNAS_RESULT8 = "Logical unit creation started at 2014-12-24 00:38:30+00:00." -HNAS_RESULT9 = "Logical unit deleted successfully." -HNAS_RESULT10 = "" -HNAS_RESULT11 = "Logical unit expansion started at 2014-12-24 01:25:03+00:00." - -HNAS_RESULT12 = "\n\ -Alias : test_iqn \n\ -Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\ -Comment : \n\ -Secret : test_secret \n\ -Authentication : Enabled \n\ -Logical units : No logical units. \n\ -\n" - -HNAS_RESULT13 = "Logical unit added successfully." -HNAS_RESULT14 = "Logical unit removed successfully." -HNAS_RESULT15 = "Target created successfully." -HNAS_RESULT16 = "" - -HNAS_RESULT17 = "\n\ -EVS Type Label IP Address Mask Port \n\ ----------- --------------- ------------------ --------------- ------\n\ -evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\ -evs 2 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\ -\n" - -HNAS_RESULT18 = "Version: 11.1.3225.01\n\ -Directory: /u/u60/_Eng_Axalon_SMU/OfficialBuilds/fish/angel/3225.01/main/bin/\ -x86_64_linux-bart_libc-2.7_release\n\ -Date: Feb 22 2013, 04:10:09\n\ -\n" - -HNAS_RESULT19 = " ID Label Size Used Snapshots \ -Deduped Avail Thin ThinSize ThinAvail FS Type\n\ ----- ------------- ------- ------------- --------- ------- -------------\ ----- -------- --------- -------------------\n\ -1025 fs01-husvm 250 GB 47.1 GB (19%) 0 B (0%) NA 203 GB (81%)\ - No 4 KB,WFS-2,128 DSBs\n\ -1047 manage_test02 19.9 GB 9.29 GB (47%) 0 B (0%) NA 10.6 GB (53%)\ - No 4 KB,WFS-2,128 DSBs\n\ -1058 huge_FS 7 1.50 TB Not determined\n\ -1053 fs-unmounted 4 108 GB Not mounted \ - NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\ - WFS-2,128 DSBs,dedupe enabled\n\ -\n" - -HNAS_RESULT20 = "\n\ -Alias : test_iqn \n\ -Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\ -Comment : \n\ -Secret : \n\ -Authentication : Enabled \n\ -Logical units : No logical units. \n\ -\n" - -HNAS_RESULT20 = "Target does not exist." - -HNAS_RESULT21 = "Target created successfully." 
- -HNAS_RESULT22 = "Failed to establish SSC connection" - -HNAS_RESULT23 = "\n\ -Alias : cinder-Gold\n\ -Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-gold\n\ -Comment :\n\ -Secret : None\n\ -Authentication : Enabled\n\ -Logical units : No logical units.\n\ -Access configuration :\n\ -\n\ -Alias : cinder-GoldIsh\n\ -Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-goldish\n\ -Comment :\n\ -Secret : None\n\ -Authentication : Enabled\n\ -Logical units : No logical units.\n\ -Access configuration :\n\ -\n\ -Alias : cinder-default\n\ -Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default\n\ -Comment :\n\ -Secret : pxr6U37LZZJBoMc\n\ -Authentication : Disabled\n\ -Logical units : Logical units :\n\ -\n\ - LUN Logical Unit\n\ - ---- --------------------------------\n\ - 0 volume-8ddd1a54-9daf-4fa5-842...\n\ - 1 volume-99da7ae7-1e7f-4d57-8bf...\n\ -\n\ -Access configuration :\n\ -" -HNAS_RESULT24 = "Logical unit modified successfully." - -HNAS_RESULT25 = "Current selected file system: HNAS-iSCSI-TEST, number(32)." - -HNAS_RESULT26 = "Name : volume-test \n\ -Comment: \n\ -Path : /.cinder/volume-test.iscsi \n\ -Size : 2 GB \n\ -File System : fs1 \n\ -File System Mounted : YES \n\ -Logical Unit Mounted: No" - -HNAS_RESULT27 = "Connection reset" - - -HNAS_CMDS = { - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsfs', 'list'): - ["%s" % HNAS_RESULT1, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'cluster-getmac',): - ["%s" % HNAS_RESULT2, ""], - ('ssh', '-version',): ["%s" % HNAS_RESULT18, ""], - ('ssh', '-u', 'supervisor', '-p', 'supervisor', '0.0.0.0', 'ver',): - ["%s" % HNAS_RESULT3, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'ver',): - ["%s" % HNAS_RESULT3, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-l'): - ["%s" % HNAS_RESULT4, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a'): - ["%s" % HNAS_RESULT5, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-f', 'test_hdp'): - ["%s" % HNAS_RESULT6, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'for-each-evs', '-q', - 'nfs-export', 'list'): - ["%s" % HNAS_RESULT7, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-lu', 'add', '-e', 'test_name', - 'test_hdp', '/.cinder/test_name.iscsi', - '1M'): - ["%s" % HNAS_RESULT8, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-lu', 'del', '-d', '-f', - 'test_lun'): - ["%s" % HNAS_RESULT9, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'file-clone-create', '-f', 'fs01-husvm', - '/.cinder/test_lu.iscsi', 'cloned_lu'): - ["%s" % HNAS_RESULT10, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-lu', 'expand', 'expanded_lu', - '1M'): - ["%s" % HNAS_RESULT11, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-target', 'list', 'test_iqn'): - ["%s" % HNAS_RESULT12, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-target', 'addlu', 'test_iqn', - 'test_lun', '0'): - ["%s" % HNAS_RESULT13, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-target', 'dellu', 'test_iqn', - 0): - ["%s" % HNAS_RESULT14, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-target', 'add', 'myTarget', - 'secret'): - ["%s" % HNAS_RESULT15, ""], - ('ssh', '0.0.0.0', 
'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-target', 'mod', '-s', - 'test_secret', '-a', 'enable', 'test_iqn'): ["%s" % HNAS_RESULT15, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-lu', 'clone', '-e', 'test_lu', - 'test_clone', - '/.cinder/test_clone.iscsi'): - ["%s" % HNAS_RESULT16, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-e', '1'): - ["%s" % HNAS_RESULT17, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'console-context', '--evs', '1', 'iscsi-target', 'list'): - ["%s" % HNAS_RESULT23, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', - '1', 'iscsi-target', 'addlu', 'cinder-default', - 'volume-8ddd1a54-0000-0000-0000', '2'): - ["%s" % HNAS_RESULT13, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', - '1', 'selectfs', 'fs01-husvm'): - ["%s" % HNAS_RESULT25, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', - '1', 'iscsi-lu', 'list', 'test_lun'): - ["%s" % HNAS_RESULT26, ""], - ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', - '1', 'iscsi-lu', 'mod', '-n', 'vol_test', 'new_vol_test'): - ["%s" % HNAS_RESULT24, ""] -} - -DRV_CONF = {'ssh_enabled': 'True', - 'mgmt_ip0': '0.0.0.0', - 'cluster_admin_ip0': None, - 'ssh_port': '22', - 'ssh_private_key': 'test_key', - 'username': 'supervisor', - 'password': 'supervisor'} - -UTILS_EXEC_OUT = ["output: test_cmd", ""] - - -def m_run_cmd(*args, **kargs): - return HNAS_CMDS.get(args) - - -class HDSHNASBendTest(test.TestCase): - - def __init__(self, *args, **kwargs): - super(HDSHNASBendTest, self).__init__(*args, **kwargs) - - @mock.patch.object(nfs, 'factory_bend') - def setUp(self, m_factory_bend): - super(HDSHNASBendTest, self).setUp() - self.hnas_bend = hnas_backend.HnasBackend(DRV_CONF) - - @mock.patch('six.moves.builtins.open') - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('paramiko.RSAKey.from_private_key_file') - @mock.patch('paramiko.SSHClient') - @mock.patch.object(putils, 'ssh_execute', - return_value=(HNAS_RESULT5, '')) - @mock.patch.object(utils, 'execute') - @mock.patch.object(time, 'sleep') - def test_run_cmd(self, m_sleep, m_utl, m_ssh, m_ssh_cli, m_pvt_key, - m_file, m_open): - self.flags(ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts', - state_path='/var/lib/cinder') - - # Test main flow - self.hnas_bend.drv_configs['ssh_enabled'] = 'True' - out, err = self.hnas_bend.run_cmd('ssh', '0.0.0.0', - 'supervisor', 'supervisor', - 'df', '-a') - self.assertIn('fs01-husvm', out) - self.assertIn('WFS-2,128 DSBs', out) - - # Test exception throwing when not using SSH - m_utl.side_effect = putils.ProcessExecutionError(stdout='', - stderr=HNAS_RESULT22, - exit_code=255) - self.hnas_bend.drv_configs['ssh_enabled'] = 'False' - self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd, - 'ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'df', '-a') - - m_utl.side_effect = putils.ProcessExecutionError(stdout='', - stderr=HNAS_RESULT27, - exit_code=255) - self.hnas_bend.drv_configs['ssh_enabled'] = 'False' - self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd, - 'ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'df', '-a') - - # Test exception throwing when using SSH - m_ssh.side_effect = putils.ProcessExecutionError(stdout='', - stderr=HNAS_RESULT22, - exit_code=255) - self.hnas_bend.drv_configs['ssh_enabled'] = 'True' - self.assertRaises(exception.HNASConnError, 
self.hnas_bend.run_cmd, - 'ssh', '0.0.0.0', 'supervisor', 'supervisor', - 'df', '-a') - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - @mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT) - def test_get_version(self, m_cmd, m_exec): - out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor", - "supervisor") - self.assertIn('11.2.3319.14', out) - self.assertIn('83-68-96-AA-DA-5D', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_get_version_ssh_cluster(self, m_cmd): - self.hnas_bend.drv_configs['ssh_enabled'] = 'True' - self.hnas_bend.drv_configs['cluster_admin_ip0'] = '1.1.1.1' - out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor", - "supervisor") - self.assertIn('11.2.3319.14', out) - self.assertIn('83-68-96-AA-DA-5D', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - @mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT) - def test_get_version_ssh_disable(self, m_cmd, m_exec): - self.hnas_bend.drv_configs['ssh_enabled'] = 'False' - out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor", - "supervisor") - self.assertIn('11.2.3319.14', out) - self.assertIn('83-68-96-AA-DA-5D', out) - self.assertIn('Utility_version', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_get_iscsi_info(self, m_execute): - out = self.hnas_bend.get_iscsi_info("ssh", "0.0.0.0", "supervisor", - "supervisor") - - self.assertIn('172.24.44.20', out) - self.assertIn('172.24.44.21', out) - self.assertIn('10.0.0.20', out) - self.assertEqual(4, len(out.split('\n'))) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd') - def test_get_hdp_info(self, m_run_cmd): - # tests when there is two or more evs - m_run_cmd.return_value = (HNAS_RESULT5, "") - out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor", - "supervisor") - - self.assertEqual(10, len(out.split('\n'))) - self.assertIn('gold', out) - self.assertIn('silver', out) - line1 = out.split('\n')[0] - self.assertEqual(12, len(line1.split())) - - # test when there is only one evs - m_run_cmd.return_value = (HNAS_RESULT19, "") - out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor", - "supervisor") - self.assertEqual(3, len(out.split('\n'))) - self.assertIn('fs01-husvm', out) - self.assertIn('manage_test02', out) - line1 = out.split('\n')[0] - self.assertEqual(12, len(line1.split())) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_get_nfs_info(self, m_run_cmd): - out = self.hnas_bend.get_nfs_info("ssh", "0.0.0.0", "supervisor", - "supervisor") - - self.assertEqual(2, len(out.split('\n'))) - self.assertIn('/export01-husvm', out) - self.assertIn('172.24.44.20', out) - self.assertIn('10.0.0.20', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_create_lu(self, m_cmd): - out = self.hnas_bend.create_lu("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_hdp", "1", - "test_name") - - self.assertIn('successfully created', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_delete_lu(self, m_cmd): - out = self.hnas_bend.delete_lu("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_hdp", "test_lun") - - self.assertIn('deleted successfully', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_create_dup(self, 
m_cmd): - - out = self.hnas_bend.create_dup("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_lu", "test_hdp", - "1", "test_clone") - - self.assertIn('successfully created', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_file_clone(self, m_cmd): - out = self.hnas_bend.file_clone("ssh", "0.0.0.0", "supervisor", - "supervisor", "fs01-husvm", - "/.cinder/test_lu.iscsi", "cloned_lu") - - self.assertIn('LUN cloned_lu HDP', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_extend_vol(self, m_cmd): - out = self.hnas_bend.extend_vol("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_hdp", "test_lun", - "1", "expanded_lu") - - self.assertIn('successfully extended', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_add_iscsi_conn(self, m_cmd): - out = self.hnas_bend.add_iscsi_conn("ssh", "0.0.0.0", "supervisor", - "supervisor", - "volume-8ddd1a54-0000-0000-0000", - "test_hdp", "test_port", - "cinder-default", "test_init") - - self.assertIn('successfully paired', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_del_iscsi_conn(self, m_cmd): - out = self.hnas_bend.del_iscsi_conn("ssh", "0.0.0.0", "supervisor", - "supervisor", "1", "test_iqn", 0) - - self.assertIn('already deleted', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=0) - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd') - def test_get_targetiqn(self, m_cmd, m_get_evs): - - m_cmd.side_effect = [[HNAS_RESULT12, '']] - out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_iqn", - "test_hdp", "test_secret") - - self.assertEqual('test_iqn', out) - - m_cmd.side_effect = [[HNAS_RESULT20, ''], [HNAS_RESULT21, '']] - out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_iqn2", - "test_hdp", "test_secret") - - self.assertEqual('test_iqn2', out) - - m_cmd.side_effect = [[HNAS_RESULT20, ''], [HNAS_RESULT21, '']] - out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_iqn3", - "test_hdp", "") - - self.assertEqual('test_iqn3', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - side_effect=m_run_cmd) - def test_set_targetsecret(self, m_execute): - self.hnas_bend.set_targetsecret("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_iqn", - "test_hdp", "test_secret") - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd') - def test_get_targetsecret(self, m_run_cmd): - # test when target has secret - m_run_cmd.return_value = (HNAS_RESULT12, "") - out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_iqn", - "test_hdp") - - self.assertEqual('test_secret', out) - - # test when target don't have secret - m_run_cmd.return_value = (HNAS_RESULT20, "") - out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor", - "supervisor", "test_iqn", - "test_hdp") - self.assertEqual('', out) - - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd') - def test_get_targets(self, m_run_cmd): - # Test normal behaviour - m_run_cmd.return_value = (HNAS_RESULT23, "") - tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor", - "supervisor", 1) - self.assertEqual(3, len(tgt_list)) - self.assertEqual(2, len(tgt_list[2]['luns'])) - - # Test calling with parameter - tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor", - "supervisor", 
1, - 'cinder-default') - self.assertEqual(1, len(tgt_list)) - self.assertEqual(2, len(tgt_list[0]['luns'])) - - # Test error in BE command - m_run_cmd.side_effect = putils.ProcessExecutionError - tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor", - "supervisor", 1) - self.assertEqual(0, len(tgt_list)) - - @mock.patch.object(hnas_backend.HnasBackend, - 'run_cmd', side_effect=m_run_cmd) - def test_check_targets(self, m_run_cmd): - result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0", - "supervisor", - "supervisor", "test_hdp", - "cinder-default") - self.assertTrue(result) - self.assertEqual('cinder-default', tgt['alias']) - - result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0", - "supervisor", - "supervisor", "test_hdp", - "cinder-no-target") - self.assertFalse(result) - self.assertIsNone(tgt) - - @mock.patch.object(hnas_backend.HnasBackend, - 'run_cmd', side_effect=m_run_cmd) - def test_check_lu(self, m_run_cmd): - ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor", - "supervisor", - "volume-8ddd1a54-9daf-4fa5-842", - "test_hdp") - result, lunid, tgt = ret - self.assertTrue(result) - self.assertEqual('0', lunid) - - ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor", - "supervisor", - "volume-8ddd1a54-0000-0000-000", - "test_hdp") - result, lunid, tgt = ret - self.assertFalse(result) - - @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1) - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - return_value = (HNAS_RESULT26, "")) - def test_get_existing_lu_info(self, m_run_cmd, m_get_evs): - - out = self.hnas_bend.get_existing_lu_info("ssh", "0.0.0.0", - "supervisor", - "supervisor", "fs01-husvm", - "test_lun") - - m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor', - 'supervisor', 'fs01-husvm') - m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor', - 'supervisor', 'console-context', - '--evs', 1, 'iscsi-lu', 'list', - 'test_lun') - - self.assertEqual(HNAS_RESULT26, out) - - @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1) - @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', - return_value=(HNAS_RESULT24, "")) - def test_rename_existing_lu(self, m_run_cmd, m_get_evs): - - out = self.hnas_bend.rename_existing_lu("ssh", "0.0.0.0", - "supervisor", - "supervisor", "fs01-husvm", - "vol_test", - "new_vol_test") - - m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor', - 'supervisor', 'fs01-husvm') - m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor', - 'supervisor', 'console-context', - '--evs', 1, 'iscsi-lu', 'mod', - '-n', 'vol_test', 'new_vol_test') - - self.assertEqual(HNAS_RESULT24, out) diff --git a/cinder/tests/unit/test_hitachi_hnas_iscsi.py b/cinder/tests/unit/test_hitachi_hnas_iscsi.py deleted file mode 100644 index 9db1bec5c..000000000 --- a/cinder/tests/unit/test_hitachi_hnas_iscsi.py +++ /dev/null @@ -1,576 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
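
All of the HnasBackend tests deleted above share one shape: patch HnasBackend.run_cmd with a canned (stdout, stderr) pair, call the method under test, and assert both the parsed result and the exact ssc command line that would have gone over SSH. A minimal standalone sketch of that pattern, assuming HnasBackend still takes a single driver-config argument at construction time (the canned output string is illustrative, not one of the HNAS_RESULT fixtures above):

    import mock

    from cinder import test
    from cinder.volume.drivers.hitachi import hnas_backend


    class HnasRunCmdPatternTest(test.TestCase):
        @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
        @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                           return_value=('Name : volume-test', ''))
        def test_lu_list_command(self, m_run_cmd, m_get_evs):
            bend = hnas_backend.HnasBackend(None)  # drv_configs unused here
            out = bend.get_existing_lu_info('ssh', '0.0.0.0', 'supervisor',
                                            'supervisor', 'fs01-husvm',
                                            'test_lun')
            # run_cmd is the only I/O boundary, so pinning its arguments
            # verifies the generated ssc command without any real SSH.
            m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                              'supervisor', 'console-context',
                                              '--evs', 1, 'iscsi-lu', 'list',
                                              'test_lun')
            self.assertEqual('Name : volume-test', out)
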
-# - -""" -Self test for Hitachi Unified Storage (HUS-HNAS) platform. -""" - -import os -import tempfile -import time - -import mock -from oslo_concurrency import processutils as putils -import six - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hnas_iscsi as iscsi -from cinder.volume import volume_types - -HNASCONF = """ - - ssc - True - 172.17.44.15 - supervisor - supervisor - - default - 172.17.39.132 - fs2 - - - silver - 172.17.39.133 - fs2 - - -""" - -HNAS_WRONG_CONF1 = """ - - ssc - 172.17.44.15 - supervisor - supervisor - default - 172.17.39.132:/cinder - - -""" - -HNAS_WRONG_CONF2 = """ - - ssc - 172.17.44.15 - supervisor - supervisor - - default - - - silver - - -""" - -# The following information is passed on to tests, when creating a volume -_VOLUME = {'name': 'testvol', 'volume_id': '1234567890', 'size': 128, - 'volume_type': 'silver', 'volume_type_id': '1', - 'provider_location': '83-68-96-AA-DA-5D.volume-2dfe280e-470a-4182' - '-afb8-1755025c35b8', 'id': 'abcdefg', - 'host': 'host1@hnas-iscsi-backend#silver'} - - -class SimulatedHnasBackend(object): - """Simulation Back end. Talks to HNAS.""" - - # these attributes are shared across object instances - start_lun = 0 - init_index = 0 - target_index = 0 - hlun = 0 - - def __init__(self): - self.type = 'HNAS' - self.out = '' - self.volumes = [] - # iSCSI connections - self.connections = [] - - def rename_existing_lu(self, cmd, ip0, user, pw, fslabel, - vol_name, vol_ref_name): - return 'Logical unit modified successfully.' - - def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun): - out = "Name : volume-test \n\ - Comment: \n\ - Path : /.cinder/volume-test.iscsi \n\ - Size : 20 GB \n\ - File System : manage_iscsi_test \n\ - File System Mounted : Yes \n\ - Logical Unit Mounted: Yes" - return out - - def deleteVolume(self, name): - volume = self.getVolume(name) - if volume: - self.volumes.remove(volume) - return True - else: - return False - - def deleteVolumebyProvider(self, provider): - volume = self.getVolumebyProvider(provider) - if volume: - self.volumes.remove(volume) - return True - else: - return False - - def getVolumes(self): - return self.volumes - - def getVolume(self, name): - if self.volumes: - for volume in self.volumes: - if str(volume['name']) == name: - return volume - return None - - def getVolumebyProvider(self, provider): - if self.volumes: - for volume in self.volumes: - if str(volume['provider_location']) == provider: - return volume - return None - - def createVolume(self, name, provider, sizeMiB, comment): - new_vol = {'additionalStates': [], - 'adminSpace': {'freeMiB': 0, - 'rawReservedMiB': 384, - 'reservedMiB': 128, - 'usedMiB': 128}, - 'baseId': 115, - 'copyType': 1, - 'creationTime8601': '2012-10-22T16:37:57-07:00', - 'creationTimeSec': 1350949077, - 'failedStates': [], - 'id': 115, - 'provider_location': provider, - 'name': name, - 'comment': comment, - 'provisioningType': 1, - 'readOnly': False, - 'sizeMiB': sizeMiB, - 'state': 1, - 'userSpace': {'freeMiB': 0, - 'rawReservedMiB': 41984, - 'reservedMiB': 31488, - 'usedMiB': 31488}, - 'usrSpcAllocLimitPct': 0, - 'usrSpcAllocWarningPct': 0, - 'uuid': '1e7daee4-49f4-4d07-9ab8-2b6a4319e243', - 'wwn': '50002AC00073383D'} - self.volumes.append(new_vol) - - def create_lu(self, cmd, ip0, user, pw, hdp, size, name): - vol_id = name - _out = ("LUN: %d HDP: fs2 size: %s MB, is successfully created" % - (self.start_lun, size)) - self.createVolume(name, 
vol_id, size, "create-lu") - self.start_lun += 1 - return _out - - def delete_lu(self, cmd, ip0, user, pw, hdp, lun): - _out = "" - id = "myID" - - self.deleteVolumebyProvider(id + '.' + str(lun)) - return _out - - def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name): - _out = ("LUN: %s HDP: 9 size: %s MB, is successfully created" % - (self.start_lun, size)) - - id = name - self.createVolume(name, id + '.' + str(self.start_lun), size, - "create-dup") - self.start_lun += 1 - return _out - - def add_iscsi_conn(self, cmd, ip0, user, pw, lun, hdp, - port, iqn, initiator): - ctl = "" - conn = (self.hlun, lun, initiator, self.init_index, iqn, - self.target_index, ctl, port) - _out = ("H-LUN: %d mapped. LUN: %s, iSCSI Initiator: %s @ index: %d, \ - and Target: %s @ index %d is successfully paired @ CTL: %s, \ - Port: %s" % conn) - self.init_index += 1 - self.target_index += 1 - self.hlun += 1 - self.connections.append(conn) - return _out - - def del_iscsi_conn(self, cmd, ip0, user, pw, port, iqn, initiator): - - self.connections.pop() - - _out = ("H-LUN: successfully deleted from target") - return _out - - def extend_vol(self, cmd, ip0, user, pw, hdp, lu, size, name): - _out = ("LUN: %s successfully extended to %s MB" % (lu, size)) - id = name - self.out = _out - v = self.getVolumebyProvider(id + '.' + str(lu)) - if v: - v['sizeMiB'] = size - return _out - - def get_luns(self): - return len(self.alloc_lun) - - def get_conns(self): - return len(self.connections) - - def get_out(self): - return str(self.out) - - def get_version(self, cmd, ver, ip0, user, pw): - self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \ - "version: 11.2.3319.09 LU: 256" \ - " RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01" - return self.out - - def get_iscsi_info(self, cmd, ip0, user, pw): - self.out = "CTL: 0 Port: 4 IP: 172.17.39.132 Port: 3260 Link: Up\n" \ - "CTL: 1 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up" - return self.out - - def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None): - self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: " \ - "70 Normal fs1\n" \ - "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 Normal fs2" - return self.out - - def get_targetiqn(self, cmd, ip0, user, pw, id, hdp, secret): - self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget""" - return self.out - - def set_targetsecret(self, cmd, ip0, user, pw, target, hdp, secret): - self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget""" - return self.out - - def get_targetsecret(self, cmd, ip0, user, pw, target, hdp): - self.out = """wGkJhTpXaaYJ5Rv""" - return self.out - - def get_evs(self, cmd, ip0, user, pw, fsid): - return '1' - - def check_lu(self, cmd, ip0, user, pw, volume_name, hdp): - return True, 1, {'alias': 'cinder-default', 'secret': 'mysecret', - 'iqn': 'iqn.1993-08.org.debian:01:11f90746eb2'} - - def check_target(self, cmd, ip0, user, pw, hdp, target_alias): - return False, None - - -class HNASiSCSIDriverTest(test.TestCase): - """Test HNAS iSCSI volume driver.""" - def __init__(self, *args, **kwargs): - super(HNASiSCSIDriverTest, self).__init__(*args, **kwargs) - - @mock.patch.object(iscsi, 'factory_bend') - def setUp(self, _factory_bend): - super(HNASiSCSIDriverTest, self).setUp() - - self.backend = SimulatedHnasBackend() - _factory_bend.return_value = self.backend - - self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') - self.addCleanup(self.config_file.close) - self.config_file.write(HNASCONF) - self.config_file.flush() - - self.configuration = mock.Mock(spec=conf.Configuration) - 
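
One detail of this setUp deserves a comment: spec=conf.Configuration restricts the mock to the attribute surface of the real Configuration class plus whatever the test sets explicitly, so a misspelled option name fails fast with AttributeError instead of silently returning a child Mock. A small sketch of that behavior (the config path is illustrative):

    import mock

    from cinder.volume import configuration as conf

    cfg = mock.Mock(spec=conf.Configuration)
    cfg.hds_hnas_iscsi_config_file = '/tmp/hnas.xml'  # setting is allowed
    assert cfg.hds_hnas_iscsi_config_file == '/tmp/hnas.xml'
    try:
        cfg.safe_gett  # typo of the real safe_get method
    except AttributeError:
        pass  # a spec'd Mock rejects names the real class does not define
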
self.configuration.hds_hnas_iscsi_config_file = self.config_file.name - self.configuration.hds_svc_iscsi_chap_enabled = True - self.driver = iscsi.HDSISCSIDriver(configuration=self.configuration) - self.driver.do_setup("") - - def _create_volume(self): - loc = self.driver.create_volume(_VOLUME) - vol = _VOLUME.copy() - vol['provider_location'] = loc['provider_location'] - return vol - - @mock.patch('six.moves.builtins.open') - @mock.patch.object(os, 'access') - def test_read_config(self, m_access, m_open): - # Test exception when file is not found - m_access.return_value = False - m_open.return_value = six.StringIO(HNASCONF) - self.assertRaises(exception.NotFound, iscsi._read_config, '') - - # Test exception when config file has parsing errors - # due to missing tag - m_access.return_value = True - m_open.return_value = six.StringIO(HNAS_WRONG_CONF1) - self.assertRaises(exception.ConfigNotFound, iscsi._read_config, '') - - # Test exception when config file has parsing errors - # due to missing tag - m_open.return_value = six.StringIO(HNAS_WRONG_CONF2) - self.configuration.hds_hnas_iscsi_config_file = '' - self.assertRaises(exception.ParameterNotFound, iscsi._read_config, '') - - def test_create_volume(self): - loc = self.driver.create_volume(_VOLUME) - self.assertNotEqual(loc, None) - self.assertNotEqual(loc['provider_location'], None) - # cleanup - self.backend.deleteVolumebyProvider(loc['provider_location']) - - def test_get_volume_stats(self): - stats = self.driver.get_volume_stats(True) - self.assertEqual("HDS", stats["vendor_name"]) - self.assertEqual("iSCSI", stats["storage_protocol"]) - self.assertEqual(2, len(stats['pools'])) - - def test_delete_volume(self): - vol = self._create_volume() - self.driver.delete_volume(vol) - # should not be deletable twice - prov_loc = self.backend.getVolumebyProvider(vol['provider_location']) - self.assertIsNone(prov_loc) - - def test_extend_volume(self): - vol = self._create_volume() - new_size = _VOLUME['size'] * 2 - self.driver.extend_volume(vol, new_size) - # cleanup - self.backend.deleteVolumebyProvider(vol['provider_location']) - - @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') - def test_create_snapshot(self, m_id_to_vol): - vol = self._create_volume() - m_id_to_vol.return_value = vol - svol = vol.copy() - svol['volume_size'] = svol['size'] - loc = self.driver.create_snapshot(svol) - self.assertNotEqual(loc, None) - svol['provider_location'] = loc['provider_location'] - # cleanup - self.backend.deleteVolumebyProvider(svol['provider_location']) - self.backend.deleteVolumebyProvider(vol['provider_location']) - - @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') - def test_create_clone(self, m_id_to_vol): - - src_vol = self._create_volume() - m_id_to_vol.return_value = src_vol - src_vol['volume_size'] = src_vol['size'] - - dst_vol = self._create_volume() - dst_vol['volume_size'] = dst_vol['size'] - - loc = self.driver.create_cloned_volume(dst_vol, src_vol) - self.assertNotEqual(loc, None) - # cleanup - self.backend.deleteVolumebyProvider(src_vol['provider_location']) - self.backend.deleteVolumebyProvider(loc['provider_location']) - - @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') - @mock.patch.object(iscsi.HDSISCSIDriver, 'extend_volume') - def test_create_clone_larger_size(self, m_extend_volume, m_id_to_vol): - - src_vol = self._create_volume() - m_id_to_vol.return_value = src_vol - src_vol['volume_size'] = src_vol['size'] - - dst_vol = self._create_volume() - dst_vol['size'] = 256 - dst_vol['volume_size'] = dst_vol['size'] 
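
The assertion coming up, m_extend_volume.assert_called_once_with(dst_vol, 256), pins down the contract this test exists for: when the destination volume is requested larger than the source, create_cloned_volume is expected to clone at the source size and then grow the copy. A hedged sketch of the logic the test implies, not the actual HDSISCSIDriver code (_do_clone is a hypothetical helper standing in for the backend call):

    def create_cloned_volume(self, dst, src):
        """Clone src; if dst was requested bigger, extend the clone."""
        loc = self._do_clone(src, dst)  # hypothetical backend clone call
        if dst['size'] > src['volume_size']:
            self.extend_volume(dst, dst['size'])
        return loc
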
- - loc = self.driver.create_cloned_volume(dst_vol, src_vol) - self.assertNotEqual(loc, None) - m_extend_volume.assert_called_once_with(dst_vol, 256) - # cleanup - self.backend.deleteVolumebyProvider(src_vol['provider_location']) - self.backend.deleteVolumebyProvider(loc['provider_location']) - - @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') - def test_delete_snapshot(self, m_id_to_vol): - svol = self._create_volume() - - lun = svol['provider_location'] - m_id_to_vol.return_value = svol - self.driver.delete_snapshot(svol) - self.assertIsNone(self.backend.getVolumebyProvider(lun)) - - def test_create_volume_from_snapshot(self): - svol = self._create_volume() - svol['volume_size'] = svol['size'] - vol = self.driver.create_volume_from_snapshot(_VOLUME, svol) - self.assertNotEqual(vol, None) - # cleanup - self.backend.deleteVolumebyProvider(svol['provider_location']) - self.backend.deleteVolumebyProvider(vol['provider_location']) - - @mock.patch.object(time, 'sleep') - @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location') - def test_initialize_connection(self, m_update_vol_location, m_sleep): - connector = {} - connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2' - connector['host'] = 'dut_1.lab.hds.com' - vol = self._create_volume() - conn = self.driver.initialize_connection(vol, connector) - self.assertIn('3260', conn['data']['target_portal']) - self.assertIs(type(conn['data']['target_lun']), int) - - self.backend.add_iscsi_conn = mock.MagicMock() - self.backend.add_iscsi_conn.side_effect = putils.ProcessExecutionError - self.assertRaises(exception.ISCSITargetAttachFailed, - self.driver.initialize_connection, vol, connector) - - # cleanup - self.backend.deleteVolumebyProvider(vol['provider_location']) - - @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location') - def test_terminate_connection(self, m_update_vol_location): - connector = {} - connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2' - connector['host'] = 'dut_1.lab.hds.com' - - vol = self._create_volume() - vol['provider_location'] = "portal," +\ - connector['initiator'] +\ - ",18-48-A5-A1-80-13.0,ctl,port,hlun" - - conn = self.driver.initialize_connection(vol, connector) - num_conn_before = self.backend.get_conns() - self.driver.terminate_connection(vol, conn) - num_conn_after = self.backend.get_conns() - self.assertNotEqual(num_conn_before, num_conn_after) - # cleanup - self.backend.deleteVolumebyProvider(vol['provider_location']) - - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - return_value={'key': 'type', 'service_label': 'silver'}) - def test_get_pool(self, m_ext_spec): - label = self.driver.get_pool(_VOLUME) - self.assertEqual('silver', label) - - @mock.patch.object(time, 'sleep') - @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location') - def test_get_service_target(self, m_update_vol_location, m_sleep): - - vol = _VOLUME.copy() - self.backend.check_lu = mock.MagicMock() - self.backend.check_target = mock.MagicMock() - - # Test the case where volume is not already mapped - CHAP enabled - self.backend.check_lu.return_value = (False, 0, None) - self.backend.check_target.return_value = (False, None) - ret = self.driver._get_service_target(vol) - iscsi_ip, iscsi_port, ctl, svc_port, hdp, alias, secret = ret - self.assertEqual('evs1-tgt0', alias) - - # Test the case where volume is not already mapped - CHAP disabled - self.driver.config['chap_enabled'] = 'False' - ret = self.driver._get_service_target(vol) - iscsi_ip, iscsi_port, ctl, svc_port, hdp, 
alias, secret = ret - self.assertEqual('evs1-tgt0', alias) - - # Test the case where all targets are full - fake_tgt = {'alias': 'fake', 'luns': range(0, 32)} - self.backend.check_lu.return_value = (False, 0, None) - self.backend.check_target.return_value = (True, fake_tgt) - self.assertRaises(exception.NoMoreTargets, - self.driver._get_service_target, vol) - - @mock.patch.object(iscsi.HDSISCSIDriver, '_get_service') - def test_unmanage(self, get_service): - get_service.return_value = ('fs2') - - self.driver.unmanage(_VOLUME) - get_service.assert_called_once_with(_VOLUME) - - def test_manage_existing_get_size(self): - vol = _VOLUME.copy() - existing_vol_ref = {'source-name': 'manage_iscsi_test/volume-test'} - - out = self.driver.manage_existing_get_size(vol, existing_vol_ref) - self.assertEqual(20, out) - - def test_manage_existing_get_size_error(self): - vol = _VOLUME.copy() - existing_vol_ref = {'source-name': 'invalid_FS/vol-not-found'} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, vol, - existing_vol_ref) - - def test_manage_existing_get_size_without_source_name(self): - vol = _VOLUME.copy() - existing_vol_ref = { - 'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, vol, - existing_vol_ref) - - @mock.patch.object(volume_types, 'get_volume_type_extra_specs') - def test_manage_existing(self, m_get_extra_specs): - vol = _VOLUME.copy() - existing_vol_ref = {'source-name': 'fs2/volume-test'} - version = {'provider_location': '18-48-A5-A1-80-13.testvol'} - - m_get_extra_specs.return_value = {'key': 'type', - 'service_label': 'silver'} - - out = self.driver.manage_existing(vol, existing_vol_ref) - - m_get_extra_specs.assert_called_once_with('1') - self.assertEqual(version, out) - - @mock.patch.object(volume_types, 'get_volume_type_extra_specs') - def test_manage_existing_invalid_pool(self, m_get_extra_specs): - vol = _VOLUME.copy() - existing_vol_ref = {'source-name': 'fs2/volume-test'} - - m_get_extra_specs.return_value = {'key': 'type', - 'service_label': 'gold'} - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, vol, existing_vol_ref) - m_get_extra_specs.assert_called_once_with('1') - - def test_manage_existing_invalid_volume_name(self): - vol = _VOLUME.copy() - existing_vol_ref = {'source-name': 'fs2/t/est_volume'} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, vol, existing_vol_ref) - - def test_manage_existing_without_volume_name(self): - vol = _VOLUME.copy() - existing_vol_ref = {'source-name': 'fs2/'} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, vol, existing_vol_ref) - - def test_manage_existing_with_FS_and_spaces(self): - vol = _VOLUME.copy() - existing_vol_ref = {'source-name': 'fs2/ '} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, vol, existing_vol_ref) diff --git a/cinder/tests/unit/test_hitachi_hnas_nfs.py b/cinder/tests/unit/test_hitachi_hnas_nfs.py deleted file mode 100644 index c18a51fad..000000000 --- a/cinder/tests/unit/test_hitachi_hnas_nfs.py +++ /dev/null @@ -1,563 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import os -import tempfile - -import mock -import six - -from cinder import exception -from cinder import test -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hnas_nfs as nfs -from cinder.volume.drivers import nfs as drivernfs -from cinder.volume.drivers import remotefs -from cinder.volume import volume_types - -SHARESCONF = """172.17.39.132:/cinder -172.17.39.133:/cinder""" - -HNASCONF = """ - - ssc - 172.17.44.15 - supervisor - supervisor - - default - 172.17.39.132:/cinder - - - silver - 172.17.39.133:/cinder - - -""" - -HNAS_WRONG_CONF1 = """ - - ssc - 172.17.44.15 - supervisor - supervisor - default - 172.17.39.132:/cinder - - -""" - -HNAS_WRONG_CONF2 = """ - - ssc - 172.17.44.15 - supervisor - supervisor - - default - - - silver - - -""" - -HNAS_WRONG_CONF3 = """ - - ssc - 172.17.44.15 - - supervisor - - default - 172.17.39.132:/cinder - - - silver - 172.17.39.133:/cinder - - -""" - -HNAS_WRONG_CONF4 = """ - - ssc - 172.17.44.15 - super - supervisor - - default - 172.17.39.132:/cinder - - - silver - 172.17.39.133:/cinder - - -""" - -HNAS_FULL_CONF = """ - - ssc - 172.17.44.15 - super - supervisor - True - 2222 - True - /etc/cinder/ssh_priv - 10.0.0.1 - - default - 172.17.39.132:/cinder - - - silver - 172.17.39.133:/cinder/silver - - - gold - 172.17.39.133:/cinder/gold - - - platinum - 172.17.39.133:/cinder/platinum - - -""" - - -# The following information is passed on to tests, when creating a volume -_SERVICE = ('Test_hdp', 'Test_path', 'Test_label') -_SHARE = '172.17.39.132:/cinder' -_SHARE2 = '172.17.39.133:/cinder' -_EXPORT = '/cinder' -_VOLUME = {'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190', - 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190', - 'size': 128, - 'volume_type': 'silver', - 'volume_type_id': 'test', - 'metadata': [{'key': 'type', - 'service_label': 'silver'}], - 'provider_location': None, - 'id': 'bcc48c61-9691-4e5f-897c-793686093190', - 'status': 'available', - 'host': 'host1@hnas-iscsi-backend#silver'} -_SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc', - 'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc', - 'size': 128, - 'volume_type': None, - 'provider_location': None, - 'volume_size': 128, - 'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190', - 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191', - 'host': 'host1@hnas-iscsi-backend#silver'} - -_VOLUME_NFS = {'name': 'volume-61da3-8d23-4bb9-3136-ca819d89e7fc', - 'id': '61da3-8d23-4bb9-3136-ca819d89e7fc', - 'size': 4, - 'metadata': [{'key': 'type', - 'service_label': 'silver'}], - 'volume_type': 'silver', - 'volume_type_id': 'silver', - 'provider_location': '172.24.44.34:/silver/', - 'volume_size': 128, - 'host': 'host1@hnas-nfs#silver'} - -GET_ID_VOL = { - ("bcc48c61-9691-4e5f-897c-793686093190"): [_VOLUME], - ("bcc48c61-9691-4e5f-897c-793686093191"): [_SNAPVOLUME] -} - - -def id_to_vol(arg): - return GET_ID_VOL.get(arg) - - -class SimulatedHnasBackend(object): - """Simulation Back end. 
Talks to HNAS.""" - - # these attributes are shared across object instances - start_lun = 0 - - def __init__(self): - self.type = 'HNAS' - self.out = '' - - def file_clone(self, cmd, ip0, user, pw, fslabel, source_path, - target_path): - return "" - - def get_version(self, ver, cmd, ip0, user, pw): - self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \ - "version: 11.2.3319.09 LU: 256 " \ - "RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01" - return self.out - - def get_hdp_info(self, ip0, user, pw): - self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: 70 " \ - "Normal fs1\n" \ - "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 " \ - "Normal fs2" - return self.out - - def get_nfs_info(self, cmd, ip0, user, pw): - self.out = "Export: /cinder Path: /volumes HDP: fs1 FSID: 1024 " \ - "EVS: 1 IPS: 172.17.39.132\n" \ - "Export: /cinder Path: /volumes HDP: fs2 FSID: 1025 " \ - "EVS: 1 IPS: 172.17.39.133" - return self.out - - -class HDSNFSDriverTest(test.TestCase): - """Test HNAS NFS volume driver.""" - - def __init__(self, *args, **kwargs): - super(HDSNFSDriverTest, self).__init__(*args, **kwargs) - - @mock.patch.object(nfs, 'factory_bend') - def setUp(self, m_factory_bend): - super(HDSNFSDriverTest, self).setUp() - - self.backend = SimulatedHnasBackend() - m_factory_bend.return_value = self.backend - - self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') - self.addCleanup(self.config_file.close) - self.config_file.write(HNASCONF) - self.config_file.flush() - - self.shares_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') - self.addCleanup(self.shares_file.close) - self.shares_file.write(SHARESCONF) - self.shares_file.flush() - - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.max_over_subscription_ratio = 20.0 - self.configuration.reserved_percentage = 0 - self.configuration.hds_hnas_nfs_config_file = self.config_file.name - self.configuration.nfs_shares_config = self.shares_file.name - self.configuration.nfs_mount_point_base = '/opt/stack/cinder/mnt' - self.configuration.nfs_mount_options = None - self.configuration.nas_host = None - self.configuration.nas_share_path = None - self.configuration.nas_mount_options = None - - self.driver = nfs.HDSNFSDriver(configuration=self.configuration) - self.driver.do_setup("") - - @mock.patch('six.moves.builtins.open') - @mock.patch.object(os, 'access') - def test_read_config(self, m_access, m_open): - # Test exception when file is not found - m_access.return_value = False - m_open.return_value = six.StringIO(HNASCONF) - self.assertRaises(exception.NotFound, nfs._read_config, '') - - # Test exception when config file has parsing errors - # due to missing tag - m_access.return_value = True - m_open.return_value = six.StringIO(HNAS_WRONG_CONF1) - self.assertRaises(exception.ConfigNotFound, nfs._read_config, '') - - # Test exception when config file has parsing errors - # due to missing tag - m_open.return_value = six.StringIO(HNAS_WRONG_CONF2) - self.configuration.hds_hnas_iscsi_config_file = '' - self.assertRaises(exception.ParameterNotFound, nfs._read_config, '') - - # Test exception when config file has parsing errors - # due to blank tag - m_open.return_value = six.StringIO(HNAS_WRONG_CONF3) - self.configuration.hds_hnas_iscsi_config_file = '' - self.assertRaises(exception.ParameterNotFound, nfs._read_config, '') - - # Test when config file has parsing errors due invalid svc_number - m_open.return_value = six.StringIO(HNAS_WRONG_CONF4) - self.configuration.hds_hnas_iscsi_config_file = '' - config = 
nfs._read_config('') - self.assertEqual(1, len(config['services'])) - - # Test config with full options - # due invalid svc_number - m_open.return_value = six.StringIO(HNAS_FULL_CONF) - self.configuration.hds_hnas_iscsi_config_file = '' - config = nfs._read_config('') - self.assertEqual(4, len(config['services'])) - - @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol') - @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') - @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path') - @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') - def test_create_snapshot(self, m_get_volume_location, m_get_export_path, - m_get_provider_location, m_id_to_vol): - svol = _SNAPVOLUME.copy() - m_id_to_vol.return_value = svol - - m_get_provider_location.return_value = _SHARE - m_get_volume_location.return_value = _SHARE - m_get_export_path.return_value = _EXPORT - - loc = self.driver.create_snapshot(svol) - out = "{'provider_location': \'" + _SHARE + "'}" - self.assertEqual(out, str(loc)) - - @mock.patch.object(nfs.HDSNFSDriver, '_get_service') - @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol) - @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') - @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') - def test_create_cloned_volume(self, m_get_volume_location, - m_get_provider_location, m_id_to_vol, - m_get_service): - vol = _VOLUME.copy() - svol = _SNAPVOLUME.copy() - - m_get_service.return_value = _SERVICE - m_get_provider_location.return_value = _SHARE - m_get_volume_location.return_value = _SHARE - - loc = self.driver.create_cloned_volume(vol, svol) - - out = "{'provider_location': \'" + _SHARE + "'}" - self.assertEqual(out, str(loc)) - - @mock.patch.object(nfs.HDSNFSDriver, '_get_service') - @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol) - @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') - @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') - @mock.patch.object(nfs.HDSNFSDriver, 'extend_volume') - def test_create_cloned_volume_larger(self, m_extend_volume, - m_get_volume_location, - m_get_provider_location, - m_id_to_vol, m_get_service): - vol = _VOLUME.copy() - svol = _SNAPVOLUME.copy() - - m_get_service.return_value = _SERVICE - m_get_provider_location.return_value = _SHARE - m_get_volume_location.return_value = _SHARE - - svol['size'] = 256 - - loc = self.driver.create_cloned_volume(svol, vol) - - out = "{'provider_location': \'" + _SHARE + "'}" - self.assertEqual(out, str(loc)) - m_extend_volume.assert_called_once_with(svol, svol['size']) - - @mock.patch.object(nfs.HDSNFSDriver, '_ensure_shares_mounted') - @mock.patch.object(nfs.HDSNFSDriver, '_do_create_volume') - @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol) - @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') - @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') - def test_create_volume(self, m_get_volume_location, - m_get_provider_location, m_id_to_vol, - m_do_create_volume, m_ensure_shares_mounted): - - vol = _VOLUME.copy() - - m_get_provider_location.return_value = _SHARE2 - m_get_volume_location.return_value = _SHARE2 - - loc = self.driver.create_volume(vol) - - out = "{'provider_location': \'" + _SHARE2 + "'}" - self.assertEqual(str(loc), out) - - @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol') - @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') - @mock.patch.object(nfs.HDSNFSDriver, '_volume_not_present') - def test_delete_snapshot(self, m_volume_not_present, - 
m_get_provider_location, m_id_to_vol): - svol = _SNAPVOLUME.copy() - - m_id_to_vol.return_value = svol - m_get_provider_location.return_value = _SHARE - - m_volume_not_present.return_value = True - - self.driver.delete_snapshot(svol) - self.assertIsNone(svol['provider_location']) - - @mock.patch.object(nfs.HDSNFSDriver, '_get_service') - @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol) - @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') - @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path') - @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') - def test_create_volume_from_snapshot(self, m_get_volume_location, - m_get_export_path, - m_get_provider_location, m_id_to_vol, - m_get_service): - vol = _VOLUME.copy() - svol = _SNAPVOLUME.copy() - - m_get_service.return_value = _SERVICE - m_get_provider_location.return_value = _SHARE - m_get_export_path.return_value = _EXPORT - m_get_volume_location.return_value = _SHARE - - loc = self.driver.create_volume_from_snapshot(vol, svol) - out = "{'provider_location': \'" + _SHARE + "'}" - self.assertEqual(out, str(loc)) - - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - return_value={'key': 'type', 'service_label': 'silver'}) - def test_get_pool(self, m_ext_spec): - vol = _VOLUME.copy() - - self.assertEqual('silver', self.driver.get_pool(vol)) - - @mock.patch.object(volume_types, 'get_volume_type_extra_specs') - @mock.patch.object(os.path, 'isfile', return_value=True) - @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/gold') - @mock.patch.object(utils, 'resolve_hostname', return_value='172.24.44.34') - @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') - def test_manage_existing(self, m_ensure_shares, m_resolve, m_mount_point, - m_isfile, m_get_extra_specs): - vol = _VOLUME_NFS.copy() - - m_get_extra_specs.return_value = {'key': 'type', - 'service_label': 'silver'} - self.driver._mounted_shares = ['172.17.39.133:/cinder'] - existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} - - with mock.patch.object(self.driver, '_execute'): - out = self.driver.manage_existing(vol, existing_vol_ref) - - loc = {'provider_location': '172.17.39.133:/cinder'} - self.assertEqual(loc, out) - - m_get_extra_specs.assert_called_once_with('silver') - m_isfile.assert_called_once_with('/mnt/gold/volume-test') - m_mount_point.assert_called_once_with('172.17.39.133:/cinder') - m_resolve.assert_called_with('172.17.39.133') - m_ensure_shares.assert_called_once_with() - - @mock.patch.object(volume_types, 'get_volume_type_extra_specs') - @mock.patch.object(os.path, 'isfile', return_value=True) - @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/gold') - @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133') - @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') - def test_manage_existing_move_fails(self, m_ensure_shares, m_resolve, - m_mount_point, m_isfile, - m_get_extra_specs): - vol = _VOLUME_NFS.copy() - - m_get_extra_specs.return_value = {'key': 'type', - 'service_label': 'silver'} - self.driver._mounted_shares = ['172.17.39.133:/cinder'] - existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} - self.driver._execute = mock.Mock(side_effect=OSError) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing, vol, existing_vol_ref) - m_get_extra_specs.assert_called_once_with('silver') - 
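
Note the deliberate mix of assertion strengths in these manage_existing tests: most mocks are pinned with assert_called_once_with, while m_resolve only gets the weaker assert_called_with, presumably because hostname resolution can legitimately run more than once while shares are matched. The difference in a self-contained sketch:

    import mock

    m = mock.Mock()
    m('172.17.39.133')
    m('172.17.39.133')

    m.assert_called_with('172.17.39.133')  # passes: checks the last call only
    try:
        m.assert_called_once_with('172.17.39.133')
    except AssertionError:
        pass  # fails: the mock was called twice
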
m_isfile.assert_called_once_with('/mnt/gold/volume-test') - m_mount_point.assert_called_once_with('172.17.39.133:/cinder') - m_resolve.assert_called_with('172.17.39.133') - m_ensure_shares.assert_called_once_with() - - @mock.patch.object(volume_types, 'get_volume_type_extra_specs') - @mock.patch.object(os.path, 'isfile', return_value=True) - @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/gold') - @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133') - @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') - def test_manage_existing_invalid_pool(self, m_ensure_shares, m_resolve, - m_mount_point, m_isfile, - m_get_extra_specs): - vol = _VOLUME_NFS.copy() - m_get_extra_specs.return_value = {'key': 'type', - 'service_label': 'gold'} - self.driver._mounted_shares = ['172.17.39.133:/cinder'] - existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} - self.driver._execute = mock.Mock(side_effect=OSError) - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, vol, existing_vol_ref) - m_get_extra_specs.assert_called_once_with('silver') - m_isfile.assert_called_once_with('/mnt/gold/volume-test') - m_mount_point.assert_called_once_with('172.17.39.133:/cinder') - m_resolve.assert_called_with('172.17.39.133') - m_ensure_shares.assert_called_once_with() - - @mock.patch.object(utils, 'get_file_size', return_value=4000000000) - @mock.patch.object(os.path, 'isfile', return_value=True) - @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/gold') - @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133') - @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') - def test_manage_existing_get_size(self, m_ensure_shares, m_resolve, - m_mount_point, - m_isfile, m_file_size): - - vol = _VOLUME_NFS.copy() - - self.driver._mounted_shares = ['172.17.39.133:/cinder'] - existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} - - out = self.driver.manage_existing_get_size(vol, existing_vol_ref) - - self.assertEqual(vol['size'], out) - m_file_size.assert_called_once_with('/mnt/gold/volume-test') - m_isfile.assert_called_once_with('/mnt/gold/volume-test') - m_mount_point.assert_called_once_with('172.17.39.133:/cinder') - m_resolve.assert_called_with('172.17.39.133') - m_ensure_shares.assert_called_once_with() - - @mock.patch.object(utils, 'get_file_size', return_value='badfloat') - @mock.patch.object(os.path, 'isfile', return_value=True) - @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/gold') - @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133') - @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') - def test_manage_existing_get_size_error(self, m_ensure_shares, m_resolve, - m_mount_point, - m_isfile, m_file_size): - vol = _VOLUME_NFS.copy() - - self.driver._mounted_shares = ['172.17.39.133:/cinder'] - existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing_get_size, vol, - existing_vol_ref) - m_file_size.assert_called_once_with('/mnt/gold/volume-test') - m_isfile.assert_called_once_with('/mnt/gold/volume-test') - m_mount_point.assert_called_once_with('172.17.39.133:/cinder') - m_resolve.assert_called_with('172.17.39.133') - m_ensure_shares.assert_called_once_with() - - def 
test_manage_existing_get_size_without_source_name(self): - vol = _VOLUME.copy() - existing_vol_ref = { - 'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, vol, - existing_vol_ref) - - @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/gold') - def test_unmanage(self, m_mount_point): - with mock.patch.object(self.driver, '_execute'): - vol = _VOLUME_NFS.copy() - self.driver.unmanage(vol) - - m_mount_point.assert_called_once_with('172.24.44.34:/silver/') diff --git a/cinder/tests/unit/test_migrations.py b/cinder/tests/unit/test_migrations.py index 77be3d25f..beb713e9b 100644 --- a/cinder/tests/unit/test_migrations.py +++ b/cinder/tests/unit/test_migrations.py @@ -807,6 +807,13 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin): self.assertIsInstance(reservations.c.allocated_id.type, self.INTEGER_TYPE) + def __check_cinderbase_fields(self, columns): + """Check fields inherited from CinderBase ORM class.""" + self.assertIsInstance(columns.created_at.type, self.TIME_TYPE) + self.assertIsInstance(columns.updated_at.type, self.TIME_TYPE) + self.assertIsInstance(columns.deleted_at.type, self.TIME_TYPE) + self.assertIsInstance(columns.deleted.type, self.BOOL_TYPE) + def _check_067(self, engine, data): iscsi_targets = db_utils.get_table(engine, 'iscsi_targets') fkey, = iscsi_targets.c.volume_id.foreign_keys @@ -839,6 +846,204 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin): self.assertIsInstance(messages.c.resource_type.type, self.VARCHAR_TYPE) + def _check_075(self, engine, data): + """Test adding cluster table and cluster_id fields.""" + self.assertTrue(engine.dialect.has_table(engine.connect(), 'clusters')) + clusters = db_utils.get_table(engine, 'clusters') + columns = clusters.c + self.__check_cinderbase_fields(columns) + + # Cluster specific fields + self.assertIsInstance(columns.id.type, self.INTEGER_TYPE) + self.assertIsInstance(columns.name.type, self.VARCHAR_TYPE) + self.assertIsInstance(columns.binary.type, self.VARCHAR_TYPE) + self.assertIsInstance(columns.disabled.type, self.BOOL_TYPE) + self.assertIsInstance(columns.disabled_reason.type, self.VARCHAR_TYPE) + + # Check that we have added cluster_name field to all required tables + for table_name in ('services', 'consistencygroups', 'volumes'): + table = db_utils.get_table(engine, table_name) + self.assertIsInstance(table.c.cluster_name.type, + self.VARCHAR_TYPE) + + def _check_076(self, engine, data): + workers = db_utils.get_table(engine, 'workers') + columns = workers.c + self.__check_cinderbase_fields(columns) + + # Workers specific fields + self.assertIsInstance(columns.id.type, self.INTEGER_TYPE) + self.assertIsInstance(columns.resource_type.type, self.VARCHAR_TYPE) + self.assertIsInstance(columns.resource_id.type, self.VARCHAR_TYPE) + self.assertIsInstance(columns.status.type, self.VARCHAR_TYPE) + self.assertIsInstance(columns.service_id.type, self.INTEGER_TYPE) + + def _check_077(self, engine, data): + """Test adding group types and specs tables.""" + self.assertTrue(engine.dialect.has_table(engine.connect(), + "group_types")) + group_types = db_utils.get_table(engine, 'group_types') + + self.assertIsInstance(group_types.c.id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_types.c.name.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_types.c.description.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_types.c.created_at.type, + 
self.TIME_TYPE) + self.assertIsInstance(group_types.c.updated_at.type, + self.TIME_TYPE) + self.assertIsInstance(group_types.c.deleted_at.type, + self.TIME_TYPE) + self.assertIsInstance(group_types.c.deleted.type, + self.BOOL_TYPE) + self.assertIsInstance(group_types.c.is_public.type, + self.BOOL_TYPE) + + self.assertTrue(engine.dialect.has_table(engine.connect(), + "group_type_specs")) + group_specs = db_utils.get_table(engine, 'group_type_specs') + + self.assertIsInstance(group_specs.c.id.type, + self.INTEGER_TYPE) + self.assertIsInstance(group_specs.c.key.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_specs.c.value.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_specs.c.group_type_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_specs.c.created_at.type, + self.TIME_TYPE) + self.assertIsInstance(group_specs.c.updated_at.type, + self.TIME_TYPE) + self.assertIsInstance(group_specs.c.deleted_at.type, + self.TIME_TYPE) + self.assertIsInstance(group_specs.c.deleted.type, + self.BOOL_TYPE) + + self.assertTrue(engine.dialect.has_table(engine.connect(), + "group_type_projects")) + type_projects = db_utils.get_table(engine, 'group_type_projects') + + self.assertIsInstance(type_projects.c.id.type, + self.INTEGER_TYPE) + self.assertIsInstance(type_projects.c.created_at.type, + self.TIME_TYPE) + self.assertIsInstance(type_projects.c.updated_at.type, + self.TIME_TYPE) + self.assertIsInstance(type_projects.c.deleted_at.type, + self.TIME_TYPE) + self.assertIsInstance(type_projects.c.deleted.type, + self.BOOL_TYPE) + self.assertIsInstance(type_projects.c.group_type_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(type_projects.c.project_id.type, + self.VARCHAR_TYPE) + + def _check_078(self, engine, data): + """Test adding groups tables.""" + self.assertTrue(engine.dialect.has_table(engine.connect(), + "groups")) + groups = db_utils.get_table(engine, 'groups') + + self.assertIsInstance(groups.c.id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.name.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.description.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.created_at.type, + self.TIME_TYPE) + self.assertIsInstance(groups.c.updated_at.type, + self.TIME_TYPE) + self.assertIsInstance(groups.c.deleted_at.type, + self.TIME_TYPE) + self.assertIsInstance(groups.c.deleted.type, + self.BOOL_TYPE) + self.assertIsInstance(groups.c.user_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.project_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.host.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.availability_zone.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.group_type_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.status.type, + self.VARCHAR_TYPE) + + self.assertTrue(engine.dialect.has_table(engine.connect(), + "group_volume_type_mapping")) + mapping = db_utils.get_table(engine, 'group_volume_type_mapping') + + self.assertIsInstance(mapping.c.id.type, + self.INTEGER_TYPE) + self.assertIsInstance(mapping.c.created_at.type, + self.TIME_TYPE) + self.assertIsInstance(mapping.c.updated_at.type, + self.TIME_TYPE) + self.assertIsInstance(mapping.c.deleted_at.type, + self.TIME_TYPE) + self.assertIsInstance(mapping.c.deleted.type, + self.BOOL_TYPE) + self.assertIsInstance(mapping.c.volume_type_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(mapping.c.group_id.type, + self.VARCHAR_TYPE) + + volumes = db_utils.get_table(engine, 'volumes') + 
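
These _check_07x methods all verify schema the same way: fetch the reflected Table through db_utils.get_table and compare each column's type object against the dialect-aware *_TYPE classes the mixin defines. Stripped of the migration machinery, the idiom is plain SQLAlchemy reflection, roughly like this (table and column are illustrative, not tied to a real migration):

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')
    engine.execute('CREATE TABLE groups (id VARCHAR(36))')

    meta = sa.MetaData(bind=engine)
    groups = sa.Table('groups', meta, autoload=True)  # what get_table does
    assert isinstance(groups.c.id.type, sa.types.VARCHAR)
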
self.assertIsInstance(volumes.c.group_id.type, + self.VARCHAR_TYPE) + + quota_classes = db_utils.get_table(engine, 'quota_classes') + rows = quota_classes.count().\ + where(quota_classes.c.resource == 'groups').\ + execute().scalar() + self.assertEqual(1, rows) + + def _check_079(self, engine, data): + """Test adding group_snapshots tables.""" + self.assertTrue(engine.dialect.has_table(engine.connect(), + "group_snapshots")) + group_snapshots = db_utils.get_table(engine, 'group_snapshots') + + self.assertIsInstance(group_snapshots.c.id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_snapshots.c.name.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_snapshots.c.description.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_snapshots.c.created_at.type, + self.TIME_TYPE) + self.assertIsInstance(group_snapshots.c.updated_at.type, + self.TIME_TYPE) + self.assertIsInstance(group_snapshots.c.deleted_at.type, + self.TIME_TYPE) + self.assertIsInstance(group_snapshots.c.deleted.type, + self.BOOL_TYPE) + self.assertIsInstance(group_snapshots.c.user_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_snapshots.c.project_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_snapshots.c.group_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_snapshots.c.group_type_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(group_snapshots.c.status.type, + self.VARCHAR_TYPE) + + snapshots = db_utils.get_table(engine, 'snapshots') + self.assertIsInstance(snapshots.c.group_snapshot_id.type, + self.VARCHAR_TYPE) + + groups = db_utils.get_table(engine, 'groups') + self.assertIsInstance(groups.c.group_snapshot_id.type, + self.VARCHAR_TYPE) + self.assertIsInstance(groups.c.source_group_id.type, + self.VARCHAR_TYPE) + def test_walk_versions(self): self.walk_versions(False, False) diff --git a/cinder/tests/unit/test_misc.py b/cinder/tests/unit/test_misc.py deleted file mode 100644 index 5f44f6220..000000000 --- a/cinder/tests/unit/test_misc.py +++ /dev/null @@ -1,62 +0,0 @@ - -# Copyright 2010 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
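
For context on how the _check_075 through _check_079 hooks above actually run: test_walk_versions delegates to oslo.db's WalkVersionsMixin, which upgrades one migration at a time and, after each step, looks up an optional method named after the version number and calls it with the engine. A rough sketch of that dispatch convention, simplified from oslo.db rather than copied from it:

    def _migrate_up(self, engine, version, with_data=False):
        """Upgrade to `version`, then run its _check_### hook if defined."""
        data = None
        if with_data:
            pre = getattr(self, '_pre_upgrade_%03d' % version, None)
            if pre:
                data = pre(engine)
        self.migration_api.upgrade(engine, self.REPOSITORY, version)
        if with_data:
            check = getattr(self, '_check_%03d' % version, None)
            if check:
                check(engine, data)
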
- -import glob -import os - - -from cinder import exception -from cinder.i18n import _ -from cinder import test - - -class ExceptionTestCase(test.TestCase): - @staticmethod - def _raise_exc(exc): - raise exc() - - def test_exceptions_raise(self): - # NOTE(dprince): disable format errors since we are not passing kwargs - self.flags(fatal_exception_format_errors=False) - for name in dir(exception): - exc = getattr(exception, name) - if isinstance(exc, type): - self.assertRaises(exc, self._raise_exc, exc) - - -class ProjectTestCase(test.TestCase): - def test_all_migrations_have_downgrade(self): - topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../') - py_glob = os.path.join(topdir, "cinder", "db", "sqlalchemy", - "migrate_repo", "versions", "*.py") - downgrades = [] - for path in glob.iglob(py_glob): - has_upgrade = False - has_downgrade = False - with open(path, "r") as f: - for line in f: - if 'def upgrade(' in line: - has_upgrade = True - if 'def downgrade(' in line: - has_downgrade = True - - if has_upgrade and has_downgrade: - fname = os.path.basename(path) - downgrades.append(fname) - - helpful_msg = (_("The following migrations have a downgrade, " - "which are not allowed: " - "\n\t%s") % '\n\t'.join(sorted(downgrades))) - self.assertFalse(downgrades, msg=helpful_msg) diff --git a/cinder/tests/unit/test_netapp.py b/cinder/tests/unit/test_netapp.py deleted file mode 100644 index 4df763e45..000000000 --- a/cinder/tests/unit/test_netapp.py +++ /dev/null @@ -1,1331 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. -# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
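
The ProjectTestCase removed just above enforced the no-downgrades policy without touching a database at all: it simply scanned the migration sources for def downgrade( definitions. The same scan works as a standalone snippet:

    import glob
    import os

    py_glob = os.path.join('cinder', 'db', 'sqlalchemy',
                           'migrate_repo', 'versions', '*.py')
    offenders = []
    for path in glob.iglob(py_glob):
        with open(path) as f:
            source = f.read()
        # a migration must define upgrade(); shipping downgrade() is banned
        if 'def upgrade(' in source and 'def downgrade(' in source:
            offenders.append(os.path.basename(path))
    assert not offenders, 'migrations with downgrade: %s' % sorted(offenders)
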
-"""Tests for NetApp volume driver.""" - -from lxml import etree -import mock -import six -from six.moves import BaseHTTPServer -from six.moves import http_client - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes -from cinder.volume import configuration as conf -from cinder.volume.drivers.netapp import common -from cinder.volume.drivers.netapp.dataontap import block_7mode -from cinder.volume.drivers.netapp.dataontap import block_cmode -from cinder.volume.drivers.netapp.dataontap.client import client_7mode -from cinder.volume.drivers.netapp.dataontap.client import client_base -from cinder.volume.drivers.netapp.dataontap.client import client_cmode -from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode -from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode -from cinder.volume.drivers.netapp.dataontap.utils import capabilities -from cinder.volume.drivers.netapp import options -from cinder.volume.drivers.netapp import utils - - -FAKE_CONNECTION_HTTP = { - 'transport_type': 'http', - 'username': 'admin', - 'password': 'pass', - 'hostname': '127.0.0.1', - 'port': None, - 'vserver': 'openstack', -} - - -def create_configuration(): - configuration = conf.Configuration(None) - configuration.append_config_values(options.netapp_connection_opts) - configuration.append_config_values(options.netapp_transport_opts) - configuration.append_config_values(options.netapp_basicauth_opts) - configuration.append_config_values(options.netapp_cluster_opts) - configuration.append_config_values(options.netapp_7mode_opts) - configuration.append_config_values(options.netapp_provisioning_opts) - return configuration - - -class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): - """HTTP handler that doesn't spam the log.""" - - def log_message(self, format, *args): - pass - - -class FakeHttplibSocket(object): - """A fake socket implementation for http_client.HTTPResponse.""" - def __init__(self, value): - self._rbuffer = six.BytesIO(value) - self._wbuffer = six.BytesIO() - oldclose = self._wbuffer.close - - def newclose(): - self.result = self._wbuffer.getvalue() - oldclose() - self._wbuffer.close = newclose - - def makefile(self, mode, *args): - """Returns the socket's internal buffer""" - if mode == 'r' or mode == 'rb': - return self._rbuffer - if mode == 'w' or mode == 'wb': - return self._wbuffer - - def close(self): - pass - - -RESPONSE_PREFIX_DIRECT_CMODE = b""" -""" - -RESPONSE_PREFIX_DIRECT_7MODE = b""" -""" - -RESPONSE_PREFIX_DIRECT = b""" -""" - -RESPONSE_SUFFIX_DIRECT = b"""""" - - -class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler): - """HTTP handler that fakes enough stuff to allow the driver to run.""" - - def do_GET(s): - """Respond to a GET request.""" - if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: - s.send_response(404) - s.end_headers - return - s.send_response(200) - s.send_header("Content-Type", "text/xml; charset=utf-8") - s.end_headers() - out = s.wfile - out.write('' - '') - - def do_POST(s): # noqa - """Respond to a POST request.""" - if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: - s.send_response(404) - s.end_headers - return - request_xml = s.rfile.read(int(s.headers['Content-Length'])) - root = etree.fromstring(request_xml) - body = [x for x in root.iterchildren()] - request = body[0] - tag = request.tag - api = etree.QName(tag).localname or tag - if 'lun-get-iter' == api: - tag = \ - 
FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') - if tag is None: - body = """ - - indeterminate - 512 - 1354536362 - - false - true - - falselinux - - true/vol/navneet/lun1 - 0 - false2FfGI$APyN68 - none20971520 - 0false - 0 - cec1f3d7-3d41-11e2-9cf4-123478563412 - navneetben_vserver - - <lun-get-iter-key-td> - <key-0>ben_vserver</key-0> - <key-1>/vol/navneet/lun2</key-1> - <key-2>navneet</key-2> - <key-3></key-3> - <key-4>lun2</key-4> - </lun-get-iter-key-td> - 1""" - else: - body = """ - - indeterminate - 512 - 1354536362 - - false - true - - falselinux - - true/vol/navneet/lun3 - 0 - false2FfGI$APyN68 - - none20971520 - 0false - 0 - cec1f3d7-3d41-11e2-9cf4-123478563412 - navneetben_vserver - - 1""" - elif 'volume-get-iter' == api: - tag = \ - FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') - if tag is None: - body = """ - - iscsi - Openstack - - - 214748364 - - true - - falseonline - - - nfsvol - openstack - - - 247483648 - - true - - falseonline - - - <volume-get-iter-key-td> - <key-0>openstack</key-0> - <key-1>nfsvol</key-1> - </volume-get-iter-key-td> - 2""" - else: - body = """ - - iscsi - Openstack - - - 4147483648 - - true - - falseonline - - - nfsvol - openstack - - - 8147483648 - - true - - falseonline - - - 2""" - elif 'lun-create-by-size' == api: - body = """ - 22020096""" - elif 'lun-destroy' == api: - body = """""" - elif 'igroup-get-iter' == api: - init_found = True - query = FakeDirectCMODEServerHandler._get_child_by_name(request, - 'query') - if query is not None: - igroup_info = FakeDirectCMODEServerHandler._get_child_by_name( - query, 'initiator-group-info') - if igroup_info is not None: - inits = FakeDirectCMODEServerHandler._get_child_by_name( - igroup_info, 'initiators') - if inits is not None: - init_info = \ - FakeDirectCMODEServerHandler._get_child_by_name( - inits, 'initiator-info') - init_name = \ - FakeDirectCMODEServerHandler._get_child_content( - init_info, - 'initiator-name') - if init_name == 'iqn.1993-08.org.debian:01:10': - init_found = True - else: - init_found = False - if init_found: - tag = \ - FakeDirectCMODEServerHandler._get_child_by_name( - request, 'tag') - if tag is None: - body = """ - - openstack-01f5297b-00f7-4170-bf30-69b1314b2118 - - windows - iscsi - - - iqn.1993-08.org.debian:01:10 - - openstack - - <igroup-get-iter-key-td> - <key-0>openstack</key-0> - <key-1> - openstack-01f5297b-00f7-4170-bf30-69b1314b2118< - /key-1> - </igroup-get-iter-key-td> - 1""" - else: - body = """ - - openstack-01f5297b-00f7-4170-bf30-69b1314b2118 - - linux - iscsi - - - iqn.1993-08.org.debian:01:10 - - openstack - 1""" - else: - body = """ - 0 - """ - elif 'lun-map-get-iter' == api: - tag = \ - FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') - if tag is None: - body = """ - - openstack-44c5e7e1-3306-4800-9623-259e57d56a83 - - 948ae304-06e9-11e2 - 0 - 5587e563-06e9-11e2-9cf4-123478563412 - /vol/openvol/lun1 - openstack - - - <lun-map-get-iter-key-td> - <key-0>openstack</key-0> - <key-1>openstack-01f5297b-00f7-4170-bf30-69b1314b2118< - /key-1> - </lun-map-get-iter-key-td> - - 1 - """ - else: - body = """ - - openstack-44c5e7e1-3306-4800-9623-259e57d56a83 - - 948ae304-06e9-11e2 - 0 - 5587e563-06e9-11e2-9cf4-123478563412 - /vol/openvol/lun1 - openstack - 1 - """ - elif 'lun-map' == api: - body = """1 - - """ - elif 'lun-get-geometry' == api: - body = """256 - 512 - 3221225472512 - 2147483648 - 256""" - elif 'iscsi-service-get-iter' == api: - body = """ - - openstack - true - iqn.1992-08.com.netapp:sn.fa9:vs.105 - 
openstack
- 1"""
-        elif 'iscsi-interface-get-iter' == api:
-            body = """
- 
- fas3170rre-cmode-01
- e1b-1165
- 
- iscsi_data_if
- 10.63.165.216
- 3260true
- 
- 5
- iscsi_data_if
- 1038
- openstack
- 
- 1"""
-        elif 'igroup-create' == api:
-            body = """"""
-        elif 'igroup-add' == api:
-            body = """"""
-        elif 'clone-create' == api:
-            body = """"""
-        elif 'lun-unmap' == api:
-            body = """"""
-        elif 'system-get-ontapi-version' == api:
-            body = """
- 1
- 19
- """
-        elif 'vserver-get-iter' == api:
-            body = """
- 
- vserver
- node
- 
- 
- 1"""
-        elif 'ems-autosupport-log' == api:
-            body = """"""
-        elif 'lun-resize' == api:
-            body = """"""
-        elif 'lun-get-geometry' == api:
-            body = """
- 1
- 2
- 8
- 2
- 4
- 5
- """
-        elif 'volume-options-list-info' == api:
-            body = """
- 
- 
- 
- """
-        elif 'lun-move' == api:
-            body = """"""
-        else:
-            # Unknown API
-            s.send_response(500)
-            s.end_headers()
-            return
-        s.send_response(200)
-        s.send_header("Content-Type", "text/xml; charset=utf-8")
-        s.end_headers()
-        s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE)
-        s.wfile.write(RESPONSE_PREFIX_DIRECT)
-        if isinstance(body, six.text_type):
-            body = body.encode('utf-8')
-        s.wfile.write(body)
-        s.wfile.write(RESPONSE_SUFFIX_DIRECT)
-
-    @staticmethod
-    def _get_child_by_name(self, name):
-        for child in self.iterchildren():
-            if child.tag == name or etree.QName(child.tag).localname == name:
-                return child
-        return None
-
-    @staticmethod
-    def _get_child_content(self, name):
-        """Get the content of the child."""
-        for child in self.iterchildren():
-            if child.tag == name or etree.QName(child.tag).localname == name:
-                return child.text
-        return None
-
-
-class FakeDirectCmodeHTTPConnection(object):
-    """A fake http_client.HTTPConnection for netapp tests.
-
-    Requests made via this connection actually get translated and routed into
-    the fake direct handler above; we then turn the response into
-    the http_client.HTTPResponse that the caller expects.
- """ - def __init__(self, host, timeout=None): - self.host = host - - def request(self, method, path, data=None, headers=None): - if not headers: - headers = {} - req_str = '%s %s HTTP/1.1\r\n' % (method, path) - for key, value in headers.items(): - req_str += "%s: %s\r\n" % (key, value) - if isinstance(req_str, six.text_type): - req_str = req_str.encode('latin1') - if data: - req_str += b'\r\n' + data - - # NOTE(vish): normally the http transport normalizes from unicode - sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) - # NOTE(vish): stop the server from trying to look up address from - # the fake socket - FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1' - self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None) - - self.sock = FakeHttplibSocket(sock.result) - self.http_response = http_client.HTTPResponse(self.sock) - - def set_debuglevel(self, level): - pass - - def getresponse(self): - self.http_response.begin() - return self.http_response - - def getresponsebody(self): - return self.sock.result - - def close(self): - pass - - -class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase): - """Test case for NetAppISCSIDriver""" - - volume = {'name': 'lun1', 'size': 2, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'lun1', - 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None, 'host': 'hostname@backend#vol1'} - snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1', - 'volume_size': 2, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None} - snapshot_fail = {'name': 'snapshot2', 'size': 2, 'volume_name': 'lun1', - 'volume_size': 1, 'project_id': 'project'} - volume_sec = {'name': 'vol_snapshot', 'size': 2, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'lun1', - 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None} - volume_clone = {'name': 'cl_sm', 'size': 3, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'cl_sm', - 'id': 'lun1', 'provider_auth': None, - 'project_id': 'project', 'display_name': None, - 'display_description': 'lun1', - 'volume_type_id': None} - volume_clone_large = {'name': 'cl_lg', 'size': 6, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'cl_lg', - 'id': 'lun1', 'provider_auth': None, - 'project_id': 'project', 'display_name': None, - 'display_description': 'lun1', - 'volume_type_id': None} - connector = {'initiator': 'iqn.1993-08.org.debian:01:10'} - vol_fail = {'name': 'lun_fail', 'size': 10000, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'lun1', - 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None, 'host': 'hostname@backend#vol1'} - - def setUp(self): - super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp() - self._custom_setup() - - def _custom_setup(self): - self.mock_object(utils, 'OpenStackInfo') - self.mock_object(perf_7mode, 'Performance7modeLibrary') - self.mock_object(capabilities, 'CapabilitiesLibrary') - self.mock_object(client_base.Client, '_init_ssh_client') - - configuration = self._set_config(create_configuration()) - driver = common.NetAppDriver(configuration=configuration) - self.stubs.Set(http_client, 'HTTPConnection', - FakeDirectCmodeHTTPConnection) - 
driver.do_setup(context='') - self.driver = driver - self.mock_object(self.driver.library.zapi_client, '_init_ssh_client') - - def _set_config(self, configuration): - configuration.netapp_storage_protocol = 'iscsi' - configuration.netapp_login = 'admin' - configuration.netapp_password = 'pass' - configuration.netapp_server_hostname = '127.0.0.1' - configuration.netapp_transport_type = 'http' - configuration.netapp_server_port = None - configuration.netapp_vserver = 'openstack' - return configuration - - def test_connect(self): - self.driver.library.zapi_client = mock.MagicMock() - self.driver.library.zapi_client.get_ontapi_version.return_value = \ - (1, 20) - self.mock_object(block_cmode.NetAppBlockStorageCmodeLibrary, - '_get_flexvol_to_pool_map', - mock.Mock(return_value=fakes.FAKE_CMODE_POOL_MAP)) - self.driver.check_for_setup_error() - - def test_do_setup_all_default(self): - self.mock_object(utils, 'OpenStackInfo') - configuration = self._set_config(create_configuration()) - driver = common.NetAppDriver(configuration=configuration) - mock_client = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - mock_client.assert_called_with(**FAKE_CONNECTION_HTTP) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - def test_do_setup_http_default_port(self): - self.mock_object(utils, 'OpenStackInfo') - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'http' - driver = common.NetAppDriver(configuration=configuration) - mock_client = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - mock_client.assert_called_with(**FAKE_CONNECTION_HTTP) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - def test_do_setup_https_default_port(self): - self.mock_object(utils, 'OpenStackInfo') - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'https' - driver = common.NetAppDriver(configuration=configuration) - driver.library._get_root_volume_name = mock.Mock() - mock_client = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - FAKE_CONNECTION_HTTPS = dict(FAKE_CONNECTION_HTTP, - transport_type='https') - mock_client.assert_called_with(**FAKE_CONNECTION_HTTPS) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - def test_do_setup_http_non_default_port(self): - self.mock_object(utils, 'OpenStackInfo') - configuration = self._set_config(create_configuration()) - configuration.netapp_server_port = 81 - driver = common.NetAppDriver(configuration=configuration) - mock_client = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - FAKE_CONNECTION_HTTP_PORT = dict(FAKE_CONNECTION_HTTP, port=81) - mock_client.assert_called_with(**FAKE_CONNECTION_HTTP_PORT) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - def test_do_setup_https_non_default_port(self): - self.mock_object(utils, 'OpenStackInfo') - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'https' - configuration.netapp_server_port = 446 - driver = common.NetAppDriver(configuration=configuration) - 
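# common.NetAppDriver acts as a factory: based on the netapp_storage_family
# and netapp_storage_protocol values on the configuration it returns the
# matching concrete driver, whose backing library the tests reach via
# driver.library. That is why these do_setup tests construct it instead of
# instantiating a specific driver class directly.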
driver.library._get_root_volume_name = mock.Mock() - mock_client = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - FAKE_CONNECTION_HTTPS_PORT = dict(FAKE_CONNECTION_HTTP, port=446, - transport_type='https') - mock_client.assert_called_with(**FAKE_CONNECTION_HTTPS_PORT) - - def test_create_destroy(self): - self.driver.create_volume(self.volume) - self.driver.delete_volume(self.volume) - - def test_create_vol_snapshot_destroy(self): - self.driver.create_volume(self.volume) - self.mock_object(client_7mode.Client, '_check_clone_status') - self.mock_object(self.driver.library, '_clone_lun') - self.driver.create_snapshot(self.snapshot) - self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot) - self.driver.delete_snapshot(self.snapshot) - self.driver.delete_volume(self.volume) - - def test_map_unmap(self): - self.mock_object(client_cmode.Client, 'get_igroup_by_initiators') - self.mock_object(client_cmode.Client, 'get_iscsi_target_details') - self.mock_object(client_cmode.Client, 'get_iscsi_service_details') - self.mock_object(self.driver.library, '_get_or_create_igroup') - self.mock_object(self.driver.library, '_map_lun') - self.mock_object(self.driver.library, '_unmap_lun') - FAKE_PREFERRED_TARGET = {'address': 'http://host:8080', 'port': 80} - FAKE_CONN_PROPERTIES = {'driver_volume_type': 'iscsi', 'data': 'test'} - self.mock_object(self.driver.library, - '_get_preferred_target_from_list', - mock.Mock(return_value=FAKE_PREFERRED_TARGET)) - self.mock_object(common.na_utils, 'get_iscsi_connection_properties', - mock.Mock(return_value=FAKE_CONN_PROPERTIES)) - self.mock_object(client_cmode.Client, - 'get_operational_lif_addresses', - mock.Mock(return_value=[])) - self.driver.create_volume(self.volume) - updates = self.driver.create_export(None, self.volume, {}) - self.assertTrue(updates['provider_location']) - self.volume['provider_location'] = updates['provider_location'] - - connection_info = self.driver.initialize_connection(self.volume, - self.connector) - self.assertEqual('iscsi', connection_info['driver_volume_type']) - properties = connection_info['data'] - if not properties: - raise AssertionError('Target portal is none') - self.driver.terminate_connection(self.volume, self.connector) - self.driver.delete_volume(self.volume) - - def test_cloned_volume_destroy(self): - self.driver.create_volume(self.volume) - self.mock_object(self.driver.library, '_clone_lun') - self.driver.create_cloned_volume(self.snapshot, self.volume) - self.driver.delete_volume(self.snapshot) - self.driver.delete_volume(self.volume) - - def test_map_by_creating_igroup(self): - FAKE_IGROUP_INFO = {'initiator-group-name': 'debian', - 'initiator-group-os-type': 'linux', - 'initiator-group-type': 'igroup'} - FAKE_PREFERRED_TARGET = {'address': 'http://host:8080', 'port': 80} - FAKE_CONN_PROPERTIES = {'driver_volume_type': 'iscsi', 'data': 'test'} - self.mock_object(client_cmode.Client, 'get_igroup_by_initiators', - mock.Mock(return_value=[FAKE_IGROUP_INFO])) - self.mock_object(client_cmode.Client, - 'get_operational_lif_addresses', - mock.Mock(return_value=[])) - self.mock_object(client_cmode.Client, 'get_iscsi_target_details') - self.mock_object(client_cmode.Client, 'get_iscsi_service_details') - self.mock_object(self.driver.library, - '_get_preferred_target_from_list', - mock.Mock(return_value=FAKE_PREFERRED_TARGET)) - self.mock_object(common.na_utils, 'get_iscsi_connection_properties', - 
mock.Mock(return_value=FAKE_CONN_PROPERTIES)) - self.driver.create_volume(self.volume) - updates = self.driver.create_export(None, self.volume, {}) - self.assertTrue(updates['provider_location']) - self.volume['provider_location'] = updates['provider_location'] - connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'} - connection_info = self.driver.initialize_connection(self.volume, - connector_new) - self.assertEqual('iscsi', connection_info['driver_volume_type']) - properties = connection_info['data'] - if not properties: - raise AssertionError('Target portal is none') - - def test_vol_stats(self): - self.mock_object(client_base.Client, 'provide_ems') - mock_update_vol_stats = self.mock_object(self.driver.library, - '_update_volume_stats') - self.driver.get_volume_stats(refresh=True) - self.assertEqual(mock_update_vol_stats.call_count, 1) - - def test_create_vol_snapshot_diff_size_resize(self): - self.driver.create_volume(self.volume) - self.mock_object(self.driver.library, '_clone_source_to_destination') - self.mock_object(self.driver.library, '_clone_lun') - self.driver.create_snapshot(self.snapshot) - self.driver.create_volume_from_snapshot( - self.volume_clone, self.snapshot) - self.driver.delete_snapshot(self.snapshot) - self.driver.delete_volume(self.volume) - - def test_create_vol_snapshot_diff_size_subclone(self): - self.driver.create_volume(self.volume) - self.mock_object(self.driver.library, '_clone_lun') - self.mock_object(self.driver.library, '_clone_source_to_destination') - self.driver.create_snapshot(self.snapshot) - self.driver.create_volume_from_snapshot( - self.volume_clone_large, self.snapshot) - self.driver.delete_snapshot(self.snapshot) - self.driver.delete_volume(self.volume) - - def test_extend_vol_same_size(self): - self.driver.create_volume(self.volume) - self.driver.extend_volume(self.volume, self.volume['size']) - - def test_extend_vol_direct_resize(self): - self.mock_object(self.driver.library.zapi_client, - 'get_lun_geometry', mock.Mock(return_value=None)) - self.mock_object(self.driver.library, '_do_sub_clone_resize') - self.driver.create_volume(self.volume) - self.driver.extend_volume(self.volume, 3) - - def test_extend_vol_sub_lun_clone(self): - self.mock_object(self.driver.library.zapi_client, - 'get_lun_geometry', mock.Mock(return_value=None)) - self.mock_object(self.driver.library, '_do_sub_clone_resize') - self.driver.create_volume(self.volume) - self.driver.extend_volume(self.volume, 4) - - -class NetAppDriverNegativeTestCase(test.TestCase): - """Test case for NetAppDriver""" - - def setUp(self): - super(NetAppDriverNegativeTestCase, self).setUp() - - def test_incorrect_family(self): - self.mock_object(utils, 'OpenStackInfo') - configuration = create_configuration() - configuration.netapp_storage_family = 'xyz_abc' - try: - common.NetAppDriver(configuration=configuration) - raise AssertionError('Wrong storage family is getting accepted.') - except exception.InvalidInput: - pass - - def test_incorrect_protocol(self): - self.mock_object(utils, 'OpenStackInfo') - configuration = create_configuration() - configuration.netapp_storage_family = 'ontap' - configuration.netapp_storage_protocol = 'ontap' - try: - common.NetAppDriver(configuration=configuration) - raise AssertionError('Wrong storage protocol is getting accepted.') - except exception.InvalidInput: - pass - - -class FakeDirect7MODEServerHandler(FakeHTTPRequestHandler): - """HTTP handler that fakes enough stuff to allow the driver to run.""" - - def do_GET(s): - """Respond to a GET 
request.""" - if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: - s.send_response(404) - s.end_headers - return - s.send_response(200) - s.send_header("Content-Type", "text/xml; charset=utf-8") - s.end_headers() - out = s.wfile - out.write('' - '') - - def do_POST(s): - """Respond to a POST request.""" - if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: - s.send_response(404) - s.end_headers - return - request_xml = s.rfile.read(int(s.headers['Content-Length'])) - root = etree.fromstring(request_xml) - body = [x for x in root.iterchildren()] - request = body[0] - tag = request.tag - api = etree.QName(tag).localname or tag - if 'lun-list-info' == api: - body = """ - false - false - - - /vol/vol1/lun1 - 20971520 - true - false - false - false - none - linux - e867d844-c2c0-11e0-9282-00a09825b3b5 - P3lgP4eTyaNl - 512 - true - 0 - indeterminate - - - /vol/vol1/lun1 - 20971520 - true - false - false - false - none - linux - 8e1e9284-c288-11e0-9282-00a09825b3b5 - P3lgP4eTc3lp - 512 - true - 0 - indeterminate - - - """ - elif 'volume-list-info' == api: - body = """ - - - vol0 - 019c8f7a-9243-11e0-9281-00a09825b3b5 - flex - 32_bit - online - 576914493440 - 13820354560 - 563094110208 - 2 - 20 - 140848264 - 0 - 0 - 0 - 0 - 20907162 - 7010 - 518 - 31142 - 31142 - 0 - false - aggr0 - - - disabled - idle - idle for 70:36:44 - regular - sun-sat@0 - Mon Aug 8 09:34:15 EST 2011 - - Mon Aug 8 09:34:15 EST 2011 - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - false - - volume - true - 14 - raid_dp,sis - block - true - false - false - false - false - unmirrored - 3 - 1 - - - /aggr0/plex0 - true - false - - - - - vol1 - 2d50ecf4-c288-11e0-9282-00a09825b3b5 - flex - 32_bit - online - 42949672960 - 44089344 - 42905583616 - 0 - 20 - 10485760 - 8192 - 8192 - 0 - 0 - 1556480 - 110 - 504 - 31142 - 31142 - 0 - false - aggr1 - - - disabled - idle - idle for 89:19:59 - regular - sun-sat@0 - Sun Aug 7 14:51:00 EST 2011 - - Sun Aug 7 14:51:00 EST 2011 - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - false - - volume - true - 7 - raid4,sis - block - true - false - false - false - false - unmirrored - 2 - 1 - - - /aggr1/plex0 - true - false - - - - - """ - elif 'volume-options-list-info' == api: - body = """ - - - snapmirrored - off - - - root - false - - - ha_policy - cfo - - - striping - not_striped - - - compression - off - - - """ - elif 'lun-create-by-size' == api: - body = """ - 22020096""" - elif 'lun-destroy' == api: - body = """""" - elif 'igroup-list-info' == api: - body = """ - - - openstack-8bc96490 - iscsi - b8e1d274-c378-11e0 - linux - 0 - false - - false - false - true - - - - iqn.1993-08.org.debian:01:10 - - - - - iscsi_group - iscsi - ccb8cbe4-c36f - linux - 0 - false - - false - false - true - - - - iqn.1993-08.org.debian:01:10ca - - - - - """ - elif 'lun-map-list-info' == api: - body = """ - - """ - elif 'lun-map' == api: - body = """1 - - """ - elif 'iscsi-node-get-name' == api: - body = """ - iqn.1992-08.com.netapp:sn.135093938 - """ - elif 'iscsi-portal-list-info' == api: - body = """ - - - 10.61.176.156 - 3260 - 1000 - e0a - - - """ - elif 'igroup-create' == api: - body = """""" - elif 'igroup-add' == api: - body = """""" - elif 'clone-start' == api: - body = """ - - - 2d50ecf4-c288-11e0-9282-00a09825b3b5 - 11 - - - """ - elif 'clone-list-status' == api: - body = """ - - - completed - - - """ - elif 'lun-unmap' == api: - body = """""" - elif 'system-get-ontapi-version' == api: - body = """ - 1 - 8 - """ - elif 'lun-set-space-reservation-info' == api: - body = 
"""""" - elif 'ems-autosupport-log' == api: - body = """""" - elif 'lun-resize' == api: - body = """""" - elif 'lun-get-geometry' == api: - body = """ - 1 - 2 - 8 - 2 - 4 - 5 - """ - elif 'volume-options-list-info' == api: - body = """ - - - - """ - elif 'lun-move' == api: - body = """""" - else: - # Unknown API - s.send_response(500) - s.end_headers - return - s.send_response(200) - s.send_header("Content-Type", "text/xml; charset=utf-8") - s.end_headers() - s.wfile.write(RESPONSE_PREFIX_DIRECT_7MODE) - s.wfile.write(RESPONSE_PREFIX_DIRECT) - if isinstance(body, six.text_type): - body = body.encode('utf-8') - s.wfile.write(body) - s.wfile.write(RESPONSE_SUFFIX_DIRECT) - - -class FakeDirect7modeHTTPConnection(object): - """A fake http_client.HTTPConnection for netapp tests - - Requests made via this connection actually get translated and routed into - the fake direct handler above, we then turn the response into - the http_client.HTTPResponse that the caller expects. - """ - def __init__(self, host, timeout=None): - self.host = host - - def request(self, method, path, data=None, headers=None): - if not headers: - headers = {} - req_str = '%s %s HTTP/1.1\r\n' % (method, path) - for key, value in headers.items(): - req_str += "%s: %s\r\n" % (key, value) - if isinstance(req_str, six.text_type): - req_str = req_str.encode('latin1') - if data: - req_str += b'\r\n' + data - - # NOTE(vish): normally the http transport normailizes from unicode - sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) - # NOTE(vish): stop the server from trying to look up address from - # the fake socket - FakeDirect7MODEServerHandler.address_string = lambda x: '127.0.0.1' - self.app = FakeDirect7MODEServerHandler(sock, '127.0.0.1:80', None) - - self.sock = FakeHttplibSocket(sock.result) - self.http_response = http_client.HTTPResponse(self.sock) - - def set_debuglevel(self, level): - pass - - def getresponse(self): - self.http_response.begin() - return self.http_response - - def getresponsebody(self): - return self.sock.result - - def close(self): - pass - - -class NetAppDirect7modeISCSIDriverTestCase_NV(test.TestCase): - """Test case for NetAppISCSIDriver without vfiler""" - volume = { - 'name': 'lun1', - 'size': 2, - 'volume_name': 'lun1', - 'os_type': 'linux', - 'provider_location': 'lun1', - 'id': 'lun1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': None, - 'display_description': 'lun1', - 'volume_type_id': None, - 'host': 'hostname@backend#vol1', - } - - def setUp(self): - super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp() - self._custom_setup() - - def _custom_setup(self): - self.mock_object(utils, 'OpenStackInfo') - - configuration = self._set_config(create_configuration()) - driver = common.NetAppDriver(configuration=configuration) - self.mock_object(client_base.Client, '_init_ssh_client') - self.stubs.Set(http_client, 'HTTPConnection', - FakeDirect7modeHTTPConnection) - self.mock_object(driver.library, '_get_root_volume_name', mock.Mock( - return_value='root')) - self.mock_object(perf_7mode, 'Performance7modeLibrary') - driver.do_setup(context='') - driver.root_volume_name = 'root' - self.driver = driver - - def _set_config(self, configuration): - configuration.netapp_storage_family = 'ontap_7mode' - configuration.netapp_storage_protocol = 'iscsi' - configuration.netapp_login = 'admin' - configuration.netapp_password = 'pass' - configuration.netapp_server_hostname = '127.0.0.1' - configuration.netapp_transport_type = 'http' - configuration.netapp_server_port = 
None - return configuration - - def test_create_on_select_vol(self): - self.driver.volume_list = ['vol0', 'vol1'] - self.driver.create_volume(self.volume) - self.driver.delete_volume(self.volume) - self.driver.volume_list = [] - - def test_connect(self): - self.driver.library.zapi_client = mock.MagicMock() - self.driver.library.zapi_client.get_ontapi_version.\ - return_value = (1, 20) - self.mock_object(block_7mode.NetAppBlockStorage7modeLibrary, - '_get_filtered_pools', - mock.Mock(return_value=fakes.FAKE_7MODE_POOLS)) - self.driver.check_for_setup_error() - - def test_check_for_setup_error_version(self): - drv = self.driver - self.mock_object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=None)) - # check exception raises when version not found - self.assertRaises(exception.VolumeBackendAPIException, - drv.check_for_setup_error) - - self.mock_object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 8))) - - # check exception raises when not supported version - self.assertRaises(exception.VolumeBackendAPIException, - drv.check_for_setup_error) - - -class NetAppDirect7modeISCSIDriverTestCase_WV( - NetAppDirect7modeISCSIDriverTestCase_NV): - """Test case for NetAppISCSIDriver with vfiler""" - def setUp(self): - super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp() - - def _custom_setup(self): - self.mock_object(utils, 'OpenStackInfo') - - configuration = self._set_config(create_configuration()) - driver = common.NetAppDriver(configuration=configuration) - self.mock_object(client_base.Client, '_init_ssh_client') - self.stubs.Set(http_client, 'HTTPConnection', - FakeDirect7modeHTTPConnection) - self.mock_object(driver.library, '_get_root_volume_name', - mock.Mock(return_value='root')) - self.mock_object(perf_7mode, 'Performance7modeLibrary') - driver.do_setup(context='') - self.driver = driver - self.driver.root_volume_name = 'root' - - def _set_config(self, configuration): - configuration.netapp_storage_family = 'ontap_7mode' - configuration.netapp_storage_protocol = 'iscsi' - configuration.netapp_login = 'admin' - configuration.netapp_password = 'pass' - configuration.netapp_server_hostname = '127.0.0.1' - configuration.netapp_transport_type = 'http' - configuration.netapp_server_port = None - configuration.netapp_vfiler = 'openstack' - return configuration diff --git a/cinder/tests/unit/test_netapp_nfs.py b/cinder/tests/unit/test_netapp_nfs.py deleted file mode 100644 index cd848e867..000000000 --- a/cinder/tests/unit/test_netapp_nfs.py +++ /dev/null @@ -1,1589 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
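The deleted NFS test module below leans on mox3's record/replay/verify cycle
rather than plain mock. As a minimal, self-contained sketch of that cycle
(the Calculator class and its values are invented for illustration, not taken
from this file):

    from mox3 import mox as mox_lib

    class Calculator(object):
        def add(self, a, b):
            return a + b

    m = mox_lib.Mox()
    calc = Calculator()
    m.StubOutWithMock(calc, 'add')                  # replace method with mock
    calc.add(2, mox_lib.IgnoreArg()).AndReturn(42)  # record an expectation
    m.ReplayAll()                                   # switch record -> replay
    assert calc.add(2, 3) == 42                     # call matches the script
    m.VerifyAll()                                   # all expectations met?
    m.UnsetStubs()                                  # restore the real method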
-"""Unit tests for the NetApp-specific NFS driver module.""" - -import itertools -import os -import shutil - -from lxml import etree -import mock -from mox3 import mox as mox_lib -import six - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_volume -from cinder import utils as cinder_utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.netapp import common -from cinder.volume.drivers.netapp.dataontap import (nfs_7mode - as netapp_nfs_7mode) -from cinder.volume.drivers.netapp.dataontap import (nfs_cmode - as netapp_nfs_cmode) -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_7mode -from cinder.volume.drivers.netapp.dataontap.client import client_base -from cinder.volume.drivers.netapp.dataontap.client import client_cmode -from cinder.volume.drivers.netapp.dataontap import nfs_base -from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode -from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode -from cinder.volume.drivers.netapp import utils - - -CONNECTION_INFO = { - 'hostname': 'fake_host', - 'transport_type': 'https', - 'port': 443, - 'username': 'admin', - 'password': 'passw0rd', -} - -FAKE_CONNECTION_INFO_HTTP = { - 'hostname': '127.0.0.1', - 'transport_type': 'http', - 'port': None, - 'username': 'admin', - 'password': 'pass', - 'vserver': 'openstack', -} - -FAKE_CONNECTION_INFO_HTTPS = dict(FAKE_CONNECTION_INFO_HTTP, - transport_type='https') - -FAKE_7MODE_CONNECTION_INFO_HTTP = dict(FAKE_CONNECTION_INFO_HTTP) -FAKE_7MODE_CONNECTION_INFO_HTTP.pop('vserver') -FAKE_7MODE_CONNECTION_INFO_HTTP['vfiler'] = 'test_vfiler' - -FAKE_7MODE_CONNECTION_INFO_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTP, - transport_type='https') - -SEVEN_MODE_CONNECTION_INFO = dict( - itertools.chain(CONNECTION_INFO.items(), - {'vfiler': 'test_vfiler'}.items())) - -FAKE_VSERVER = 'fake_vserver' - - -def create_configuration(): - configuration = mox_lib.MockObject(conf.Configuration) - configuration.append_config_values(mox_lib.IgnoreArg()) - configuration.max_over_subscription_ratio = 20.0 - configuration.reserved_percentage = 0 - configuration.nfs_mount_point_base = '/mnt/test' - configuration.nfs_mount_options = None - configuration.nas_mount_options = None - configuration.netapp_server_hostname = CONNECTION_INFO['hostname'] - configuration.netapp_transport_type = CONNECTION_INFO['transport_type'] - configuration.netapp_server_port = CONNECTION_INFO['port'] - configuration.netapp_login = CONNECTION_INFO['username'] - configuration.netapp_password = CONNECTION_INFO['password'] - configuration.netapp_vfiler = SEVEN_MODE_CONNECTION_INFO['vfiler'] - return configuration - - -class FakeVolume(object): - def __init__(self, host='', size=0): - self.size = size - self.id = hash(self) - self.name = None - self.host = host - - def __getitem__(self, key): - return self.__dict__[key] - - def __setitem__(self, key, val): - self.__dict__[key] = val - - -class FakeSnapshot(object): - def __init__(self, volume_size=0): - self.volume_name = None - self.name = None - self.volume_id = None - self.volume_size = volume_size - self.user_id = None - self.status = None - - def __getitem__(self, key): - return self.__dict__[key] - - -class FakeResponse(object): - def __init__(self, status): - """Initialize FakeResponse. 
- - :param status: Either 'failed' or 'passed' - """ - self.Status = status - - if status == 'failed': - self.Reason = 'Sample error' - - -class NetAppCmodeNfsDriverTestCase(test.TestCase): - """Test direct NetApp C Mode driver.""" - - TEST_NFS_HOST = 'nfs-host1' - TEST_NFS_SHARE_PATH = '/export' - TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH) - TEST_NFS_EXPORT2 = 'nfs-host2:/export' - TEST_MNT_POINT = '/mnt/nfs' - - def setUp(self): - super(NetAppCmodeNfsDriverTestCase, self).setUp() - self._custom_setup() - - def _custom_setup(self): - self.mock_object(utils, 'OpenStackInfo') - kwargs = {} - kwargs['netapp_mode'] = 'proxy' - kwargs['configuration'] = create_configuration() - - self.mock_object(nfs_base, 'LOG') - self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs) - self._driver.zapi_client = mock.Mock() - self._driver.ssc_library = mock.Mock() - config = self._driver.configuration - config.netapp_vserver = FAKE_VSERVER - self.context = context.get_admin_context() - - def test_create_snapshot(self): - """Test snapshot can be created and deleted.""" - mox = self.mox - drv = self._driver - - mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') - drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()) - mox.ReplayAll() - - drv.create_snapshot(FakeSnapshot()) - - mox.VerifyAll() - - def test_create_volume_from_snapshot(self): - """Tests volume creation from snapshot.""" - drv = self._driver - mox = self.mox - location = '127.0.0.1:/nfs' - host = 'hostname@backend#' + location - volume = FakeVolume(host, 1) - snapshot = FakeSnapshot(1) - - expected_result = {'provider_location': location} - mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') - mox.StubOutWithMock(drv, '_get_volume_location') - mox.StubOutWithMock(drv, 'local_path') - mox.StubOutWithMock(drv, '_discover_file_till_timeout') - mox.StubOutWithMock(drv, '_set_rw_permissions') - drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()) - drv._get_volume_location(mox_lib.IgnoreArg()).AndReturn(location) - drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt') - drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True) - drv._set_rw_permissions(mox_lib.IgnoreArg()) - - mox.ReplayAll() - - self.mock_object(drv, '_do_qos_for_volume') - self.mock_object(utils, 'get_volume_extra_specs') - - loc = drv.create_volume_from_snapshot(volume, snapshot) - - self.assertEqual(expected_result, loc) - - mox.VerifyAll() - - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup') - @mock.patch.object(client_cmode.Client, '__init__', return_value=None) - def test_do_setup(self, mock_client_init, mock_super_do_setup): - context = mock.Mock() - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - self._driver.do_setup(context) - mock_client_init.assert_called_once_with(vserver=FAKE_VSERVER, - **CONNECTION_INFO) - mock_super_do_setup.assert_called_once_with(context) - - @mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error') - def test_check_for_setup_error(self, mock_super_check_for_setup_error): - self._driver.zapi_client = mock.Mock() - self._driver._start_periodic_tasks = mock.Mock() - - self._driver.check_for_setup_error() - - (self._driver.ssc_library.check_api_permissions. 
- assert_called_once_with()) - mock_super_check_for_setup_error.assert_called_once_with() - self._driver._start_periodic_tasks.assert_called_once_with() - - def _prepare_clone_mock(self, status): - drv = self._driver - mox = self.mox - - volume = FakeVolume() - setattr(volume, 'provider_location', '127.0.0.1:/nfs') - - drv.zapi_client = mox.CreateMockAnything() - mox.StubOutWithMock(drv, '_get_host_ip') - mox.StubOutWithMock(drv, '_get_export_path') - - drv.zapi_client.get_if_info_by_ip('127.0.0.1').AndReturn( - self._prepare_info_by_ip_response()) - drv.zapi_client.get_vol_by_junc_vserver('openstack', '/nfs').AndReturn( - 'nfsvol') - drv.zapi_client.clone_file('nfsvol', 'volume_name', 'clone_name', - 'openstack') - drv._get_host_ip(mox_lib.IgnoreArg()).AndReturn('127.0.0.1') - drv._get_export_path(mox_lib.IgnoreArg()).AndReturn('/nfs') - return mox - - def _prepare_info_by_ip_response(self): - res = """ - -
127.0.0.1
- up - fas3170rre-cmode-01 - e1b-1165 - - nfs - - none - - disabled - data - fas3170rre-cmode-01 - e1b-1165 - nfs_data1 - false - true - 255.255.255.0 - 24 - up - data - c10.63.165.0/24 - disabled - openstack -
""" - response_el = etree.XML(res) - return netapp_api.NaElement(response_el).get_children() - - def test_clone_backing_file_for_volume(self): - drv = self._driver - mox = self._prepare_clone_mock('pass') - - mox.ReplayAll() - - volume_name = 'volume_name' - clone_name = 'clone_name' - volume_id = volume_name + six.text_type(hash(volume_name)) - share = 'ip:/share' - - drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id, - share) - - mox.VerifyAll() - - def test_register_img_in_cache_noshare(self): - volume = {'id': '1', 'name': 'testvol'} - volume['provider_location'] = '10.61.170.1:/share/path' - drv = self._driver - mox = self.mox - mox.StubOutWithMock(drv, '_do_clone_rel_img_cache') - - drv._do_clone_rel_img_cache('testvol', 'img-cache-12345', - '10.61.170.1:/share/path', - 'img-cache-12345') - - mox.ReplayAll() - drv._register_image_in_cache(volume, '12345') - mox.VerifyAll() - - def test_register_img_in_cache_with_share(self): - volume = {'id': '1', 'name': 'testvol'} - volume['provider_location'] = '10.61.170.1:/share/path' - drv = self._driver - mox = self.mox - mox.StubOutWithMock(drv, '_do_clone_rel_img_cache') - - drv._do_clone_rel_img_cache('testvol', 'img-cache-12345', - '10.61.170.1:/share/path', - 'img-cache-12345') - - mox.ReplayAll() - drv._register_image_in_cache(volume, '12345') - mox.VerifyAll() - - def test_find_image_in_cache_no_shares(self): - drv = self._driver - drv._mounted_shares = [] - result = drv._find_image_in_cache('image_id') - if not result: - pass - else: - self.fail('Return result is unexpected') - - def test_find_image_in_cache_shares(self): - drv = self._driver - mox = self.mox - drv._mounted_shares = ['testshare'] - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - mox.StubOutWithMock(os.path, 'isfile') - - drv._get_mount_point_for_share('testshare').AndReturn('/mnt') - os.path.isfile('/mnt/img-cache-id').AndReturn(True) - mox.ReplayAll() - result = drv._find_image_in_cache('id') - (share, file_name) = result[0] - mox.VerifyAll() - drv._mounted_shares.remove('testshare') - - if (share == 'testshare' and file_name == 'img-cache-id'): - pass - else: - self.fail('Return result is unexpected') - - def test_find_old_cache_files_notexists(self): - drv = self._driver - mox = self.mox - cmd = ['find', '/mnt', '-maxdepth', '1', '-name', - 'img-cache*', '-amin', '+720'] - setattr(drv.configuration, 'expiry_thres_minutes', 720) - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - mox.StubOutWithMock(drv, '_execute') - - drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt') - drv._execute(*cmd, run_as_root=True).AndReturn((None, '')) - mox.ReplayAll() - res = drv._find_old_cache_files('share') - mox.VerifyAll() - if len(res) == 0: - pass - else: - self.fail('No files expected but got return values.') - - def test_find_old_cache_files_exists(self): - drv = self._driver - mox = self.mox - cmd = ['find', '/mnt', '-maxdepth', '1', '-name', - 'img-cache*', '-amin', '+720'] - setattr(drv.configuration, 'expiry_thres_minutes', '720') - files = '/mnt/img-id1\n/mnt/img-id2\n' - r_files = ['img-id1', 'img-id2'] - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - mox.StubOutWithMock(drv, '_execute') - mox.StubOutWithMock(drv, '_shortlist_del_eligible_files') - - drv._get_mount_point_for_share('share').AndReturn('/mnt') - drv._execute(*cmd, run_as_root=True).AndReturn((files, None)) - drv._shortlist_del_eligible_files( - mox_lib.IgnoreArg(), r_files).AndReturn(r_files) - mox.ReplayAll() - res = 
drv._find_old_cache_files('share') - mox.VerifyAll() - if len(res) == len(r_files): - for f in res: - r_files.remove(f) - else: - self.fail('Returned files not same as expected.') - - def test_delete_files_till_bytes_free_success(self): - drv = self._driver - mox = self.mox - files = [('img-cache-1', 230), ('img-cache-2', 380)] - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - mox.StubOutWithMock(drv, '_delete_file_at_path') - - drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt') - drv._delete_file_at_path('/mnt/img-cache-2').AndReturn(True) - drv._delete_file_at_path('/mnt/img-cache-1').AndReturn(True) - mox.ReplayAll() - drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024) - mox.VerifyAll() - - def test_clean_image_cache_exec(self): - drv = self._driver - mox = self.mox - drv.configuration.thres_avl_size_perc_start = 20 - drv.configuration.thres_avl_size_perc_stop = 50 - drv._mounted_shares = ['testshare'] - - mox.StubOutWithMock(drv, '_find_old_cache_files') - mox.StubOutWithMock(drv, '_delete_files_till_bytes_free') - mox.StubOutWithMock(drv, '_get_capacity_info') - - drv._get_capacity_info('testshare').AndReturn((100, 19)) - drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2']) - drv._delete_files_till_bytes_free( - ['f1', 'f2'], 'testshare', bytes_to_free=31) - mox.ReplayAll() - drv._clean_image_cache() - mox.VerifyAll() - drv._mounted_shares.remove('testshare') - if not drv.cleaning: - pass - else: - self.fail('Clean image cache failed.') - - def test_clean_image_cache_noexec(self): - drv = self._driver - mox = self.mox - drv.configuration.thres_avl_size_perc_start = 20 - drv.configuration.thres_avl_size_perc_stop = 50 - drv._mounted_shares = ['testshare'] - - mox.StubOutWithMock(drv, '_get_capacity_info') - - drv._get_capacity_info('testshare').AndReturn((100, 30, 70)) - mox.ReplayAll() - drv._clean_image_cache() - mox.VerifyAll() - drv._mounted_shares.remove('testshare') - if not drv.cleaning: - pass - else: - self.fail('Clean image cache failed.') - - def test_clone_image_fromcache(self): - drv = self._driver - mox = self.mox - volume = {'name': 'vol', 'size': '20'} - mox.StubOutWithMock(utils, 'get_volume_extra_specs') - mox.StubOutWithMock(drv, '_find_image_in_cache') - mox.StubOutWithMock(drv, '_do_clone_rel_img_cache') - mox.StubOutWithMock(drv, '_post_clone_image') - mox.StubOutWithMock(drv, '_is_share_clone_compatible') - - utils.get_volume_extra_specs(mox_lib.IgnoreArg()) - drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn( - [('share', 'file_name')]) - drv._is_share_clone_compatible(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn(True) - drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name') - drv._post_clone_image(volume) - - mox.ReplayAll() - drv.clone_image('', - volume, - ('image_location', None), - {'id': 'image_id'}, '') - mox.VerifyAll() - - def get_img_info(self, format): - class img_info(object): - def __init__(self, fmt): - self.file_format = fmt - - return img_info(format) - - def test_clone_image_cloneableshare_nospace(self): - drv = self._driver - mox = self.mox - volume = {'name': 'vol', 'size': '20'} - mox.StubOutWithMock(utils, 'get_volume_extra_specs') - mox.StubOutWithMock(drv, '_find_image_in_cache') - mox.StubOutWithMock(drv, '_is_cloneable_share') - mox.StubOutWithMock(drv, '_is_share_clone_compatible') - - utils.get_volume_extra_specs(mox_lib.IgnoreArg()) - drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([]) - drv._is_cloneable_share( - 
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share') - drv._is_share_clone_compatible(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn(False) - - mox.ReplayAll() - (prop, cloned) = drv.clone_image( - '', - volume, - ('nfs://127.0.0.1:/share/img-id', None), - {'id': 'image_id'}, - '') - mox.VerifyAll() - if not cloned and not prop['provider_location']: - pass - else: - self.fail('Expected not cloned, got cloned.') - - def test_clone_image_cloneableshare_raw(self): - drv = self._driver - mox = self.mox - volume = fake_volume.fake_volume_obj(self.context, size=20) - volume_name = 'volume-%s' % volume.id - mox.StubOutWithMock(utils, 'get_volume_extra_specs') - mox.StubOutWithMock(drv, '_find_image_in_cache') - mox.StubOutWithMock(drv, '_is_cloneable_share') - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - mox.StubOutWithMock(image_utils, 'qemu_img_info') - mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') - mox.StubOutWithMock(drv, '_discover_file_till_timeout') - mox.StubOutWithMock(drv, '_set_rw_permissions') - mox.StubOutWithMock(drv, '_resize_image_file') - mox.StubOutWithMock(drv, '_is_share_clone_compatible') - - utils.get_volume_extra_specs(mox_lib.IgnoreArg()) - drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([]) - drv._is_cloneable_share( - mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share') - drv._is_share_clone_compatible(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn(True) - drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt') - image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\ - AndReturn(self.get_img_info('raw')) - drv._clone_backing_file_for_volume( - 'img-id', volume_name, share='127.0.0.1:/share', volume_id=None) - drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt') - drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True) - drv._set_rw_permissions('/mnt/%s' % volume_name) - drv._resize_image_file({'name': volume_name}, mox_lib.IgnoreArg()) - - mox.ReplayAll() - drv.clone_image( - '', - volume, - ('nfs://127.0.0.1:/share/img-id', None), - {'id': 'image_id'}, - '') - mox.VerifyAll() - - def test_clone_image_cloneableshare_notraw(self): - drv = self._driver - mox = self.mox - volume = fake_volume.fake_volume_obj(self.context, size=20) - volume_name = 'volume-%s' % volume.id - mox.StubOutWithMock(utils, 'get_volume_extra_specs') - mox.StubOutWithMock(drv, '_find_image_in_cache') - mox.StubOutWithMock(drv, '_is_cloneable_share') - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - mox.StubOutWithMock(image_utils, 'qemu_img_info') - mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') - mox.StubOutWithMock(drv, '_discover_file_till_timeout') - mox.StubOutWithMock(drv, '_set_rw_permissions') - mox.StubOutWithMock(drv, '_resize_image_file') - mox.StubOutWithMock(image_utils, 'convert_image') - mox.StubOutWithMock(drv, '_register_image_in_cache') - mox.StubOutWithMock(drv, '_is_share_clone_compatible') - mox.StubOutWithMock(drv, '_do_qos_for_volume') - - utils.get_volume_extra_specs(mox_lib.IgnoreArg()) - drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([]) - drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn( - '127.0.0.1:/share') - drv._is_share_clone_compatible(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn(True) - drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt') - image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\ - AndReturn(self.get_img_info('notraw')) - image_utils.convert_image(mox_lib.IgnoreArg(), - 
mox_lib.IgnoreArg(), - 'raw', run_as_root=True) - image_utils.qemu_img_info('/mnt/%s' % volume_name, run_as_root=True).\ - AndReturn(self.get_img_info('raw')) - drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) - drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) - - drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt') - drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True) - drv._set_rw_permissions('/mnt/%s' % volume_name) - drv._resize_image_file({'name': volume_name}, mox_lib.IgnoreArg()) - - mox.ReplayAll() - drv.clone_image( - '', - volume, - ('nfs://127.0.0.1/share/img-id', None), - {'id': 'image_id'}, - '') - mox.VerifyAll() - - def test_clone_image_file_not_discovered(self): - drv = self._driver - mox = self.mox - volume = fake_volume.fake_volume_obj(self.context, size=20) - volume_name = 'volume-%s' % volume.id - mox.StubOutWithMock(utils, 'get_volume_extra_specs') - mox.StubOutWithMock(drv, '_find_image_in_cache') - mox.StubOutWithMock(drv, '_is_cloneable_share') - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - mox.StubOutWithMock(image_utils, 'qemu_img_info') - mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') - mox.StubOutWithMock(drv, '_discover_file_till_timeout') - mox.StubOutWithMock(image_utils, 'convert_image') - mox.StubOutWithMock(drv, '_register_image_in_cache') - mox.StubOutWithMock(drv, '_is_share_clone_compatible') - mox.StubOutWithMock(drv, '_do_qos_for_volume') - mox.StubOutWithMock(drv, 'local_path') - - utils.get_volume_extra_specs(mox_lib.IgnoreArg()) - drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([]) - drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn( - '127.0.0.1:/share') - drv._is_share_clone_compatible(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn(True) - drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt') - image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\ - AndReturn(self.get_img_info('notraw')) - image_utils.convert_image(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg(), - 'raw', run_as_root=True) - image_utils.qemu_img_info('/mnt/%s' % volume_name, run_as_root=True).\ - AndReturn(self.get_img_info('raw')) - drv._register_image_in_cache(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()) - drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) - drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/%s' % volume_name) - drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(False) - - mox.ReplayAll() - vol_dict, result = drv.clone_image( - '', - volume, - ('nfs://127.0.0.1/share/img-id', None), - {'id': 'image_id'}, - '') - mox.VerifyAll() - self.assertFalse(result) - self.assertFalse(vol_dict['bootable']) - self.assertIsNone(vol_dict['provider_location']) - - def test_clone_image_resizefails(self): - drv = self._driver - mox = self.mox - volume = {'name': 'vol', 'size': '20'} - mox.StubOutWithMock(utils, 'get_volume_extra_specs') - mox.StubOutWithMock(drv, '_find_image_in_cache') - mox.StubOutWithMock(drv, '_is_cloneable_share') - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - mox.StubOutWithMock(image_utils, 'qemu_img_info') - mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') - mox.StubOutWithMock(drv, '_discover_file_till_timeout') - mox.StubOutWithMock(drv, '_set_rw_permissions') - mox.StubOutWithMock(drv, '_resize_image_file') - mox.StubOutWithMock(image_utils, 'convert_image') - mox.StubOutWithMock(drv, '_do_qos_for_volume') - mox.StubOutWithMock(drv, '_register_image_in_cache') - 
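# Every collaborator is stubbed before any expectation is recorded below;
# under mox, an unscripted call on a stubbed method raises during replay,
# and a recorded call that never happens fails the test at VerifyAll().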
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
-        mox.StubOutWithMock(drv, 'local_path')
-
-        utils.get_volume_extra_specs(mox_lib.IgnoreArg())
-        drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
-        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
-            '127.0.0.1:/share')
-        drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
-                                       mox_lib.IgnoreArg()).AndReturn(True)
-        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
-        image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
-            AndReturn(self.get_img_info('notraw'))
-        image_utils.convert_image(mox_lib.IgnoreArg(),
-                                  mox_lib.IgnoreArg(), 'raw',
-                                  run_as_root=True)
-        image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
-            AndReturn(self.get_img_info('raw'))
-        drv._register_image_in_cache(mox_lib.IgnoreArg(),
-                                     mox_lib.IgnoreArg())
-        drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
-        drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
-        drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
-        drv._set_rw_permissions('/mnt/vol')
-        drv._resize_image_file(
-            mox_lib.IgnoreArg(),
-            mox_lib.IgnoreArg()).AndRaise(exception.InvalidResults())
-
-        mox.ReplayAll()
-        vol_dict, result = drv.clone_image(
-            '',
-            volume,
-            ('nfs://127.0.0.1/share/img-id', None),
-            {'id': 'image_id'},
-            '')
-        mox.VerifyAll()
-        self.assertFalse(result)
-        self.assertFalse(vol_dict['bootable'])
-        self.assertIsNone(vol_dict['provider_location'])
-
-    def test_is_cloneable_share_badformats(self):
-        drv = self._driver
-        strgs = ['10.61.666.22:/share/img',
-                 'nfs://10.61.666.22:/share/img',
-                 'nfs://10.61.666.22//share/img',
-                 'nfs://com.netapp.com:/share/img',
-                 'nfs://com.netapp.com//share/img',
-                 'com.netapp.com://share/im\g',
-                 'http://com.netapp.com://share/img',
-                 'nfs://com.netapp.com:/share/img',
-                 'nfs://com.netapp.com:8080//share/img',
-                 'nfs://com.netapp.com//img',
-                 'nfs://[ae::sr::ty::po]/img']
-        for strg in strgs:
-            res = drv._is_cloneable_share(strg)
-            if res:
-                msg = 'Invalid format matched for url %s.' 
% strg - self.fail(msg) - - def test_is_cloneable_share_goodformat1(self): - drv = self._driver - mox = self.mox - strg = 'nfs://10.61.222.333/share/img' - mox.StubOutWithMock(drv, '_check_share_in_use') - drv._check_share_in_use(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn('share') - mox.ReplayAll() - drv._is_cloneable_share(strg) - mox.VerifyAll() - - def test_is_cloneable_share_goodformat2(self): - drv = self._driver - mox = self.mox - strg = 'nfs://10.61.222.333:8080/share/img' - mox.StubOutWithMock(drv, '_check_share_in_use') - drv._check_share_in_use(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn('share') - mox.ReplayAll() - drv._is_cloneable_share(strg) - mox.VerifyAll() - - def test_is_cloneable_share_goodformat3(self): - drv = self._driver - mox = self.mox - strg = 'nfs://com.netapp:8080/share/img' - mox.StubOutWithMock(drv, '_check_share_in_use') - drv._check_share_in_use(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn('share') - mox.ReplayAll() - drv._is_cloneable_share(strg) - mox.VerifyAll() - - def test_is_cloneable_share_goodformat4(self): - drv = self._driver - mox = self.mox - strg = 'nfs://netapp.com/share/img' - mox.StubOutWithMock(drv, '_check_share_in_use') - drv._check_share_in_use(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn('share') - mox.ReplayAll() - drv._is_cloneable_share(strg) - mox.VerifyAll() - - def test_is_cloneable_share_goodformat5(self): - drv = self._driver - mox = self.mox - strg = 'nfs://netapp.com/img' - mox.StubOutWithMock(drv, '_check_share_in_use') - drv._check_share_in_use(mox_lib.IgnoreArg(), - mox_lib.IgnoreArg()).AndReturn('share') - mox.ReplayAll() - drv._is_cloneable_share(strg) - mox.VerifyAll() - - def test_check_share_in_use_no_conn(self): - drv = self._driver - share = drv._check_share_in_use(None, '/dir') - if share: - self.fail('Unexpected share detected.') - - def test_check_share_in_use_invalid_conn(self): - drv = self._driver - share = drv._check_share_in_use(':8989', '/dir') - if share: - self.fail('Unexpected share detected.') - - def test_check_share_in_use_incorrect_host(self): - drv = self._driver - mox = self.mox - mox.StubOutWithMock(utils, 'resolve_hostname') - utils.resolve_hostname(mox_lib.IgnoreArg()).AndRaise(Exception()) - mox.ReplayAll() - share = drv._check_share_in_use('incorrect:8989', '/dir') - mox.VerifyAll() - if share: - self.fail('Unexpected share detected.') - - def test_check_share_in_use_success(self): - drv = self._driver - mox = self.mox - drv._mounted_shares = ['127.0.0.1:/dir/share'] - mox.StubOutWithMock(utils, 'resolve_hostname') - mox.StubOutWithMock(drv, '_share_match_for_ip') - utils.resolve_hostname(mox_lib.IgnoreArg()).AndReturn('10.22.33.44') - drv._share_match_for_ip( - '10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share') - mox.ReplayAll() - share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share') - mox.VerifyAll() - if not share: - self.fail('Expected share not detected') - - def test_construct_image_url_loc(self): - drv = self._driver - img_loc = (None, - # Valid metdata - [{'metadata': - {'share_location': 'nfs://host/path', - 'mountpoint': '/opt/stack/data/glance', - 'id': 'abc-123', - 'type': 'nfs'}, - 'url': 'file:///opt/stack/data/glance/image-id-0'}, - # missing metadata - {'metadata': {}, - 'url': 'file:///opt/stack/data/glance/image-id-1'}, - # missing location_type - {'metadata': {'location_type': None}, - 'url': 'file:///opt/stack/data/glance/image-id-2'}, - # non-nfs location_type - {'metadata': {'location_type': 'not-NFS'}, - 'url': 
'file:///opt/stack/data/glance/image-id-3'}, - # missing share_location - {'metadata': {'location_type': 'nfs', - 'share_location': None}, - 'url': 'file:///opt/stack/data/glance/image-id-4'}, - # missing mountpoint - {'metadata': {'location_type': 'nfs', - 'share_location': 'nfs://host/path', - # Pre-kilo we documented "mount_point" - 'mount_point': '/opt/stack/data/glance'}, - 'url': 'file:///opt/stack/data/glance/image-id-5'}, - # Valid metadata - {'metadata': - {'share_location': 'nfs://host/path', - 'mountpoint': '/opt/stack/data/glance', - 'id': 'abc-123', - 'type': 'nfs'}, - 'url': 'file:///opt/stack/data/glance/image-id-6'}]) - - locations = drv._construct_image_nfs_url(img_loc) - - self.assertIn("nfs://host/path/image-id-0", locations) - self.assertIn("nfs://host/path/image-id-6", locations) - self.assertEqual(2, len(locations)) - - def test_construct_image_url_direct(self): - drv = self._driver - img_loc = ("nfs://host/path/image-id", None) - - locations = drv._construct_image_nfs_url(img_loc) - - self.assertIn("nfs://host/path/image-id", locations) - - def test_get_pool(self): - pool = self._driver.get_pool({'provider_location': 'fake-share'}) - self.assertEqual('fake-share', pool) - - def _set_config(self, configuration): - configuration.netapp_storage_family = 'ontap_cluster' - configuration.netapp_storage_protocol = 'nfs' - configuration.netapp_login = 'admin' - configuration.netapp_password = 'pass' - configuration.netapp_server_hostname = '127.0.0.1' - configuration.netapp_transport_type = 'http' - configuration.netapp_server_port = None - configuration.netapp_vserver = 'openstack' - configuration.nfs_shares_config = '/nfs' - return configuration - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_all_default(self): - configuration = self._set_config(create_configuration()) - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_http_default_port(self): - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'http' - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_https_default_port(self): - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'https' - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTPS) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - 
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_http_non_default_port(self): - configuration = self._set_config(create_configuration()) - configuration.netapp_server_port = 81 - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_CONNECTION_INFO_HTTP, port=81) - mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_https_non_default_port(self): - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'https' - configuration.netapp_server_port = 446 - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_cmode, 'Client') - self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') - driver.do_setup(context='') - FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_CONNECTION_INFO_HTTPS, port=446) - mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS) - - @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') - def test_convert_vol_ref_share_name_to_share_ip(self, mock_hostname): - drv = self._driver - share = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name') - modified_share = '10.12.142.11:/export/test_file_name' - - modified_vol_ref = drv._convert_vol_ref_share_name_to_share_ip(share) - - self.assertEqual(modified_share, modified_vol_ref) - - @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') - @mock.patch.object(os.path, 'isfile', return_value=True) - def test_get_share_mount_and_vol_from_vol_ref(self, mock_isfile, - mock_hostname): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name') - vol_ref = {'source-name': vol_path} - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - - (share, mount, file_path) = \ - drv._get_share_mount_and_vol_from_vol_ref(vol_ref) - - self.assertEqual(self.TEST_NFS_EXPORT1, share) - self.assertEqual(self.TEST_MNT_POINT, mount) - self.assertEqual('test_file_name', file_path) - - @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') - def test_get_share_mount_and_vol_from_vol_ref_with_bad_ref(self, - mock_hostname): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - vol_ref = {'source-id': '1234546'} - - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - - self.assertRaises(exception.ManageExistingInvalidReference, - drv._get_share_mount_and_vol_from_vol_ref, vol_ref) - - @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') - def test_get_share_mount_and_vol_from_vol_ref_where_not_found(self, - mock_host): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - vol_path = "%s/%s" % (self.TEST_NFS_EXPORT2, 'test_file_name') - vol_ref = {'source-name': vol_path} - - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - - self.assertRaises(exception.ManageExistingInvalidReference, - drv._get_share_mount_and_vol_from_vol_ref, vol_ref) - - 
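The removed tests above and below exercise a single lookup: _get_share_mount_and_vol_from_vol_ref takes a 'source-name' reference of the form host:/export/file, resolves the host to an IP address, and accepts the reference only when one of the driver's mounted shares exports that path from the same address; a reference carrying only 'source-id' is rejected with ManageExistingInvalidReference. A minimal sketch of that matching logic follows; the helper name is hypothetical and socket.gethostbyname stands in for cinder's utils.resolve_hostname, so treat it as an illustration rather than the driver's actual code:

    import socket

    def find_backing_share(vol_ref, mounted_shares):
        """Return the mounted share that backs vol_ref, or None."""
        source = vol_ref.get('source-name')
        if source is None:
            # Only 'source-name' references are supported; callers map a
            # None result to ManageExistingInvalidReference.
            return None
        host, sep, path = source.partition(':')
        if not sep:
            return None
        try:
            ip = socket.gethostbyname(host)
        except socket.error:
            return None
        for share in mounted_shares:
            share_host, _, share_path = share.partition(':')
            try:
                if socket.gethostbyname(share_host) != ip:
                    continue
            except socket.error:
                continue
            if path.startswith(share_path + '/'):
                return share
        return None

With mounted_shares = ['127.0.0.1:/dir/share'], a reference naming localhost:/dir/share/file resolves to that share, while {'source-id': '1234546'} returns None, which is the behavior the bad-ref test pins down.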
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') - def test_get_share_mount_and_vol_from_vol_ref_where_is_dir(self, - mock_host): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - vol_ref = {'source-name': self.TEST_NFS_EXPORT2} - - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - - self.assertRaises(exception.ManageExistingInvalidReference, - drv._get_share_mount_and_vol_from_vol_ref, vol_ref) - - @mock.patch.object(cinder_utils, 'get_file_size', return_value=1073741824) - def test_manage_existing_get_size(self, get_file_size): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - test_file = 'test_file_name' - volume = FakeVolume() - volume['name'] = 'file-new-managed-123' - volume['id'] = 'volume-new-managed-123' - vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) - vol_ref = {'source-name': vol_path} - - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, - test_file)) - - vol_size = drv.manage_existing_get_size(volume, vol_ref) - self.assertEqual(1, vol_size) - - @mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824) - def test_manage_existing_get_size_round_up(self, get_file_size): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - test_file = 'test_file_name' - volume = FakeVolume() - volume['name'] = 'file-new-managed-123' - volume['id'] = 'volume-new-managed-123' - vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) - vol_ref = {'source-name': vol_path} - - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, - test_file)) - - vol_size = drv.manage_existing_get_size(volume, vol_ref) - self.assertEqual(2, vol_size) - - @mock.patch.object(cinder_utils, 'get_file_size', return_value='badfloat') - def test_manage_existing_get_size_error(self, get_size): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - test_file = 'test_file_name' - volume = FakeVolume() - volume['name'] = 'file-new-managed-123' - volume['id'] = 'volume-new-managed-123' - vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) - vol_ref = {'source-name': vol_path} - - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, - test_file)) - - self.assertRaises(exception.VolumeBackendAPIException, - drv.manage_existing_get_size, volume, vol_ref) - - @mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824) - def test_manage_existing(self, get_file_size): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - test_file = 'test_file_name' - volume = FakeVolume() - volume['name'] = 'file-new-managed-123' - volume['id'] = 'volume-new-managed-123' - vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) - vol_ref = {'source-name': vol_path} - drv._check_volume_type = mock.Mock() - self.stubs.Set(drv, '_execute', mock.Mock()) - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - 
return_value=self.TEST_MNT_POINT) - drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, - test_file)) - shutil.move = mock.Mock() - mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs') - mock_get_specs.return_value = {} - self.mock_object(drv, '_do_qos_for_volume') - - location = drv.manage_existing(volume, vol_ref) - - self.assertEqual(self.TEST_NFS_EXPORT1, location['provider_location']) - drv._check_volume_type.assert_called_once_with( - volume, self.TEST_NFS_EXPORT1, test_file, {}) - - @mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824) - def test_manage_existing_move_fails(self, get_file_size): - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - test_file = 'test_file_name' - volume = FakeVolume() - volume['name'] = 'volume-new-managed-123' - volume['id'] = 'volume-new-managed-123' - vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) - vol_ref = {'source-name': vol_path} - mock_check_volume_type = drv._check_volume_type = mock.Mock() - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, - test_file)) - drv._execute = mock.Mock(side_effect=OSError) - mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs') - mock_get_specs.return_value = {} - self.mock_object(drv, '_do_qos_for_volume') - - self.assertRaises(exception.VolumeBackendAPIException, - drv.manage_existing, volume, vol_ref) - - mock_check_volume_type.assert_called_once_with( - volume, self.TEST_NFS_EXPORT1, test_file, {}) - - @mock.patch.object(nfs_base, 'LOG') - def test_unmanage(self, mock_log): - drv = self._driver - self.mock_object(utils, 'get_valid_qos_policy_group_info') - volume = FakeVolume() - volume['id'] = '123' - volume['provider_location'] = '/share' - - drv.unmanage(volume) - - self.assertEqual(1, mock_log.info.call_count) - - -class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase): - """Test direct NetApp C Mode driver only and not inherit.""" - - def setUp(self): - super(NetAppCmodeNfsDriverOnlyTestCase, self).setUp() - self._custom_setup() - - def _custom_setup(self): - self.mock_object(utils, 'OpenStackInfo') - kwargs = {} - kwargs['netapp_mode'] = 'proxy' - kwargs['configuration'] = create_configuration() - self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs) - self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path' - self._driver.zapi_client = mock.Mock() - self.mock_object(netapp_nfs_cmode, 'LOG') - self._fake_empty_qos_policy_group_info = { - 'legacy': None, - 'spec': None, - } - self._fake_legacy_qos_policy_group_info = { - 'legacy': { - 'policy_name': 'qos_policy_1' - }, - 'spec': None, - } - - self.context = context.get_admin_context() - - @mock.patch.object(utils, 'LOG', mock.Mock()) - def test_create_volume(self): - drv = self._driver - fake_extra_specs = {} - fake_share = 'localhost:myshare' - host = 'hostname@backend#' + fake_share - mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs') - mock_get_specs.return_value = fake_extra_specs - self.mock_object(drv, '_ensure_shares_mounted') - self.mock_object(drv, '_do_create_volume') - mock_get_qos_info =\ - self.mock_object(utils, 'get_valid_qos_policy_group_info') - mock_get_qos_info.return_value = self._fake_empty_qos_policy_group_info - - volume_info = self._driver.create_volume(FakeVolume(host, 1)) 
- - self.assertEqual(fake_share, volume_info.get('provider_location')) - self.assertEqual(0, utils.LOG.warning.call_count) - - def test_create_volume_no_pool_specified(self): - drv = self._driver - host = 'hostname@backend' # missing pool - with mock.patch.object(drv, '_ensure_shares_mounted'): - self.assertRaises(exception.InvalidHost, - self._driver.create_volume, FakeVolume(host, 1)) - - def test_create_volume_with_legacy_qos_policy(self): - drv = self._driver - fake_extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'} - fake_share = 'localhost:myshare' - host = 'hostname@backend#' + fake_share - fake_volume = FakeVolume(host, 1) - mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs') - mock_get_specs.return_value = fake_extra_specs - mock_get_qos_info =\ - self.mock_object(utils, 'get_valid_qos_policy_group_info') - mock_get_qos_info.return_value =\ - self._fake_legacy_qos_policy_group_info - self.mock_object(drv, '_ensure_shares_mounted') - self.mock_object(drv, '_do_create_volume') - mock_set_qos = self.mock_object(drv, '_set_qos_policy_group_on_volume') - - volume_info = self._driver.create_volume(fake_volume) - - self.assertEqual('localhost:myshare', - volume_info.get('provider_location')) - mock_set_qos.assert_called_once_with( - fake_volume, self._fake_legacy_qos_policy_group_info) - - def test_copy_img_to_vol_copyoffload_success(self): - drv = self._driver - volume = fake_volume.fake_volume_obj(self.context) - image_service = object() - image_id = 'image_id' - drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) - drv._copy_from_img_service = mock.Mock() - drv._get_provider_location = mock.Mock(return_value='share') - drv._get_vol_for_share = mock.Mock(return_value=volume.id) - - drv.copy_image_to_volume(self.context, volume, image_service, image_id) - drv._copy_from_img_service.assert_called_once_with(self.context, - volume, - image_service, - image_id) - - def test_copy_img_to_vol_copyoffload_failure(self): - drv = self._driver - volume = fake_volume.fake_volume_obj(self.context) - - image_service = object() - image_id = 'image_id' - drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) - drv._copy_from_img_service = mock.Mock(side_effect=Exception()) - nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() - drv._get_provider_location = mock.Mock(return_value='share') - drv._get_vol_for_share = mock.Mock(return_value=volume.id) - - drv.copy_image_to_volume(self.context, volume, image_service, image_id) - drv._copy_from_img_service.assert_called_once_with(self.context, - volume, - image_service, - image_id) - nfs_base.NetAppNfsDriver.copy_image_to_volume. 
\ - assert_called_once_with( - self.context, volume, image_service, image_id) - - def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self): - drv = self._driver - volume = fake_volume.fake_volume_obj(self.context) - image_service = mock.Mock() - image_service.get_location.return_value = (mock.Mock(), mock.Mock()) - image_service.show.return_value = {'size': 0} - image_id = 'image_id' - drv._client = mock.Mock() - drv._client.get_api_version = mock.Mock(return_value=(1, 20)) - drv._find_image_in_cache = mock.Mock(return_value=[]) - drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"]) - drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test", - "dr")) - drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1") - drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') - drv._get_host_ip = mock.Mock() - drv._get_provider_location = mock.Mock() - drv._get_export_path = mock.Mock(return_value="dr") - drv._check_share_can_hold_size = mock.Mock() - # Raise error as if the copyoffload file can not be found - drv._clone_file_dst_exists = mock.Mock(side_effect=OSError()) - - # Verify the original error is propagated - self.assertRaises(OSError, drv._copy_from_img_service, - self.context, volume, image_service, image_id) - - @mock.patch.object(image_utils, 'qemu_img_info') - def test_img_service_raw_copyoffload_workflow_success(self, - mock_qemu_img_info): - drv = self._driver - volume = fake_volume.fake_volume_obj(self.context, size=1) - image_id = 'image_id' - image_service = mock.Mock() - image_service.get_location.return_value = ('nfs://ip1/openstack/img', - None) - image_service.show.return_value = {'size': 1, - 'disk_format': 'raw'} - - drv._check_get_nfs_path_segs =\ - mock.Mock(return_value=('ip1', '/openstack')) - drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') - drv._get_host_ip = mock.Mock(return_value='ip2') - drv._get_export_path = mock.Mock(return_value='/exp_path') - drv._get_provider_location = mock.Mock(return_value='share') - drv._execute = mock.Mock() - drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') - drv._discover_file_till_timeout = mock.Mock(return_value=True) - img_inf = mock.Mock() - img_inf.file_format = 'raw' - mock_qemu_img_info.return_value = img_inf - drv._check_share_can_hold_size = mock.Mock() - drv._move_nfs_file = mock.Mock(return_value=True) - drv._delete_file_at_path = mock.Mock() - drv._clone_file_dst_exists = mock.Mock() - drv._post_clone_image = mock.Mock() - - drv._copy_from_img_service(self.context, volume, - image_service, image_id) - drv._get_ip_verify_on_cluster.assert_any_call('ip1') - drv._get_export_path.assert_called_with(volume.id) - drv._check_share_can_hold_size.assert_called_with('share', 1) - - self.assertEqual(1, drv._execute.call_count) - drv._post_clone_image.assert_called_with(volume) - - @mock.patch.object(image_utils, 'convert_image') - @mock.patch.object(image_utils, 'qemu_img_info') - @mock.patch('os.path.exists') - def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists, - mock_qemu_img_info, - mock_cvrt_image): - drv = self._driver - volume = fake_volume.fake_volume_obj(self.context, size=1) - image_id = 'image_id' - image_service = mock.Mock() - image_service.get_location.return_value = ('nfs://ip1/openstack/img', - None) - image_service.show.return_value = {'size': 1, - 'disk_format': 'qcow2'} - drv._check_get_nfs_path_segs =\ - mock.Mock(return_value=('ip1', '/openstack')) - - drv._get_ip_verify_on_cluster = 
mock.Mock(return_value='ip1') - drv._get_host_ip = mock.Mock(return_value='ip2') - drv._get_export_path = mock.Mock(return_value='/exp_path') - drv._get_provider_location = mock.Mock(return_value='share') - drv._execute = mock.Mock() - drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') - img_inf = mock.Mock() - img_inf.file_format = 'raw' - mock_qemu_img_info.return_value = img_inf - drv._check_share_can_hold_size = mock.Mock() - - drv._move_nfs_file = mock.Mock(return_value=True) - drv._delete_file_at_path = mock.Mock() - drv._clone_file_dst_exists = mock.Mock() - drv._post_clone_image = mock.Mock() - - drv._copy_from_img_service(self.context, volume, - image_service, image_id) - drv._get_ip_verify_on_cluster.assert_any_call('ip1') - drv._get_export_path.assert_called_with(volume.id) - drv._check_share_can_hold_size.assert_called_with('share', 1) - self.assertEqual(1, mock_cvrt_image.call_count) - self.assertEqual(1, drv._execute.call_count) - self.assertEqual(2, drv._delete_file_at_path.call_count) - drv._clone_file_dst_exists.call_count == 1 - drv._post_clone_image.assert_called_with(volume) - - -class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase): - """Test direct NetApp 7 Mode driver.""" - - def _custom_setup(self): - self.mock_object(utils, 'OpenStackInfo') - - self.mock_object(common.na_utils, 'LOG') - self.mock_object(nfs_base, 'LOG') - self._driver = netapp_nfs_7mode.NetApp7modeNfsDriver( - configuration=create_configuration()) - self._driver.zapi_client = mock.Mock() - self.context = context.get_admin_context() - - def _prepare_delete_snapshot_mock(self, snapshot_exists): - drv = self._driver - mox = self.mox - - mox.StubOutWithMock(drv, '_get_provider_location') - mox.StubOutWithMock(drv, '_volume_not_present') - - if snapshot_exists: - mox.StubOutWithMock(drv, '_execute') - mox.StubOutWithMock(drv, '_get_volume_path') - - drv._get_provider_location(mox_lib.IgnoreArg()) - drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\ - .AndReturn(not snapshot_exists) - - if snapshot_exists: - drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) - drv._execute('rm', None, run_as_root=True) - - mox.ReplayAll() - - return mox - - def test_create_volume_no_pool_specified(self): - drv = self._driver - host = 'hostname@backend' # missing pool - with mock.patch.object(drv, '_ensure_shares_mounted'): - self.assertRaises(exception.InvalidHost, - self._driver.create_volume, FakeVolume(host, 1)) - - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup') - @mock.patch.object(client_7mode.Client, '__init__', return_value=None) - def test_do_setup(self, mock_client_init, mock_super_do_setup): - context = mock.Mock() - self.mock_object(perf_7mode, 'Performance7modeLibrary') - self._driver.do_setup(context) - mock_client_init.assert_called_once_with(**SEVEN_MODE_CONNECTION_INFO) - mock_super_do_setup.assert_called_once_with(context) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_all_default(self): - configuration = self._set_config(create_configuration()) - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_7mode, 'Client') - self.mock_object(perf_7mode, 'Performance7modeLibrary') - driver.do_setup(context='') - mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - 
mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_http_default_port(self): - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'http' - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_7mode, 'Client') - self.mock_object(perf_7mode, 'Performance7modeLibrary') - driver.do_setup(context='') - mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_https_default_port(self): - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'https' - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_7mode, 'Client') - self.mock_object(perf_7mode, 'Performance7modeLibrary') - driver.do_setup(context='') - mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTPS) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_http_non_default_port(self): - configuration = self._set_config(create_configuration()) - configuration.netapp_server_port = 81 - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_7mode, 'Client') - self.mock_object(perf_7mode, 'Performance7modeLibrary') - driver.do_setup(context='') - FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_7MODE_CONNECTION_INFO_HTTP, - port=81) - mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.Mock(return_value=(1, 20))) - @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) - def test_do_setup_https_non_default_port(self): - configuration = self._set_config(create_configuration()) - configuration.netapp_transport_type = 'https' - configuration.netapp_server_port = 446 - driver = common.NetAppDriver(configuration=configuration) - mock_invoke = self.mock_object(client_7mode, 'Client') - self.mock_object(perf_7mode, 'Performance7modeLibrary') - driver.do_setup(context='') - FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTPS, - port=446) - mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS) - - @mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error') - def test_check_for_setup_error(self, mock_super_check_for_setup_error): - self._driver.zapi_client.get_ontapi_version.return_value = (1, 20) - self.assertIsNone(self._driver.check_for_setup_error()) - mock_super_check_for_setup_error.assert_called_once_with() - - def test_check_for_setup_error_old_version(self): - self._driver.zapi_client.get_ontapi_version.return_value = (1, 8) - self.assertRaises(exception.VolumeBackendAPIException, - self._driver.check_for_setup_error) - - def test_check_for_setup_error_no_version(self): - self._driver.zapi_client.get_ontapi_version.return_value = None - self.assertRaises(exception.VolumeBackendAPIException, - self._driver.check_for_setup_error) - - def _prepare_clone_mock(self, status): - drv = self._driver - mox = self.mox - - volume = FakeVolume() - setattr(volume, 'provider_location', '127.0.0.1:/nfs') - - mox.StubOutWithMock(drv, '_get_export_ip_path') - - drv._get_export_ip_path( - mox_lib.IgnoreArg(), - 
mox_lib.IgnoreArg()).AndReturn(('127.0.0.1', '/nfs')) - return mox - - def test_clone_backing_file_for_volume_clear(self): - drv = self._driver - mox = self._prepare_clone_mock('fail') - drv.zapi_client = mox.CreateMockAnything() - drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn( - '/vol/vol1/nfs') - drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) - - mox.ReplayAll() - - volume_name = 'volume_name' - clone_name = 'clone_name' - volume_id = volume_name + six.text_type(hash(volume_name)) - try: - drv._clone_backing_file_for_volume(volume_name, clone_name, - volume_id) - except Exception as e: - if isinstance(e, netapp_api.NaApiError): - pass - else: - raise - - mox.VerifyAll() - - def test_get_pool(self): - pool = self._driver.get_pool({'provider_location': 'fake-share'}) - self.assertEqual('fake-share', pool) - - def _set_config(self, configuration): - super(NetApp7modeNfsDriverTestCase, self)._set_config( - configuration) - configuration.netapp_storage_family = 'ontap_7mode' - return configuration - - def test_clone_backing_file_for_volume(self): - drv = self._driver - mox = self._prepare_clone_mock('pass') - drv.zapi_client = mox.CreateMockAnything() - drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn( - '/vol/vol1/nfs') - drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) - - mox.ReplayAll() - - volume_name = 'volume_name' - clone_name = 'clone_name' - volume_id = volume_name + six.text_type(hash(volume_name)) - share = 'ip:/share' - - drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id, - share) - - mox.VerifyAll() diff --git a/cinder/tests/unit/test_qos_specs.py b/cinder/tests/unit/test_qos_specs.py index ef4048dca..31aa64427 100644 --- a/cinder/tests/unit/test_qos_specs.py +++ b/cinder/tests/unit/test_qos_specs.py @@ -17,14 +17,18 @@ Unit Tests for qos specs internal API """ +import mock +import six import time from oslo_db import exception as db_exc +from oslo_utils import timeutils from cinder import context from cinder import db from cinder import exception from cinder import test +from cinder.tests.unit import fake_constants as fake from cinder.volume import qos_specs from cinder.volume import volume_types @@ -38,22 +42,33 @@ def fake_db_qos_specs_create(context, values): pass +def fake_db_get_vol_type(vol_type_number=1): + return {'name': 'type-' + six.text_type(vol_type_number), + 'id': fake.QOS_SPEC_ID, + 'updated_at': None, + 'created_at': None, + 'deleted_at': None, + 'description': 'desc', + 'deleted': False, + 'is_public': True, + 'projects': None, + 'extra_specs': None} + + class QoSSpecsTestCase(test.TestCase): """Test cases for qos specs code.""" def setUp(self): super(QoSSpecsTestCase, self).setUp() self.ctxt = context.get_admin_context() - def _create_qos_specs(self, name, values=None): + def _create_qos_specs(self, name, consumer='back-end', values=None): """Create a transfer object.""" - if values: - specs = dict(name=name, qos_specs=values) - else: - specs = {'name': name, - 'qos_specs': { - 'consumer': 'back-end', - 'key1': 'value1', - 'key2': 'value2'}} + if values is None: + values = {'key1': 'value1', 'key2': 'value2'} + + specs = {'name': name, + 'consumer': consumer, + 'specs': values} return db.qos_specs_create(self.ctxt, specs)['id'] def test_create(self): @@ -61,27 +76,31 @@ class QoSSpecsTestCase(test.TestCase): 'key2': 'value2', 'key3': 'value3'} ref = qos_specs.create(self.ctxt, 'FakeName', input) - specs = qos_specs.get_qos_specs(self.ctxt, ref['id']) - expected = 
(dict(consumer='back-end')) - expected.update(dict(id=ref['id'])) - expected.update(dict(name='FakeName')) - del input['consumer'] - expected.update(dict(specs=input)) - self.assertDictMatch(expected, specs) - - self.stubs.Set(db, 'qos_specs_create', - fake_db_qos_specs_create) + specs_obj = qos_specs.get_qos_specs(self.ctxt, ref['id']) + specs_obj_dic = {'consumer': specs_obj['consumer'], + 'id': specs_obj['id'], + 'name': specs_obj['name'], + 'specs': specs_obj['specs']} + expected = {'consumer': 'back-end', + 'id': ref['id'], + 'name': 'FakeName', + 'specs': input} + self.assertDictMatch(expected, + specs_obj_dic) # qos specs must have unique name self.assertRaises(exception.QoSSpecsExists, - qos_specs.create, self.ctxt, 'DupQoSName', input) + qos_specs.create, self.ctxt, 'FakeName', input) - input.update({'consumer': 'FakeConsumer'}) # consumer must be one of: front-end, back-end, both + input['consumer'] = 'fake' self.assertRaises(exception.InvalidQoSSpecs, qos_specs.create, self.ctxt, 'QoSName', input) del input['consumer'] + + self.stubs.Set(db, 'qos_specs_create', + fake_db_qos_specs_create) # able to catch DBError self.assertRaises(exception.QoSSpecsCreateFailed, qos_specs.create, self.ctxt, 'FailQoSName', input) @@ -90,39 +109,46 @@ class QoSSpecsTestCase(test.TestCase): def fake_db_update(context, specs_id, values): raise db_exc.DBError() - input = {'key1': 'value1', - 'consumer': 'WrongPlace'} - # consumer must be one of: front-end, back-end, both - self.assertRaises(exception.InvalidQoSSpecs, - qos_specs.update, self.ctxt, 'fake_id', input) + qos = {'consumer': 'back-end', + 'specs': {'key1': 'value1'}} - input['consumer'] = 'front-end' # qos specs must exists self.assertRaises(exception.QoSSpecsNotFound, - qos_specs.update, self.ctxt, 'fake_id', input) + qos_specs.update, self.ctxt, 'fake_id', qos) + + specs_id = self._create_qos_specs('Name', + qos['consumer'], + qos['specs']) - specs_id = self._create_qos_specs('Name', input) qos_specs.update(self.ctxt, specs_id, - {'key1': 'newvalue1', - 'key2': 'value2'}) + {'key1': 'newvalue1', 'key2': 'value2'}) + specs = qos_specs.get_qos_specs(self.ctxt, specs_id) self.assertEqual('newvalue1', specs['specs']['key1']) self.assertEqual('value2', specs['specs']['key2']) + # consumer must be one of: front-end, back-end, both + self.assertRaises(exception.InvalidQoSSpecs, + qos_specs.update, self.ctxt, specs_id, + {'consumer': 'not-real'}) + self.stubs.Set(db, 'qos_specs_update', fake_db_update) self.assertRaises(exception.QoSSpecsUpdateFailed, - qos_specs.update, self.ctxt, 'fake_id', input) + qos_specs.update, self.ctxt, specs_id, {'key': + 'new_key'}) def test_delete(self): + qos_id = self._create_qos_specs('my_qos') + def fake_db_associations_get(context, id): - if id == 'InUse': - return True - else: - return False + vol_types = [] + if id == qos_id: + vol_types = [fake_db_get_vol_type(id)] + return vol_types def fake_db_delete(context, id): - if id == 'NotFound': - raise exception.QoSSpecsNotFound(specs_id=id) + return {'deleted': True, + 'deleted_at': timeutils.utcnow()} def fake_disassociate_all(context, id): pass @@ -137,9 +163,13 @@ class QoSSpecsTestCase(test.TestCase): self.assertRaises(exception.QoSSpecsNotFound, qos_specs.delete, self.ctxt, 'NotFound') self.assertRaises(exception.QoSSpecsInUse, - qos_specs.delete, self.ctxt, 'InUse') + qos_specs.delete, self.ctxt, qos_id) # able to delete in-use qos specs if force=True - qos_specs.delete(self.ctxt, 'InUse', force=True) + qos_specs.delete(self.ctxt, qos_id, force=True) + + # 
Can delete without forcing when no volume types + qos_id_with_no_vol_types = self._create_qos_specs('no_vol_types') + qos_specs.delete(self.ctxt, qos_id_with_no_vol_types, force=False) def test_delete_keys(self): def fake_db_qos_delete_key(context, id, key): @@ -155,21 +185,25 @@ class QoSSpecsTestCase(test.TestCase): else: pass - value = dict(consumer='front-end', - foo='Foo', bar='Bar', zoo='tiger') - specs_id = self._create_qos_specs('QoSName', value) + value = {'foo': 'Foo', 'bar': 'Bar', 'zoo': 'tiger'} + name = 'QoSName' + consumer = 'front-end' + specs_id = self._create_qos_specs(name, consumer, value) qos_specs.delete_keys(self.ctxt, specs_id, ['foo', 'bar']) - del value['consumer'] + del value['foo'] del value['bar'] - expected = {'name': 'QoSName', + expected = {'name': name, 'id': specs_id, - 'consumer': 'front-end', + 'consumer': consumer, 'specs': value} specs = qos_specs.get_qos_specs(self.ctxt, specs_id) - self.assertDictMatch(expected, specs) + specs_dic = {'consumer': specs['consumer'], + 'id': specs['id'], + 'name': specs['name'], + 'specs': specs['specs']} + self.assertDictMatch(expected, specs_dic) - self.stubs.Set(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.stubs.Set(db, 'qos_specs_item_delete', fake_db_qos_delete_key) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.delete_keys, self.ctxt, None, []) @@ -177,30 +211,28 @@ class QoSSpecsTestCase(test.TestCase): qos_specs.delete_keys, self.ctxt, 'NotFound', []) self.assertRaises(exception.QoSSpecsKeyNotFound, qos_specs.delete_keys, self.ctxt, - 'Found', ['NotFound']) + specs_id, ['NotFound']) self.assertRaises(exception.QoSSpecsKeyNotFound, - qos_specs.delete_keys, self.ctxt, 'Found', + qos_specs.delete_keys, self.ctxt, specs_id, ['foo', 'bar', 'NotFound']) - def test_get_associations(self): - def fake_db_associate_get(context, id): - if id == 'Trouble': - raise db_exc.DBError() - return [{'name': 'type-1', 'id': 'id-1'}, - {'name': 'type-2', 'id': 'id-2'}] + @mock.patch.object(db, 'qos_specs_associations_get') + def test_get_associations(self, mock_qos_specs_associations_get): + vol_types = [fake_db_get_vol_type(x) for x in range(2)] - self.stubs.Set(db, 'qos_specs_associations_get', - fake_db_associate_get) - expected1 = {'association_type': 'volume_type', - 'name': 'type-1', - 'id': 'id-1'} - expected2 = {'association_type': 'volume_type', - 'name': 'type-2', - 'id': 'id-2'} - res = qos_specs.get_associations(self.ctxt, 'specs-id') - self.assertIn(expected1, res) - self.assertIn(expected2, res) + mock_qos_specs_associations_get.return_value = vol_types + specs_id = self._create_qos_specs('new_spec') + res = qos_specs.get_associations(self.ctxt, specs_id) + for vol_type in vol_types: + expected_type = { + 'association_type': 'volume_type', + 'id': vol_type['id'], + 'name': vol_type['name'] + } + self.assertIn(expected_type, res) + e = exception.QoSSpecsNotFound(specs_id='Trouble') + mock_qos_specs_associations_get.side_effect = e self.assertRaises(exception.CinderException, qos_specs.get_associations, self.ctxt, 'Trouble') @@ -254,18 +286,8 @@ class QoSSpecsTestCase(test.TestCase): self.ctxt, 'specs-id', 'Invalid') def test_disassociate_qos_specs(self): - def fake_qos_specs_get(context, id): - if id == 'NotFound': - raise exception.QoSSpecsNotFound(specs_id=id) - else: - pass - def fake_db_disassociate(context, id, type_id): - if id == 'Trouble': - raise db_exc.DBError() - elif type_id == 'NotFound': - raise exception.VolumeTypeNotFound(volume_type_id=type_id) - pass + raise db_exc.DBError() type_ref = 
volume_types.create(self.ctxt, 'TypeName') specs_id = self._create_qos_specs('QoSName') @@ -279,16 +301,19 @@ class QoSSpecsTestCase(test.TestCase): res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(0, len(res)) - self.stubs.Set(db, 'qos_specs_disassociate', - fake_db_disassociate) - self.stubs.Set(qos_specs, 'get_qos_specs', - fake_qos_specs_get) self.assertRaises(exception.VolumeTypeNotFound, qos_specs.disassociate_qos_specs, - self.ctxt, 'specs-id', 'NotFound') + self.ctxt, specs_id, 'NotFound') + + # Verify we can disassociate specs from volume_type even if they are + # not associated with no error + qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) + qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) + self.stubs.Set(db, 'qos_specs_disassociate', + fake_db_disassociate) self.assertRaises(exception.QoSSpecsDisassociateFailed, qos_specs.disassociate_qos_specs, - self.ctxt, 'Trouble', 'id') + self.ctxt, specs_id, type_ref['id']) def test_disassociate_all(self): def fake_db_disassociate_all(context, id): @@ -326,57 +351,51 @@ class QoSSpecsTestCase(test.TestCase): self.ctxt, 'Trouble') def test_get_all_specs(self): - input = {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3', - 'consumer': 'both'} - specs_id1 = self._create_qos_specs('Specs1', input) - input.update({'key4': 'value4'}) - specs_id2 = self._create_qos_specs('Specs2', input) + qos_specs_list = [{'name': 'Specs1', + 'created_at': None, + 'updated_at': None, + 'deleted_at': None, + 'deleted': None, + 'consumer': 'both', + 'specs': {'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3'}}, + {'name': 'Specs2', + 'created_at': None, + 'updated_at': None, + 'deleted_at': None, + 'deleted': None, + 'consumer': 'both', + 'specs': {'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + 'key4': 'value4'}}] + + for qos_specs_dict in qos_specs_list: + qos_specs_id = self._create_qos_specs( + qos_specs_dict['name'], + qos_specs_dict['consumer'], + qos_specs_dict['specs']) + qos_specs_dict['id'] = qos_specs_id - expected1 = { - 'id': specs_id1, - 'name': 'Specs1', - 'consumer': 'both', - 'specs': {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3'}} - expected2 = { - 'id': specs_id2, - 'name': 'Specs2', - 'consumer': 'both', - 'specs': {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3', - 'key4': 'value4'}} res = qos_specs.get_all_specs(self.ctxt) - self.assertEqual(2, len(res)) - self.assertIn(expected1, res) - self.assertIn(expected2, res) + self.assertEqual(len(qos_specs_list), len(res)) + + qos_res_simple_dict = [] + # Need to make list of dictionaries instead of VOs for assertIn to work + for qos in res: + qos_res_simple_dict.append( + qos.obj_to_primitive()['versioned_object.data']) + for qos_spec in qos_specs_list: + self.assertIn(qos_spec, qos_res_simple_dict) def test_get_qos_specs(self): one_time_value = str(int(time.time())) - input = {'key1': one_time_value, + specs = {'key1': one_time_value, 'key2': 'value2', - 'key3': 'value3', - 'consumer': 'both'} - id = self._create_qos_specs('Specs1', input) - specs = qos_specs.get_qos_specs(self.ctxt, id) + 'key3': 'value3'} + qos_id = self._create_qos_specs('Specs1', 'both', specs) + specs = qos_specs.get_qos_specs(self.ctxt, qos_id) self.assertEqual(one_time_value, specs['specs']['key1']) - self.assertRaises(exception.InvalidQoSSpecs, qos_specs.get_qos_specs, self.ctxt, None) - - def test_get_qos_specs_by_name(self): - one_time_value = str(int(time.time())) - input = {'key1': one_time_value, - 
'key2': 'value2', - 'key3': 'value3', - 'consumer': 'back-end'} - self._create_qos_specs(one_time_value, input) - specs = qos_specs.get_qos_specs_by_name(self.ctxt, - one_time_value) - self.assertEqual(one_time_value, specs['specs']['key1']) - - self.assertRaises(exception.InvalidQoSSpecs, - qos_specs.get_qos_specs_by_name, self.ctxt, None) diff --git a/cinder/tests/unit/test_rpc.py b/cinder/tests/unit/test_rpc.py index f13bce9de..4fa43fd10 100644 --- a/cinder/tests/unit/test_rpc.py +++ b/cinder/tests/unit/test_rpc.py @@ -49,22 +49,6 @@ class RPCAPITestCase(test.TestCase): get_client.side_effect = fake_get_client FakeAPI() - @mock.patch('cinder.objects.Service.get_minimum_rpc_version', - return_value='liberty') - @mock.patch('cinder.objects.Service.get_minimum_obj_version', - return_value='liberty') - @mock.patch('cinder.rpc.get_client') - def test_init_liberty_caps(self, get_client, get_min_obj, get_min_rpc): - def fake_get_client(target, version_cap, serializer): - self.assertEqual(FakeAPI.TOPIC, target.topic) - self.assertEqual(FakeAPI.RPC_API_VERSION, target.version) - self.assertEqual(rpc.LIBERTY_RPC_VERSIONS[FakeAPI.BINARY], - version_cap) - self.assertEqual('liberty', serializer.version_cap) - - get_client.side_effect = fake_get_client - FakeAPI() - @mock.patch('cinder.objects.Service.get_minimum_rpc_version', return_value=None) @mock.patch('cinder.objects.Service.get_minimum_obj_version', diff --git a/cinder/tests/unit/test_service.py b/cinder/tests/unit/test_service.py index c41bbf2c2..55a592f58 100644 --- a/cinder/tests/unit/test_service.py +++ b/cinder/tests/unit/test_service.py @@ -19,6 +19,7 @@ Unit Tests for remote procedure calls using queue """ +import ddt import mock from oslo_concurrency import processutils from oslo_config import cfg @@ -51,9 +52,10 @@ CONF.register_opts(test_service_opts) class FakeManager(manager.Manager): """Fake manager for tests.""" def __init__(self, host=None, - db_driver=None, service_name=None): + db_driver=None, service_name=None, cluster=None): super(FakeManager, self).__init__(host=host, - db_driver=db_driver) + db_driver=db_driver, + cluster=cluster) def test_method(self): return 'manager' @@ -67,7 +69,9 @@ class ExtendedService(service.Service): class ServiceManagerTestCase(test.TestCase): """Test cases for Services.""" - def test_message_gets_to_manager(self): + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + def test_message_gets_to_manager(self, is_upgrading_mock): serv = service.Service('test', 'test', 'test', @@ -75,7 +79,9 @@ class ServiceManagerTestCase(test.TestCase): serv.start() self.assertEqual('manager', serv.test_method()) - def test_override_manager_method(self): + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + def test_override_manager_method(self, is_upgrading_mock): serv = ExtendedService('test', 'test', 'test', @@ -83,9 +89,11 @@ class ServiceManagerTestCase(test.TestCase): serv.start() self.assertEqual('service', serv.test_method()) + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'test': '1.5'}) @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'test': '1.3'}) - def test_reset(self): + def test_reset(self, is_upgrading_mock): serv = service.Service('test', 'test', 'test', @@ -97,29 +105,45 @@ class ServiceManagerTestCase(test.TestCase): class ServiceFlagsTestCase(test.TestCase): - def test_service_enabled_on_create_based_on_flag(self): + 
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + def test_service_enabled_on_create_based_on_flag(self, + is_upgrading_mock=False): + ctxt = context.get_admin_context() self.flags(enable_new_services=True) host = 'foo' binary = 'cinder-fake' - app = service.Service.create(host=host, binary=binary) - app.start() - app.stop() - ref = db.service_get(context.get_admin_context(), app.service_id) - db.service_destroy(context.get_admin_context(), app.service_id) - self.assertFalse(ref['disabled']) + cluster = 'cluster' + app = service.Service.create(host=host, binary=binary, cluster=cluster) + ref = db.service_get(ctxt, app.service_id) + db.service_destroy(ctxt, app.service_id) + self.assertFalse(ref.disabled) - def test_service_disabled_on_create_based_on_flag(self): + # Check that the cluster is also enabled + db_cluster = objects.ClusterList.get_all(ctxt)[0] + self.assertFalse(db_cluster.disabled) + db.cluster_destroy(ctxt, db_cluster.id) + + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + def test_service_disabled_on_create_based_on_flag(self, is_upgrading_mock): + ctxt = context.get_admin_context() self.flags(enable_new_services=False) host = 'foo' binary = 'cinder-fake' - app = service.Service.create(host=host, binary=binary) - app.start() - app.stop() - ref = db.service_get(context.get_admin_context(), app.service_id) - db.service_destroy(context.get_admin_context(), app.service_id) - self.assertTrue(ref['disabled']) + cluster = 'cluster' + app = service.Service.create(host=host, binary=binary, cluster=cluster) + ref = db.service_get(ctxt, app.service_id) + db.service_destroy(ctxt, app.service_id) + self.assertTrue(ref.disabled) + + # Check that the cluster is also enabled + db_cluster = objects.ClusterList.get_all(ctxt)[0] + self.assertTrue(db_cluster.disabled) + db.cluster_destroy(ctxt, db_cluster.id) +@ddt.ddt class ServiceTestCase(test.TestCase): """Test cases for Services.""" @@ -128,32 +152,123 @@ class ServiceTestCase(test.TestCase): self.host = 'foo' self.binary = 'cinder-fake' self.topic = 'fake' + self.service_ref = {'host': self.host, + 'binary': self.binary, + 'topic': self.topic, + 'report_count': 0, + 'availability_zone': 'nova', + 'id': 1} + self.ctxt = context.get_admin_context() + + def _check_app(self, app, cluster=None, cluster_exists=None, + is_upgrading=False, svc_id=None, added_to_cluster=None): + """Check that Service instance and DB service and cluster are ok.""" + self.assertIsNotNone(app) + + # Check that we have the service ID + self.assertTrue(hasattr(app, 'service_id')) + + if svc_id: + self.assertEqual(svc_id, app.service_id) + + # Check that cluster has been properly set + self.assertEqual(cluster, app.cluster) + # Check that the entry has been really created in the DB + svc = objects.Service.get_by_id(self.ctxt, app.service_id) + + cluster_name = cluster if cluster_exists is not False else None + + # Check that cluster name matches + self.assertEqual(cluster_name, svc.cluster_name) + + clusters = objects.ClusterList.get_all(self.ctxt) + + if added_to_cluster is None: + added_to_cluster = not is_upgrading + + if cluster_name: + # Make sure we have created the cluster in the DB + self.assertEqual(1, len(clusters)) + cluster = clusters[0] + self.assertEqual(cluster_name, cluster.name) + self.assertEqual(self.binary, cluster.binary) + else: + # Make sure we haven't created any cluster in the DB + self.assertListEqual([], clusters.objects) + + self.assertEqual(added_to_cluster, 
app.added_to_cluster) + + @ddt.data(False, True) + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n') + def test_create(self, is_upgrading, is_upgrading_mock): + """Test non clustered service creation.""" + is_upgrading_mock.return_value = is_upgrading - def test_create(self): # NOTE(vish): Create was moved out of mock replay to make sure that # the looping calls are created in StartService. app = service.Service.create(host=self.host, binary=self.binary, topic=self.topic) + self._check_app(app, is_upgrading=is_upgrading) - self.assertIsNotNone(app) + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + def test_create_with_cluster_not_upgrading(self, is_upgrading_mock): + """Test DB cluster creation when service is created.""" + cluster_name = 'cluster' + app = service.Service.create(host=self.host, binary=self.binary, + cluster=cluster_name, topic=self.topic) + self._check_app(app, cluster_name) - # Check that we have the service ID - self.assertTrue(hasattr(app, 'service_id')) - # Check that the entry has been really created in the DB - objects.Service.get_by_id(context.get_admin_context(), app.service_id) + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=True) + def test_create_with_cluster_upgrading(self, is_upgrading_mock): + """Test that we don't create the cluster while we are upgrading.""" + cluster_name = 'cluster' + app = service.Service.create(host=self.host, binary=self.binary, + cluster=cluster_name, topic=self.topic) + self._check_app(app, cluster_name, cluster_exists=False, + is_upgrading=True) - def test_report_state_newly_disconnected(self): - service_ref = {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + def test_create_svc_exists_upgrade_cluster(self, is_upgrading_mock): + """Test that we update cluster_name field when cfg has changed.""" + # Create the service in the DB + db_svc = db.service_create(context.get_admin_context(), + {'host': self.host, 'binary': self.binary, + 'topic': self.topic, + 'cluster_name': None}) + cluster_name = 'cluster' + app = service.Service.create(host=self.host, binary=self.binary, + cluster=cluster_name, topic=self.topic) + self._check_app(app, cluster_name, svc_id=db_svc.id) + + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=True) + def test_create_svc_exists_not_upgrade_cluster(self, is_upgrading_mock): + """Test we don't update cluster_name on cfg change when upgrading.""" + # Create the service in the DB + db_svc = db.service_create(context.get_admin_context(), + {'host': self.host, 'binary': self.binary, + 'topic': self.topic, + 'cluster': None}) + cluster_name = 'cluster' + app = service.Service.create(host=self.host, binary=self.binary, + cluster=cluster_name, topic=self.topic) + self._check_app(app, cluster_name, cluster_exists=False, + is_upgrading=True, svc_id=db_svc.id) + + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + @mock.patch.object(objects.service.Service, 'get_by_args') + @mock.patch.object(objects.service.Service, 'get_by_id') + def test_report_state_newly_disconnected(self, get_by_id, get_by_args, + is_upgrading_mock): + get_by_args.side_effect = exception.NotFound() + get_by_id.side_effect = db_exc.DBConnectionError() with mock.patch.object(objects.service, 'db') as mock_db: - mock_db.service_get_by_args.side_effect = 
exception.NotFound() - mock_db.service_create.return_value = service_ref - mock_db.service_get.side_effect = db_exc.DBConnectionError() + mock_db.service_create.return_value = self.service_ref serv = service.Service( self.host, @@ -166,17 +281,16 @@ class ServiceTestCase(test.TestCase): self.assertTrue(serv.model_disconnected) self.assertFalse(mock_db.service_update.called) - def test_report_state_disconnected_DBError(self): - service_ref = {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + @mock.patch.object(objects.service.Service, 'get_by_args') + @mock.patch.object(objects.service.Service, 'get_by_id') + def test_report_state_disconnected_DBError(self, get_by_id, get_by_args, + is_upgrading_mock): + get_by_args.side_effect = exception.NotFound() + get_by_id.side_effect = db_exc.DBError() with mock.patch.object(objects.service, 'db') as mock_db: - mock_db.service_get_by_args.side_effect = exception.NotFound() - mock_db.service_create.return_value = service_ref - mock_db.service_get.side_effect = db_exc.DBError() + mock_db.service_create.return_value = self.service_ref serv = service.Service( self.host, @@ -189,41 +303,32 @@ class ServiceTestCase(test.TestCase): self.assertTrue(serv.model_disconnected) self.assertFalse(mock_db.service_update.called) - def test_report_state_newly_connected(self): - service_ref = {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} - with mock.patch.object(objects.service, 'db') as mock_db,\ - mock.patch('cinder.db.sqlalchemy.api.get_by_id') as get_by_id: - mock_db.service_get_by_args.side_effect = exception.NotFound() - mock_db.service_create.return_value = service_ref - get_by_id.return_value = service_ref + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + @mock.patch('cinder.db.sqlalchemy.api.service_update') + @mock.patch('cinder.db.sqlalchemy.api.service_get') + def test_report_state_newly_connected(self, get_by_id, service_update, + is_upgrading_mock): + get_by_id.return_value = self.service_ref - serv = service.Service( - self.host, - self.binary, - self.topic, - 'cinder.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.model_disconnected = True - serv.report_state() + serv = service.Service( + self.host, + self.binary, + self.topic, + 'cinder.tests.unit.test_service.FakeManager' + ) + serv.start() + serv.model_disconnected = True + serv.report_state() - self.assertFalse(serv.model_disconnected) - self.assertTrue(mock_db.service_update.called) + self.assertFalse(serv.model_disconnected) + self.assertTrue(service_update.called) - def test_report_state_manager_not_working(self): - service_ref = {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + def test_report_state_manager_not_working(self, is_upgrading_mock): with mock.patch('cinder.db') as mock_db: - mock_db.service_get.return_value = service_ref + mock_db.service_get.return_value = self.service_ref serv = service.Service( self.host, @@ -238,7 +343,9 @@ class ServiceTestCase(test.TestCase): serv.manager.is_working.assert_called_once_with() self.assertFalse(mock_db.service_update.called) - def test_service_with_long_report_interval(self): + 
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) + def test_service_with_long_report_interval(self, is_upgrading_mock): self.override_config('service_down_time', 10) self.override_config('report_interval', 10) service.Service.create( @@ -246,9 +353,12 @@ class ServiceTestCase(test.TestCase): manager="cinder.tests.unit.test_service.FakeManager") self.assertEqual(25, CONF.service_down_time) + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) @mock.patch.object(rpc, 'get_server') @mock.patch('cinder.db') - def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc): + def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc, + is_upgrading_mock): serv = service.Service( self.host, self.binary, @@ -262,6 +372,8 @@ class ServiceTestCase(test.TestCase): serv.rpcserver.stop.assert_called_once_with() serv.rpcserver.wait.assert_called_once_with() + @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', + return_value=False) @mock.patch('cinder.service.Service.report_state') @mock.patch('cinder.service.Service.periodic_tasks') @mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall') @@ -269,7 +381,7 @@ class ServiceTestCase(test.TestCase): @mock.patch('cinder.db') def test_service_stop_waits_for_timers(self, mock_db, mock_rpc, mock_loopcall, mock_periodic, - mock_report): + mock_report, is_upgrading_mock): """Test that we wait for loopcalls only if stop succeeds.""" serv = service.Service( self.host, @@ -303,6 +415,61 @@ class ServiceTestCase(test.TestCase): self.assertEqual(1, serv.timers[1].stop.call_count) self.assertEqual(1, serv.timers[1].wait.call_count) + @mock.patch('cinder.manager.Manager.init_host') + @mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall') + @mock.patch('oslo_messaging.Target') + @mock.patch.object(rpc, 'get_server') + def _check_rpc_servers_and_init_host(self, app, added_to_cluster, cluster, + rpc_mock, target_mock, loop_mock, + init_host_mock): + app.start() + + # Since we have created the service entry we call init_host with + # added_to_cluster=True + init_host_mock.assert_called_once_with( + added_to_cluster=added_to_cluster) + + expected_target_calls = [mock.call(topic=self.topic, server=self.host)] + expected_rpc_calls = [mock.call(target_mock.return_value, mock.ANY, + mock.ANY), + mock.call().start()] + + if cluster and added_to_cluster: + self.assertIsNotNone(app.cluster_rpcserver) + expected_target_calls.append(mock.call(topic=self.topic, + server=cluster)) + expected_rpc_calls.extend(expected_rpc_calls[:]) + + # Check that we create message targets for host and cluster + target_mock.assert_has_calls(expected_target_calls) + + # Check we get and start rpc services for host and cluster + rpc_mock.assert_has_calls(expected_rpc_calls) + + self.assertIsNotNone(app.rpcserver) + + app.stop() + + @mock.patch('cinder.objects.Service.get_minimum_obj_version', + return_value='1.6') + def test_start_rpc_and_init_host_no_cluster(self, is_upgrading_mock): + """Test that without cluster we don't create rpc service.""" + app = service.Service.create(host=self.host, binary='cinder-volume', + cluster=None, topic=self.topic) + self._check_rpc_servers_and_init_host(app, True, None) + + @ddt.data('1.3', '1.7') + @mock.patch('cinder.objects.Service.get_minimum_obj_version') + def test_start_rpc_and_init_host_cluster(self, obj_version, + get_min_obj_mock): + """Test that with cluster we create the rpc service.""" + get_min_obj_mock.return_value = obj_version + cluster = 
'cluster' + app = service.Service.create(host=self.host, binary='cinder-volume', + cluster=cluster, topic=self.topic) + self._check_rpc_servers_and_init_host(app, obj_version != '1.3', + cluster) + class TestWSGIService(test.TestCase): diff --git a/cinder/tests/unit/test_ssh_utils.py b/cinder/tests/unit/test_ssh_utils.py index dc7e3ad77..88ade3b2a 100644 --- a/cinder/tests/unit/test_ssh_utils.py +++ b/cinder/tests/unit/test_ssh_utils.py @@ -14,14 +14,10 @@ import mock import paramiko import uuid -from oslo_config import cfg - from cinder import exception from cinder import ssh_utils from cinder import test -CONF = cfg.CONF - class FakeSock(object): def settimeout(self, timeout): @@ -306,8 +302,8 @@ class SSHPoolTestCase(test.TestCase): max_size=1) with sshpool.item() as ssh: - self.assertTrue(isinstance(ssh.get_policy(), - paramiko.RejectPolicy)) + self.assertIsInstance(ssh.get_policy(), + paramiko.RejectPolicy) @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') @@ -326,5 +322,5 @@ class SSHPoolTestCase(test.TestCase): max_size=1) with sshpool.item() as ssh: - self.assertTrue(isinstance(ssh.get_policy(), - paramiko.AutoAddPolicy)) + self.assertIsInstance(ssh.get_policy(), + paramiko.AutoAddPolicy) diff --git a/cinder/tests/unit/test_synology_common.py b/cinder/tests/unit/test_synology_common.py index 4bbb6e728..a4c9e4769 100644 --- a/cinder/tests/unit/test_synology_common.py +++ b/cinder/tests/unit/test_synology_common.py @@ -16,6 +16,7 @@ """Tests for the Synology iSCSI volume driver.""" import copy +import math import mock from oslo_utils import units @@ -109,6 +110,7 @@ POOL_INFO = { 'readonly': False, 'fs_type': 'ext4', 'location': 'internal', + 'eppool_used_byte': '139177984', 'size_total_byte': '487262806016', 'volume_id': 1, 'size_free_byte': '486521139200', @@ -367,6 +369,8 @@ class SynoCommonTestCase(test.TestCase): config.pool_name = POOL_NAME config.chap_username = 'abcd' config.chap_password = 'qwerty' + config.reserved_percentage = 0 + config.max_over_subscription_ratio = 20 return config @@ -457,13 +461,58 @@ class SynoCommonTestCase(test.TestCase): result = self.common._get_pool_size() self.assertEqual((int(int(POOL_INFO['size_free_byte']) / units.Gi), - int(int(POOL_INFO['size_total_byte']) / units.Gi)), + int(int(POOL_INFO['size_total_byte']) / units.Gi), + math.ceil((float(POOL_INFO['size_total_byte']) - + float(POOL_INFO['size_free_byte']) - + float(POOL_INFO['eppool_used_byte'])) / + units.Gi)), result) del pool_info['size_free_byte'] self.assertRaises(exception.MalformedResponse, self.common._get_pool_size) + def test__get_pool_lun_provisioned_size(self): + out = { + 'data': { + 'luns': [{ + 'lun_id': 1, + 'location': '/' + POOL_NAME, + 'size': 5368709120 + }, { + 'lun_id': 2, + 'location': '/' + POOL_NAME, + 'size': 3221225472 + }] + }, + 'success': True + } + self.common.exec_webapi = mock.Mock(return_value=out) + + result = self.common._get_pool_lun_provisioned_size() + (self.common.exec_webapi. 
+ assert_called_with('SYNO.Core.ISCSI.LUN', + 'list', + mock.ANY, + location='/' + POOL_NAME)) + self.assertEqual(int(math.ceil(float(5368709120 + 3221225472) / + units.Gi)), + result) + + def test__get_pool_lun_provisioned_size_error(self): + out = { + 'data': {}, + 'success': True + } + self.common.exec_webapi = mock.Mock(return_value=out) + + self.assertRaises(exception.MalformedResponse, + self.common._get_pool_lun_provisioned_size) + + self.conf.pool_name = '' + self.assertRaises(exception.InvalidConfigurationValue, + self.common._get_pool_lun_provisioned_size) + def test__get_lun_info(self): out = { 'data': { @@ -847,7 +896,7 @@ class SynoCommonTestCase(test.TestCase): VOLUME['name'], NEW_VOLUME['name']) - @mock.patch('time.sleep') + @mock.patch('eventlet.sleep') def test__check_lun_status_normal(self, _patched_sleep): self.common._get_lun_status = ( mock.Mock(side_effect=[ @@ -869,7 +918,7 @@ class SynoCommonTestCase(test.TestCase): self.common._check_lun_status_normal, VOLUME['name']) - @mock.patch('time.sleep') + @mock.patch('eventlet.sleep') def test__check_snapshot_status_healthy(self, _patched_sleep): self.common._get_snapshot_status = ( mock.Mock(side_effect=[ @@ -1180,7 +1229,9 @@ class SynoCommonTestCase(test.TestCase): self.assertIsNone(result) def test_update_volume_stats(self): - self.common._get_pool_size = mock.Mock(return_value=(10, 100)) + self.common._get_pool_size = mock.Mock(return_value=(10, 100, 50)) + self.common._get_pool_lun_provisioned_size = ( + mock.Mock(return_value=300)) data = { 'volume_backend_name': 'DiskStation', @@ -1193,6 +1244,8 @@ class SynoCommonTestCase(test.TestCase): 'reserved_percentage': 0, 'free_capacity_gb': 10, 'total_capacity_gb': 100, + 'provisioned_capacity_gb': 350, + 'max_over_subscription_ratio': 20, 'iscsi_ip_address': '10.0.0.1', 'pool_name': 'volume1', 'backend_info': @@ -1400,7 +1453,7 @@ class SynoCommonTestCase(test.TestCase): SNAPSHOT) self.common.exec_webapi = ( - mock.Mock(side_effect=exception.SynoAuthError)) + mock.Mock(side_effect=exception.SynoAuthError(reason='dont care'))) self.assertRaises(exception.SynoAuthError, self.common.create_snapshot, @@ -1426,7 +1479,7 @@ class SynoCommonTestCase(test.TestCase): 'delete_snapshot', mock.ANY, snapshot_uuid=DS_SNAPSHOT_UUID, - delete_by='Cinder')) + deleted_by='Cinder')) self.assertIsNone(result) result = self.common.delete_snapshot(SNAPSHOT) diff --git a/cinder/tests/unit/test_utils.py b/cinder/tests/unit/test_utils.py index b483d5c1b..103c360f9 100644 --- a/cinder/tests/unit/test_utils.py +++ b/cinder/tests/unit/test_utils.py @@ -18,9 +18,9 @@ import functools import os import time +import ddt import mock from oslo_concurrency import processutils as putils -from oslo_config import cfg from oslo_utils import timeutils import six from six.moves import range @@ -29,12 +29,10 @@ import webob.exc import cinder from cinder import exception from cinder import test +from cinder.tests.unit import fake_constants as fake from cinder import utils -CONF = cfg.CONF - - class ExecuteTestCase(test.TestCase): @mock.patch('cinder.utils.processutils.execute') def test_execute(self, mock_putils_exe): @@ -241,36 +239,6 @@ class GenericUtilsTestCase(test.TestCase): result = utils.service_is_up(service) self.assertFalse(result) - def test_safe_parse_xml(self): - - normal_body = ('' - 'heythere') - - def killer_body(): - return ((""" - - ]> - - - %(d)s - - """) % { - 'a': 'A' * 10, - 'b': '&a;' * 10, - 'c': '&b;' * 10, - 'd': '&c;' * 9999, - }).strip() - - dom = 
utils.safe_minidom_parse_string(normal_body) - # Some versions of minidom inject extra newlines so we ignore them - result = str(dom.toxml()).replace('\n', '') - self.assertEqual(normal_body, result) - - self.assertRaises(ValueError, - utils.safe_minidom_parse_string, - killer_body()) - def test_check_ssh_injection(self): cmd_list = ['ssh', '-D', 'my_name@name_of_remote_computer'] self.assertIsNone(utils.check_ssh_injection(cmd_list)) @@ -796,6 +764,52 @@ class BrickUtils(test.TestCase): 'protocol', mock_helper.return_value, driver=None, use_multipath=False, device_scan_attempts=3) + @mock.patch('os_brick.encryptors.get_volume_encryptor') + @mock.patch('cinder.utils.get_root_helper') + def test_brick_attach_volume_encryptor(self, mock_helper, + mock_get_encryptor): + attach_info = {'device': {'path': 'dev/sda'}, + 'conn': {'driver_volume_type': 'iscsi', + 'data': {}, }} + encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} + ctxt = mock.Mock(name='context') + mock_encryptor = mock.Mock() + mock_get_encryptor.return_value = mock_encryptor + utils.brick_attach_volume_encryptor(ctxt, attach_info, encryption) + + connection_info = attach_info['conn'] + connection_info['data']['device_path'] = attach_info['device']['path'] + mock_helper.assert_called_once_with() + mock_get_encryptor.assert_called_once_with( + root_helper=mock_helper.return_value, + connection_info=connection_info, + keymgr=mock.ANY, + **encryption) + mock_encryptor.attach_volume.assert_called_once_with( + ctxt, **encryption) + + @mock.patch('os_brick.encryptors.get_volume_encryptor') + @mock.patch('cinder.utils.get_root_helper') + def test_brick_detach_volume_encryptor(self, + mock_helper, mock_get_encryptor): + attach_info = {'device': {'path': 'dev/sda'}, + 'conn': {'driver_volume_type': 'iscsi', + 'data': {}, }} + encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} + mock_encryptor = mock.Mock() + mock_get_encryptor.return_value = mock_encryptor + utils.brick_detach_volume_encryptor(attach_info, encryption) + + mock_helper.assert_called_once_with() + connection_info = attach_info['conn'] + connection_info['data']['device_path'] = attach_info['device']['path'] + mock_get_encryptor.assert_called_once_with( + root_helper=mock_helper.return_value, + connection_info=connection_info, + keymgr=mock.ANY, + **encryption) + mock_encryptor.detach_volume.assert_called_once_with(**encryption) + class StringLengthTestCase(test.TestCase): def test_check_string_length(self): @@ -1065,6 +1079,7 @@ class TestRetryDecorator(test.TestCase): self.assertFalse(mock_sleep.called) +@ddt.ddt class LogTracingTestCase(test.TestCase): def test_utils_setup_tracing(self): @@ -1293,43 +1308,82 @@ class LogTracingTestCase(test.TestCase): self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) - def test_utils_calculate_virtual_free_capacity_with_thick(self): - host_stat = {'total_capacity_gb': 30.01, - 'free_capacity_gb': 28.01, - 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, + def test_utils_trace_method_with_password_dict(self): + mock_logging = self.mock_object(utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + @utils.trace_method + def _trace_test_method(*args, **kwargs): + return {'something': 'test', + 'password': 'Now you see me'} + + utils.setup_tracing(['method']) + result = _trace_test_method(self) + 
expected_unmasked_dict = {'something': 'test', + 'password': 'Now you see me'} + + self.assertEqual(expected_unmasked_dict, result) + self.assertEqual(2, mock_log.debug.call_count) + self.assertIn("'password': '***'", + str(mock_log.debug.call_args_list[1])) + + def test_utils_trace_method_with_password_str(self): + mock_logging = self.mock_object(utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + @utils.trace_method + def _trace_test_method(*args, **kwargs): + return "'adminPass': 'Now you see me'" + + utils.setup_tracing(['method']) + result = _trace_test_method(self) + expected_unmasked_str = "'adminPass': 'Now you see me'" + + self.assertEqual(expected_unmasked_str, result) + self.assertEqual(2, mock_log.debug.call_count) + self.assertIn("'adminPass': '***'", + str(mock_log.debug.call_args_list[1])) + + @ddt.data( + {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 1.0, + 'thin_support': False, 'thick_support': True, + 'is_thin_lun': False, 'expected': 27.01}, + {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0, + 'thin_support': True, 'thick_support': False, + 'is_thin_lun': True, 'expected': 37.02}, + {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0, + 'thin_support': True, 'thick_support': True, + 'is_thin_lun': True, 'expected': 37.02}, + {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 2.0, + 'thin_support': True, 'thick_support': True, + 'is_thin_lun': False, 'expected': 27.01}, + ) + @ddt.unpack + def test_utils_calculate_virtual_free_capacity_provision_type( + self, total, free, provisioned, max_ratio, thin_support, + thick_support, is_thin_lun, expected): + host_stat = {'total_capacity_gb': total, + 'free_capacity_gb': free, + 'provisioned_capacity_gb': provisioned, + 'max_over_subscription_ratio': max_ratio, + 'thin_provisioning_support': thin_support, + 'thick_provisioning_support': thick_support, 'reserved_percentage': 5} - free = utils.calculate_virtual_free_capacity( + free_capacity = utils.calculate_virtual_free_capacity( host_stat['total_capacity_gb'], host_stat['free_capacity_gb'], host_stat['provisioned_capacity_gb'], host_stat['thin_provisioning_support'], host_stat['max_over_subscription_ratio'], - host_stat['reserved_percentage']) + host_stat['reserved_percentage'], + is_thin_lun) - self.assertEqual(27.01, free) - - def test_utils_calculate_virtual_free_capacity_with_thin(self): - host_stat = {'total_capacity_gb': 20.01, - 'free_capacity_gb': 18.01, - 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5} - - free = utils.calculate_virtual_free_capacity( - host_stat['total_capacity_gb'], - host_stat['free_capacity_gb'], - host_stat['provisioned_capacity_gb'], - host_stat['thin_provisioning_support'], - host_stat['max_over_subscription_ratio'], - host_stat['reserved_percentage']) - - self.assertEqual(37.02, free) + self.assertEqual(expected, free_capacity) class Comparable(utils.ComparableMixin): diff --git a/cinder/tests/unit/test_v7000_iscsi.py b/cinder/tests/unit/test_v7000_iscsi.py index a6bf37fb6..2c0a07b3e 100644 --- a/cinder/tests/unit/test_v7000_iscsi.py +++ b/cinder/tests/unit/test_v7000_iscsi.py @@ -21,7 +21,8 @@ import mock from cinder import exception from cinder import test -from cinder.tests.unit import fake_vmem_client as vmemclient +from 
cinder.tests.unit.volume.drivers.violin import \ + fake_vmem_client as vmemclient from cinder.volume import configuration as conf from cinder.volume.drivers.violin import v7000_common from cinder.volume.drivers.violin import v7000_iscsi diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py index baf4db95f..3b8b1a9d4 100644 --- a/cinder/tests/unit/test_volume.py +++ b/cinder/tests/unit/test_volume.py @@ -47,7 +47,7 @@ from cinder import coordination from cinder import db from cinder import exception from cinder.image import image_utils -from cinder import keymgr +from cinder import keymgr as key_manager from cinder.message import defined_messages from cinder.message import resource_types from cinder import objects @@ -55,11 +55,11 @@ from cinder.objects import fields import cinder.policy from cinder import quota from cinder import test +from cinder.tests import fake_driver from cinder.tests.unit.api import fakes from cinder.tests.unit.brick import fake_lvm from cinder.tests.unit import conf_fixture from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_driver from cinder.tests.unit import fake_service from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume @@ -127,7 +127,7 @@ class FakeImageService(object): class BaseVolumeTestCase(test.TestCase): """Test Case for volumes.""" - FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' + FAKE_UUID = fake.IMAGE_ID def setUp(self): super(BaseVolumeTestCase, self).setUp() @@ -153,7 +153,7 @@ class BaseVolumeTestCase(test.TestCase): self.stubs.Set(brick_lvm.LVM, 'get_all_volume_groups', self.fake_get_all_volume_groups) - fake_image.stub_out_image_service(self.stubs) + fake_image.mock_image_service(self) self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True) self.stubs.Set(os.path, 'exists', lambda x: True) self.volume.driver.set_initialized() @@ -169,16 +169,6 @@ class BaseVolumeTestCase(test.TestCase): except OSError: pass - def assert_notify_called(self, mock_notify, calls): - for i in range(0, len(calls)): - mock_call = mock_notify.call_args_list[i] - call = calls[i] - - posargs = mock_call[0] - - self.assertEqual(call[0], posargs[0]) - self.assertEqual(call[1], posargs[2]) - def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True): return [{'name': 'cinder-volumes', 'size': '5.00', @@ -250,105 +240,71 @@ class BaseVolumeTestCase(test.TestCase): class AvailabilityZoneTestCase(BaseVolumeTestCase): + def setUp(self): + super(AvailabilityZoneTestCase, self).setUp() + self.get_all = self.patch( + 'cinder.db.service_get_all', autospec=True, + return_value = [{'availability_zone': 'a', 'disabled': False}]) + def test_list_availability_zones_cached(self): - volume_api = cinder.volume.api.API() - with mock.patch.object(volume_api.db, - 'service_get_all_by_topic') as get_all: - get_all.return_value = [ - { - 'availability_zone': 'a', - 'disabled': False, - }, - ] - azs = volume_api.list_availability_zones(enable_cache=True) - self.assertEqual([{"name": 'a', 'available': True}], list(azs)) - self.assertIsNotNone(volume_api.availability_zones_last_fetched) - self.assertTrue(get_all.called) - volume_api.list_availability_zones(enable_cache=True) - self.assertEqual(1, get_all.call_count) + azs = self.volume_api.list_availability_zones(enable_cache=True) + self.assertEqual([{"name": 'a', 'available': True}], list(azs)) + self.assertIsNotNone(self.volume_api.availability_zones_last_fetched) + self.assertTrue(self.get_all.called) + 
self.volume_api.list_availability_zones(enable_cache=True) + self.assertEqual(1, self.get_all.call_count) def test_list_availability_zones_no_cached(self): - volume_api = cinder.volume.api.API() - with mock.patch.object(volume_api.db, - 'service_get_all_by_topic') as get_all: - get_all.return_value = [ - { - 'availability_zone': 'a', - 'disabled': False, - }, - ] - azs = volume_api.list_availability_zones(enable_cache=False) - self.assertEqual([{"name": 'a', 'available': True}], list(azs)) - self.assertIsNone(volume_api.availability_zones_last_fetched) + azs = self.volume_api.list_availability_zones(enable_cache=False) + self.assertEqual([{"name": 'a', 'available': True}], list(azs)) + self.assertIsNone(self.volume_api.availability_zones_last_fetched) - with mock.patch.object(volume_api.db, - 'service_get_all_by_topic') as get_all: - get_all.return_value = [ - { - 'availability_zone': 'a', - 'disabled': True, - }, - ] - azs = volume_api.list_availability_zones(enable_cache=False) - self.assertEqual([{"name": 'a', 'available': False}], list(azs)) - self.assertIsNone(volume_api.availability_zones_last_fetched) + self.get_all.return_value[0]['disabled'] = True + azs = self.volume_api.list_availability_zones(enable_cache=False) + self.assertEqual([{"name": 'a', 'available': False}], list(azs)) + self.assertIsNone(self.volume_api.availability_zones_last_fetched) def test_list_availability_zones_refetched(self): timeutils.set_time_override() self.addCleanup(timeutils.clear_time_override) - volume_api = cinder.volume.api.API() - with mock.patch.object(volume_api.db, - 'service_get_all_by_topic') as get_all: - get_all.return_value = [ - { - 'availability_zone': 'a', - 'disabled': False, - }, - ] - azs = volume_api.list_availability_zones(enable_cache=True) - self.assertEqual([{"name": 'a', 'available': True}], list(azs)) - self.assertIsNotNone(volume_api.availability_zones_last_fetched) - last_fetched = volume_api.availability_zones_last_fetched - self.assertTrue(get_all.called) - volume_api.list_availability_zones(enable_cache=True) - self.assertEqual(1, get_all.call_count) + azs = self.volume_api.list_availability_zones(enable_cache=True) + self.assertEqual([{"name": 'a', 'available': True}], list(azs)) + self.assertIsNotNone(self.volume_api.availability_zones_last_fetched) + last_fetched = self.volume_api.availability_zones_last_fetched + self.assertTrue(self.get_all.called) + self.volume_api.list_availability_zones(enable_cache=True) + self.assertEqual(1, self.get_all.call_count) - # The default cache time is 3600, push past that... - timeutils.advance_time_seconds(3800) - get_all.return_value = [ - { - 'availability_zone': 'a', - 'disabled': False, - }, - { - 'availability_zone': 'b', - 'disabled': False, - }, - ] - azs = volume_api.list_availability_zones(enable_cache=True) - azs = sorted([n['name'] for n in azs]) - self.assertEqual(['a', 'b'], azs) - self.assertEqual(2, get_all.call_count) - self.assertGreater(volume_api.availability_zones_last_fetched, - last_fetched) + # The default cache time is 3600, push past that... 
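# A minimal, self-contained sketch of the TTL-cache pattern these
# availability-zone tests exercise (hypothetical AZCache class, not the
# real cinder.volume.api.API): results are served from memory until the
# configured cache duration elapses, then refetched from the DB.
import time


class AZCache(object):
    def __init__(self, fetch_func, ttl=3600):
        self._fetch = fetch_func      # e.g. a cinder.db.service_get_all call
        self._ttl = ttl               # the default cache time asserted above
        self._cached = None
        self._last_fetched = None

    def list_availability_zones(self, enable_cache=True):
        now = time.time()
        if (enable_cache and self._cached is not None and
                now - self._last_fetched < self._ttl):
            return self._cached       # cache hit: no extra DB call
        azs = self._fetch()           # miss, expired, or caching disabled
        if enable_cache:
            self._cached, self._last_fetched = azs, now
        return azs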
+ timeutils.advance_time_seconds(3800) + self.get_all.return_value = [ + { + 'availability_zone': 'a', + 'disabled': False, + }, + { + 'availability_zone': 'b', + 'disabled': False, + }, + ] + azs = self.volume_api.list_availability_zones(enable_cache=True) + azs = sorted([n['name'] for n in azs]) + self.assertEqual(['a', 'b'], azs) + self.assertEqual(2, self.get_all.call_count) + self.assertGreater(self.volume_api.availability_zones_last_fetched, + last_fetched) def test_list_availability_zones_enabled_service(self): - services = [ + def sort_func(obj): + return obj['name'] + + self.get_all.return_value = [ {'availability_zone': 'ping', 'disabled': 0}, {'availability_zone': 'ping', 'disabled': 1}, {'availability_zone': 'pong', 'disabled': 0}, {'availability_zone': 'pung', 'disabled': 1}, ] - def stub_service_get_all_by_topic(*args, **kwargs): - return services - - self.stubs.Set(db, 'service_get_all_by_topic', - stub_service_get_all_by_topic) - - def sort_func(obj): - return obj['name'] - volume_api = cinder.volume.api.API() azs = volume_api.list_availability_zones() azs = sorted(azs, key=sort_func) @@ -362,6 +318,7 @@ class AvailabilityZoneTestCase(BaseVolumeTestCase): self.assertEqual(expected, azs) +@ddt.ddt class VolumeTestCase(BaseVolumeTestCase): def setUp(self): @@ -518,6 +475,27 @@ class VolumeTestCase(BaseVolumeTestCase): self.volume.delete_volume(self.context, vol0.id) self.volume.delete_volume(self.context, vol1.id) + @mock.patch('cinder.volume.manager.VolumeManager.' + '_include_resources_in_cluster') + def test_init_host_cluster_not_changed(self, include_in_cluster_mock): + self.volume.init_host(False) + include_in_cluster_mock.assert_not_called() + + @mock.patch('cinder.objects.volume.VolumeList.include_in_cluster') + @mock.patch('cinder.objects.consistencygroup.ConsistencyGroupList.' 
+ 'include_in_cluster') + def test_init_host_added_to_cluster(self, vol_include_mock, + cg_include_mock): + self.mock_object(self.volume, 'cluster', mock.sentinel.cluster) + self.volume.init_host(True) + + vol_include_mock.assert_called_once_with(mock.ANY, + mock.sentinel.cluster, + host=self.volume.host) + cg_include_mock.assert_called_once_with(mock.ANY, + mock.sentinel.cluster, + host=self.volume.host) + @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') @mock.patch('cinder.objects.service.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.3'}) @@ -777,20 +755,28 @@ class VolumeTestCase(BaseVolumeTestCase): self.context, volume_id) - def test_create_volume_with_invalid_metadata(self): - """Test volume create with too much metadata fails.""" - volume_api = cinder.volume.api.API() - test_meta = {'fake_key': 'fake_value' * 256} - self.assertRaises(exception.InvalidVolumeMetadataSize, - volume_api.create, + @mock.patch('cinder.db.volume_metadata_update') + def test_create_volume_metadata(self, metadata_update): + metadata = {'fake_key': 'fake_value'} + metadata_update.return_value = metadata + volume = tests_utils.create_volume(self.context, **self.volume_params) + res = self.volume_api.create_volume_metadata(self.context, + volume, metadata) + metadata_update.assert_called_once_with(self.context, volume.id, + metadata, False, + common.METADATA_TYPES.user) + self.assertEqual(metadata, res) + + @ddt.data('maintenance', 'uploading') + def test_create_volume_metadata_maintenance(self, status): + metadata = {'fake_key': 'fake_value'} + volume = tests_utils.create_volume(self.context, **self.volume_params) + volume['status'] = status + self.assertRaises(exception.InvalidVolume, + self.volume_api.create_volume_metadata, self.context, - 1, - 'name', - 'description', - None, - None, - None, - test_meta) + volume, + metadata) def test_update_volume_metadata_with_metatype(self): """Test update volume metadata with different metadata type.""" @@ -800,11 +786,8 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) self.volume.create_volume(self.context, volume.id, volume=volume) - - volume_api = cinder.volume.api.API() - # update user metadata associated with the volume. - result_meta = volume_api.update_volume_metadata( + result_meta = self.volume_api.update_volume_metadata( self.context, volume, test_meta2, @@ -813,7 +796,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertEqual(test_meta2, result_meta) # create image metadata associated with the volume. - result_meta = volume_api.update_volume_metadata( + result_meta = self.volume_api.update_volume_metadata( self.context, volume, test_meta1, @@ -822,7 +805,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertEqual(test_meta1, result_meta) # update image metadata associated with the volume. - result_meta = volume_api.update_volume_metadata( + result_meta = self.volume_api.update_volume_metadata( self.context, volume, test_meta2, @@ -832,7 +815,7 @@ class VolumeTestCase(BaseVolumeTestCase): # update volume metadata with invalid metadta type. 
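# Hypothetical sketch of the metadata-type dispatch asserted below: an update
# must name a known metadata type (user or image) before any DB write happens,
# otherwise an InvalidMetadataType-style error is raised. This mirrors the
# behaviour under test but is not the real cinder.volume.api implementation.
import enum


class MetadataTypes(enum.Enum):
    user = 1
    image = 2


class InvalidMetadataTypeError(Exception):
    pass


def update_volume_metadata(store, volume_id, metadata, meta_type):
    if not isinstance(meta_type, MetadataTypes):
        raise InvalidMetadataTypeError(meta_type)
    store.setdefault((volume_id, meta_type), {}).update(metadata)
    return dict(store[(volume_id, meta_type)])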
self.assertRaises(exception.InvalidMetadataType, - volume_api.update_volume_metadata, + self.volume_api.update_volume_metadata, self.context, volume, test_meta1, @@ -846,9 +829,8 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' - volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, - volume_api.update_volume_metadata, + self.volume_api.update_volume_metadata, self.context, volume, test_meta1, @@ -859,9 +841,8 @@ class VolumeTestCase(BaseVolumeTestCase): def test_update_with_ovo(self, volume_update): """Test update volume using oslo_versionedobject.""" volume = tests_utils.create_volume(self.context, **self.volume_params) - volume_api = cinder.volume.api.API() updates = {'display_name': 'foobbar'} - volume_api.update(self.context, volume, updates) + self.volume_api.update(self.context, volume, updates) volume_update.assert_called_once_with(self.context, volume.id, updates) self.assertEqual('foobbar', volume.display_name) @@ -875,11 +856,8 @@ class VolumeTestCase(BaseVolumeTestCase): **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id, volume=volume) - - volume_api = cinder.volume.api.API() - # delete user metadata associated with the volume. - volume_api.delete_volume_metadata( + self.volume_api.delete_volume_metadata( self.context, volume, 'fake_key2', @@ -889,7 +867,7 @@ class VolumeTestCase(BaseVolumeTestCase): db.volume_metadata_get(self.context, volume_id)) # create image metadata associated with the volume. - result_meta = volume_api.update_volume_metadata( + result_meta = self.volume_api.update_volume_metadata( self.context, volume, test_meta1, @@ -899,7 +877,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertEqual(test_meta1, result_meta) # delete image metadata associated with the volume. - volume_api.delete_volume_metadata( + self.volume_api.delete_volume_metadata( self.context, volume, 'fake_key2', @@ -914,7 +892,7 @@ class VolumeTestCase(BaseVolumeTestCase): # delete volume metadata with invalid metadta type. 
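# Companion sketch for the maintenance checks in the surrounding tests: any
# metadata operation is refused while the volume is in 'maintenance' (or, for
# creation, 'uploading'), before the metadata type is even inspected. The
# guard below is illustrative, not the actual cinder.volume.api code.
class InvalidVolumeError(Exception):
    pass


def check_metadata_ops_allowed(volume_status, disallowed=('maintenance',)):
    if volume_status in disallowed:
        raise InvalidVolumeError(
            'metadata operations refused while volume is %s' % volume_status)


# e.g. the delete path would run check_metadata_ops_allowed(volume['status'])
# first, so a volume flipped to 'maintenance' raises before any key is removed.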
self.assertRaises(exception.InvalidMetadataType, - volume_api.delete_volume_metadata, + self.volume_api.delete_volume_metadata, self.context, volume, 'fake_key1', @@ -927,9 +905,8 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' - volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, - volume_api.delete_volume_metadata, + self.volume_api.delete_volume_metadata, self.context, volume, 'fake_key1', @@ -941,9 +918,8 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' - volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, - volume_api.attach, + self.volume_api.attach, self.context, volume, None, None, None, None) @@ -1069,17 +1045,24 @@ class VolumeTestCase(BaseVolumeTestCase): volume_type=db_vol_type) self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) - @mock.patch.object(keymgr, 'API', fake_keymgr.fake_api) - def test_create_volume_with_encrypted_volume_type(self): + @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) + def test_create_volume_with_encrypted_volume_type_aes(self): ctxt = context.get_admin_context() + cipher = 'aes-xts-plain64' + key_size = 256 + control_location = 'front-end' + db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS'}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', - {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER}) + {'control_location': control_location, + 'provider': ENCRYPTION_PROVIDER, + 'cipher': cipher, + 'key_size': key_size}) volume_api = cinder.volume.api.API() @@ -1090,7 +1073,55 @@ class VolumeTestCase(BaseVolumeTestCase): 'name', 'description', volume_type=db_vol_type) + + key_manager = volume_api.key_manager + key = key_manager.get(self.context, volume['encryption_key_id']) + self.assertEqual(key_size, len(key.get_encoded()) * 8) + self.assertEqual('aes', key.algorithm) + + metadata = db.volume_encryption_metadata_get(self.context, volume.id) self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) + self.assertEqual(cipher, metadata.get('cipher')) + self.assertEqual(key_size, metadata.get('key_size')) + self.assertIsNotNone(volume['encryption_key_id']) + + @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) + def test_create_volume_with_encrypted_volume_type_blowfish(self): + ctxt = context.get_admin_context() + + cipher = 'blowfish-cbc' + key_size = 32 + control_location = 'front-end' + + db.volume_type_create(ctxt, + {'id': '61298380-0c12-11e3-bfd6-4b48424183be', + 'name': 'LUKS'}) + db.volume_type_encryption_create( + ctxt, + '61298380-0c12-11e3-bfd6-4b48424183be', + {'control_location': control_location, + 'provider': ENCRYPTION_PROVIDER, + 'cipher': cipher, + 'key_size': key_size}) + + volume_api = cinder.volume.api.API() + + db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS') + + volume = volume_api.create(self.context, + 1, + 'name', + 'description', + volume_type=db_vol_type) + + key_manager = volume_api.key_manager + key = key_manager.get(self.context, volume['encryption_key_id']) + self.assertEqual('blowfish', key.algorithm) + + metadata = db.volume_encryption_metadata_get(self.context, volume.id) + self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) + self.assertEqual(cipher, metadata.get('cipher')) + 
self.assertEqual(key_size, metadata.get('key_size')) self.assertIsNotNone(volume['encryption_key_id']) def test_create_volume_with_provider_id(self): @@ -1103,13 +1134,18 @@ class VolumeTestCase(BaseVolumeTestCase): self.volume.create_volume(self.context, volume['id']) self.assertEqual(fake.PROVIDER_ID, volume['provider_id']) - @mock.patch.object(keymgr, 'API', new=fake_keymgr.fake_api) + @mock.patch.object(key_manager, 'API', new=fake_keymgr.fake_api) def test_create_delete_volume_with_encrypted_volume_type(self): - db_vol_type = db.volume_type_create( - self.context, {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) + cipher = 'aes-xts-plain64' + key_size = 256 + db.volume_type_create(self.context, + {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) db.volume_type_encryption_create( self.context, fake.VOLUME_TYPE_ID, - {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER}) + {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, + 'cipher': cipher, 'key_size': key_size}) + + db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS') volume = self.volume_api.create(self.context, 1, @@ -1126,7 +1162,13 @@ class VolumeTestCase(BaseVolumeTestCase): db.volume_update(self.context, volume['id'], {'status': 'available'}) self.volume_api.delete(self.context, volume) - volume = db.volume_get(self.context, volume['id']) + volume = objects.Volume.get_by_id(self.context, volume.id) + while volume.status == 'available': + # Must wait for volume_api delete request to process enough to + # change the volume status. + time.sleep(0.5) + volume.refresh() + self.assertEqual('deleting', volume['status']) db.volume_destroy(self.context, volume['id']) @@ -1503,8 +1545,7 @@ class VolumeTestCase(BaseVolumeTestCase): snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) - with mock.patch.object(db, - 'service_get_all_by_topic') as mock_get_service, \ + with mock.patch('cinder.db.service_get_all') as mock_get_service, \ mock.patch.object(volume_api, 'list_availability_zones') as mock_get_azs: mock_get_service.return_value = [{'host': 'foo'}] @@ -1973,10 +2014,12 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, src_vol_id) - @mock.patch.object(keymgr, 'API', fake_keymgr.fake_api) + @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_create_volume_from_snapshot_with_encryption(self): """Test volume can be created from a snapshot of an encrypted volume""" ctxt = context.get_admin_context() + cipher = 'aes-xts-plain64' + key_size = 256 db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', @@ -1984,7 +2027,8 @@ class VolumeTestCase(BaseVolumeTestCase): db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', - {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER}) + {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, + 'cipher': cipher, 'key_size': key_size}) volume_api = cinder.volume.api.API() @@ -2021,15 +2065,17 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertIsNotNone(volume_dst['encryption_key_id']) key_manager = volume_api.key_manager # must use *same* key manager - volume_src_key = key_manager.get_key(self.context, - volume_src['encryption_key_id']) - volume_dst_key = key_manager.get_key(self.context, - volume_dst['encryption_key_id']) + volume_src_key = key_manager.get(self.context, + volume_src['encryption_key_id']) + volume_dst_key = key_manager.get(self.context, + volume_dst['encryption_key_id']) 
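# Sketch of the invariant the next assertion checks: a volume created from an
# encrypted source ends up with an encryption key that, fetched through the
# *same* key manager, decodes to the same bytes. Toy in-memory manager below;
# the real code goes through cinder.keymgr / castellan.
import os


class ToyKeyManager(object):
    def __init__(self):
        self._keys = {}

    def store(self, key_bytes):
        key_id = os.urandom(8).hex()
        self._keys[key_id] = key_bytes
        return key_id

    def get(self, context, key_id):
        return self._keys[key_id]


km = ToyKeyManager()
src_id = km.store(os.urandom(32))
# Cloning may copy the key under a new id (or reuse the source's id); either
# way the retrieved bytes compare equal, which is all the test relies on.
dst_id = km.store(km.get(None, src_id))
assert km.get(None, src_id) == km.get(None, dst_id)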
self.assertEqual(volume_src_key, volume_dst_key) def test_create_volume_from_encrypted_volume(self): """Test volume can be created from an encrypted volume.""" - self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api) + self.stubs.Set(key_manager, 'API', fake_keymgr.fake_api) + cipher = 'aes-xts-plain64' + key_size = 256 volume_api = cinder.volume.api.API() @@ -2041,7 +2087,8 @@ class VolumeTestCase(BaseVolumeTestCase): db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', - {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER}) + {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, + 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), 'LUKS') @@ -2067,11 +2114,11 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertIsNotNone(volume_src['encryption_key_id']) self.assertIsNotNone(volume_dst['encryption_key_id']) - key_manager = volume_api.key_manager # must use *same* key manager - volume_src_key = key_manager.get_key(self.context, - volume_src['encryption_key_id']) - volume_dst_key = key_manager.get_key(self.context, - volume_dst['encryption_key_id']) + km = volume_api.key_manager # must use *same* key manager + volume_src_key = km.get(self.context, + volume_src['encryption_key_id']) + volume_dst_key = km.get(self.context, + volume_dst['encryption_key_id']) self.assertEqual(volume_src_key, volume_dst_key) def test_delete_encrypted_volume(self): @@ -2081,7 +2128,7 @@ class VolumeTestCase(BaseVolumeTestCase): vol_api = cinder.volume.api.API() with mock.patch.object( vol_api.key_manager, - 'delete_key', + 'delete', side_effect=Exception): self.assertRaises(exception.InvalidVolume, vol_api.delete, @@ -2166,16 +2213,19 @@ class VolumeTestCase(BaseVolumeTestCase): _mock_volume_admin_metadata_get, mock_get_target): """Make sure initialize_connection returns correct information.""" - _fake_admin_meta = {'fake-key': 'fake-value'} + _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}] _fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID, 'name': 'fake_name', 'host': 'fake_host', 'id': fake.VOLUME_ID, 'volume_admin_metadata': _fake_admin_meta} + fake_volume_obj = fake_volume.fake_volume_obj(self.context, + **_fake_volume) _mock_volume_get.return_value = _fake_volume _mock_volume_update.return_value = _fake_volume - _mock_volume_admin_metadata_get.return_value = _fake_admin_meta + _mock_volume_admin_metadata_get.return_value = { + 'fake-key': 'fake-value'} connector = {'ip': 'IP', 'initiator': 'INITIATOR'} qos_values = {'consumer': 'front-end', @@ -2186,7 +2236,7 @@ class VolumeTestCase(BaseVolumeTestCase): with mock.patch.object(cinder.volume.volume_types, 'get_volume_type_qos_specs') as type_qos, \ - mock.patch.object(cinder.tests.unit.fake_driver.FakeISCSIDriver, + mock.patch.object(cinder.tests.fake_driver.FakeISCSIDriver, 'initialize_connection') as driver_init: type_qos.return_value = dict(qos_specs=qos_values) driver_init.return_value = {'data': {}} @@ -2195,53 +2245,42 @@ class VolumeTestCase(BaseVolumeTestCase): 'key2': 'value2'} # initialize_connection() passes qos_specs that is designated to # be consumed by front-end or both front-end and back-end - conn_info = self.volume.initialize_connection(self.context, - fake.VOLUME_ID, - connector) + conn_info = self.volume.initialize_connection( + self.context, fake.VOLUME_ID, connector, + volume=fake_volume_obj) self.assertDictMatch(qos_specs_expected, conn_info['data']['qos_specs']) qos_values.update({'consumer': 'both'}) - 
conn_info = self.volume.initialize_connection(self.context, - fake.VOLUME_ID, - connector) + conn_info = self.volume.initialize_connection( + self.context, fake.VOLUME_ID, connector, + volume=fake_volume_obj) self.assertDictMatch(qos_specs_expected, conn_info['data']['qos_specs']) # initialize_connection() skips qos_specs that is designated to be # consumed by back-end only qos_values.update({'consumer': 'back-end'}) type_qos.return_value = dict(qos_specs=qos_values) - conn_info = self.volume.initialize_connection(self.context, - fake.VOLUME_ID, - connector) + conn_info = self.volume.initialize_connection( + self.context, fake.VOLUME_ID, connector, + volume=fake_volume_obj) self.assertIsNone(conn_info['data']['qos_specs']) @mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export') - @mock.patch.object(db.sqlalchemy.api, 'volume_get') - @mock.patch.object(db, 'volume_update') def test_initialize_connection_export_failure(self, - _mock_volume_update, - _mock_volume_get, _mock_create_export): """Test exception path for create_export failure.""" - _fake_admin_meta = {'fake-key': 'fake-value'} - _fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID, - 'name': 'fake_name', - 'host': 'fake_host', - 'id': fake.VOLUME_ID, - 'volume_admin_metadata': _fake_admin_meta} - - _mock_volume_get.return_value = _fake_volume - _mock_volume_update.return_value = _fake_volume + volume = tests_utils.create_volume( + self.context, admin_metadata={'fake-key': 'fake-value'}, + volume_type_id=fake.VOLUME_TYPE_ID, **self.volume_params) _mock_create_export.side_effect = exception.CinderException connector = {'ip': 'IP', 'initiator': 'INITIATOR'} self.assertRaises(exception.VolumeBackendAPIException, self.volume.initialize_connection, - self.context, - fake.VOLUME_ID, - connector) + self.context, fake.VOLUME_ID, connector, + volume=volume) def test_run_attach_detach_volume_for_instance(self): """Make sure volume can be attached and detached from instance.""" @@ -3178,7 +3217,6 @@ class VolumeTestCase(BaseVolumeTestCase): [test_volume], 'fake_name', 'fake_description', - False, fake.CONSISTENCY_GROUP_ID) def test_cannot_delete_volume_in_use(self): @@ -3704,7 +3742,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume_api = cinder.volume.api.API( image_service=FakeImageService()) volume = volume_api.create(self.context, 2, 'name', 'description', - image_id=1) + image_id=self.FAKE_UUID) volume_id = volume['id'] self.assertEqual('creating', volume['status']) @@ -4193,17 +4231,22 @@ class VolumeTestCase(BaseVolumeTestCase): fake_error_create_cloned_volume) volume_src = tests_utils.create_volume(self.context, **self.volume_params) + self.assertEqual('creating', volume_src.status) self.volume.create_volume(self.context, volume_src.id, volume=volume_src) + self.assertEqual('available', volume_src.status) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) + self.assertEqual('creating', volume_dst.status) self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume_dst.id, volume=volume_dst) - self.assertEqual('creating', volume_src['status']) + # Source volume's status is still available and dst is set to error + self.assertEqual('available', volume_src.status) + self.assertEqual('error', volume_dst.status) self.volume.delete_volume(self.context, volume_dst.id, volume=volume_dst) self.volume.delete_volume(self.context, volume_src.id, @@ -4597,6 +4640,10 @@ class VolumeTestCase(BaseVolumeTestCase): 'is_snapshot': False} 
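# Illustrative reduction of the (backup_device, is_snapshot) contract asserted
# below: an available volume is handed to the backup service directly, while
# an in-use volume gets a temporary clone (or, with backup_use_temp_snapshot
# enabled, a temporary snapshot). Hypothetical helper, not the driver code.
def get_backup_device(volume, create_temp_clone, create_temp_snapshot,
                      use_temp_snapshot=False):
    if volume['status'] != 'in-use':
        return volume, False                       # back up the volume itself
    if use_temp_snapshot:
        return create_temp_snapshot(volume), True  # snapshot-based backup
    return create_temp_clone(volume), False        # clone-based backup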
self.assertEqual(expected_result, result) + def test_backup_use_temp_snapshot_config(self): + local_conf = self.volume.driver.configuration.local_conf + self.assertFalse(local_conf.backup_use_temp_snapshot) + @mock.patch.object(QUOTAS, 'reserve', side_effect = OVER_SNAPSHOT_QUOTA_EXCEPTION) def test_existing_snapshot_failed_quota_reserve(self, mock_reserve): @@ -5172,7 +5219,8 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): def _retype_volume_exec(self, driver, mock_notify, snap=False, policy='on-demand', migrate_exc=False, exc=None, diff_equal=False, - replica=False, reserve_vol_type_only=False): + replica=False, reserve_vol_type_only=False, + encryption_changed=False): elevated = context.get_admin_context() project_id = self.context.project_id @@ -5232,7 +5280,10 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): mock.patch.object(db.sqlalchemy.api, 'volume_get') as mock_get: mock_get.return_value = volume _retype.return_value = driver - _diff.return_value = ({}, diff_equal) + returned_diff = {} + if encryption_changed: + returned_diff = {'encryption': 'fake'} + _diff.return_value = (returned_diff, diff_equal) if migrate_exc: _mig.side_effect = KeyError else: @@ -5297,6 +5348,8 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): self.assertEqual(CONF.host, volume.host) self.assertEqual(0, volumes_in_use) mock_notify.assert_not_called() + if encryption_changed: + self.assertTrue(_mig.called) def test_retype_volume_driver_success(self): self._retype_volume_exec(True) @@ -5326,6 +5379,9 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): def test_retype_volume_with_type_only(self): self._retype_volume_exec(True, reserve_vol_type_only=True) + def test_retype_volume_migration_encryption(self): + self._retype_volume_exec(False, encryption_changed=True) + def test_migrate_driver_not_initialized(self): volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) @@ -5371,7 +5427,7 @@ class ReplicationTestCase(BaseVolumeTestCase): @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover_host') @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(cinder.db, 'service_get_by_args') + @mock.patch.object(cinder.db, 'service_get') def test_failover_host(self, mock_db_args, mock_db_update, mock_failover): """Test replication failover_host.""" @@ -5386,7 +5442,7 @@ class ReplicationTestCase(BaseVolumeTestCase): @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover_host') @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(cinder.db, 'service_get_by_args') + @mock.patch.object(cinder.db, 'service_get') def test_failover_host_unexpected_status(self, mock_db_args, mock_db_update, mock_failover): @@ -5404,7 +5460,7 @@ class ReplicationTestCase(BaseVolumeTestCase): @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host') @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(cinder.db, 'service_get_by_args') + @mock.patch.object(cinder.db, 'service_get') def test_freeze_host(self, mock_db_args, mock_db_update, mock_freeze): """Test replication freeze_host.""" @@ -5419,7 +5475,7 @@ class ReplicationTestCase(BaseVolumeTestCase): @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host') @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(cinder.db, 'service_get_by_args') + @mock.patch.object(cinder.db, 'service_get') def test_freeze_host_unexpected_status(self, mock_db_args, mock_db_update, mock_freeze): @@ -5437,7 +5493,7 @@ class ReplicationTestCase(BaseVolumeTestCase): 
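# Sketch of the service-row flow these replication tests stub out: resolve the
# service with service_get, flip its state with an atomic conditional update,
# and only then cast to the volume RPC API. The signatures here are
# illustrative simplifications, not the exact cinder.db interface.
class ServiceStateError(Exception):
    pass


def freeze_host(db, rpcapi, ctxt, host):
    service = db.service_get(ctxt, host=host, binary='cinder-volume')
    changed = db.conditional_update(
        ctxt, service,
        values={'frozen': True},       # desired new state
        expected={'frozen': False})    # apply only if not already frozen
    if not changed:
        raise ServiceStateError('host %s is already frozen' % host)
    rpcapi.freeze_host(ctxt, service)  # tell the running volume service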
@mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host') @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(cinder.db, 'service_get_by_args') + @mock.patch.object(cinder.db, 'service_get') def test_thaw_host(self, mock_db_args, mock_db_update, mock_thaw): """Test replication thaw_host.""" @@ -5453,7 +5509,7 @@ class ReplicationTestCase(BaseVolumeTestCase): @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host') @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(cinder.db, 'service_get_by_args') + @mock.patch.object(cinder.db, 'service_get') def test_thaw_host_unexpected_status(self, mock_db_args, mock_db_update, mock_thaw): @@ -5948,9 +6004,10 @@ class DriverTestCase(test.TestCase): self.volume.delete_volume(self.context, volume_id) +@ddt.ddt class GenericVolumeDriverTestCase(DriverTestCase): """Test case for VolumeDriver.""" - driver_name = "cinder.tests.unit.fake_driver.LoggingVolumeDriver" + driver_name = "cinder.tests.fake_driver.LoggingVolumeDriver" @mock.patch.object(utils, 'temporary_chown') @mock.patch('six.moves.builtins.open') @@ -5964,9 +6021,8 @@ class GenericVolumeDriverTestCase(DriverTestCase): vol = tests_utils.create_volume(self.context) self.context.user_id = fake.USER_ID self.context.project_id = fake.PROJECT_ID - backup = tests_utils.create_backup(self.context, - vol['id']) - backup_obj = objects.Backup.get_by_id(self.context, backup.id) + backup_obj = tests_utils.create_backup(self.context, + vol['id']) properties = {} attach_info = {'device': {'path': '/dev/null'}} backup_service = mock.Mock() @@ -5999,15 +6055,15 @@ class GenericVolumeDriverTestCase(DriverTestCase): status='backing-up') cloned_vol = self.volume.driver._create_temp_cloned_volume( self.context, vol) - self.assertEqual('dummy', cloned_vol['provider_location']) - self.assertEqual('available', cloned_vol['status']) + self.assertEqual('dummy', cloned_vol.provider_location) + self.assertEqual('available', cloned_vol.status) mock_create_cloned_volume.return_value = None vol = tests_utils.create_volume(self.context, status='backing-up') cloned_vol = self.volume.driver._create_temp_cloned_volume( self.context, vol) - self.assertEqual('available', cloned_vol['status']) + self.assertEqual('available', cloned_vol.status) @mock.patch.object(utils, 'temporary_chown') @mock.patch('six.moves.builtins.open') @@ -6024,9 +6080,8 @@ class GenericVolumeDriverTestCase(DriverTestCase): temp_vol = tests_utils.create_volume(self.context) self.context.user_id = fake.USER_ID self.context.project_id = fake.PROJECT_ID - backup = tests_utils.create_backup(self.context, - vol['id']) - backup_obj = objects.Backup.get_by_id(self.context, backup.id) + backup_obj = tests_utils.create_backup(self.context, + vol['id']) properties = {} attach_info = {'device': {'path': '/dev/null'}} backup_service = mock.Mock() @@ -6117,16 +6172,15 @@ class GenericVolumeDriverTestCase(DriverTestCase): vol = tests_utils.create_volume(self.context) self.context.user_id = fake.USER_ID self.context.project_id = fake.PROJECT_ID - backup = tests_utils.create_backup(self.context, - vol['id']) - backup_obj = objects.Backup.get_by_id(self.context, backup.id) + backup_obj = tests_utils.create_backup(self.context, + vol['id']) (backup_device, is_snapshot) = self.volume.driver.get_backup_device( self.context, backup_obj) volume = objects.Volume.get_by_id(self.context, vol.id) self.assertEqual(volume, backup_device) self.assertFalse(is_snapshot) - backup_obj = objects.Backup.get_by_id(self.context, backup.id) - 
self.assertIsNone(backup.temp_volume_id) + backup_obj.refresh() + self.assertIsNone(backup_obj.temp_volume_id) def test_get_backup_device_in_use(self): vol = tests_utils.create_volume(self.context, @@ -6135,9 +6189,8 @@ class GenericVolumeDriverTestCase(DriverTestCase): temp_vol = tests_utils.create_volume(self.context) self.context.user_id = fake.USER_ID self.context.project_id = fake.PROJECT_ID - backup = tests_utils.create_backup(self.context, - vol['id']) - backup_obj = objects.Backup.get_by_id(self.context, backup.id) + backup_obj = tests_utils.create_backup(self.context, + vol['id']) with mock.patch.object( self.volume.driver, '_create_temp_cloned_volume') as mock_create_temp: @@ -6147,7 +6200,7 @@ class GenericVolumeDriverTestCase(DriverTestCase): backup_obj)) self.assertEqual(temp_vol, backup_device) self.assertFalse(is_snapshot) - backup_obj = objects.Backup.get_by_id(self.context, backup.id) + backup_obj.refresh() self.assertEqual(temp_vol.id, backup_obj.temp_volume_id) def test__create_temp_volume_from_snapshot(self): @@ -6164,15 +6217,20 @@ class GenericVolumeDriverTestCase(DriverTestCase): temp_vol = self.volume.driver._create_temp_volume_from_snapshot( self.context, vol, snapshot) - self.assertEqual('detached', temp_vol['attach_status']) - self.assertEqual('fakezone', temp_vol['availability_zone']) + self.assertEqual('detached', temp_vol.attach_status) + self.assertEqual('fakezone', temp_vol.availability_zone) @mock.patch.object(utils, 'brick_get_connector_properties') @mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume') @mock.patch.object(cinder.volume.manager.VolumeManager, '_detach_volume') @mock.patch.object(volutils, 'copy_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities') + @mock.patch.object(cinder.volume.volume_types, + 'volume_types_encryption_changed') + @ddt.data(False, True) def test_copy_volume_data_mgr(self, + encryption_changed, + mock_encryption_changed, mock_get_capabilities, mock_copy, mock_detach, @@ -6185,17 +6243,24 @@ class GenericVolumeDriverTestCase(DriverTestCase): dest_vol = tests_utils.create_volume(self.context, size=1, host=CONF.host) mock_get_connector.return_value = {} + mock_encryption_changed.return_value = encryption_changed self.volume.driver._throttle = mock.MagicMock() attach_expected = [ - mock.call(self.context, dest_vol, {}, remote=False), - mock.call(self.context, src_vol, {}, remote=False)] + mock.call(self.context, dest_vol, {}, + remote=False, + attach_encryptor=encryption_changed), + mock.call(self.context, src_vol, {}, + remote=False, + attach_encryptor=encryption_changed)] detach_expected = [ mock.call(self.context, {'device': {'path': 'bar'}}, - dest_vol, {}, force=False, remote=False), + dest_vol, {}, force=False, remote=False, + attach_encryptor=encryption_changed), mock.call(self.context, {'device': {'path': 'foo'}}, - src_vol, {}, force=False, remote=False)] + src_vol, {}, force=False, remote=False, + attach_encryptor=encryption_changed)] attach_volume_returns = [ {'device': {'path': 'bar'}}, @@ -6230,6 +6295,148 @@ class GenericVolumeDriverTestCase(DriverTestCase): db.volume_destroy(self.context, src_vol['id']) db.volume_destroy(self.context, dest_vol['id']) + @mock.patch.object(os_brick.initiator.connector, + 'get_connector_properties') + @mock.patch.object(image_utils, 'fetch_to_raw') + @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') + @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') + @mock.patch.object(cinder.utils, 
'brick_attach_volume_encryptor') + @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor') + def test_copy_image_to_encrypted_volume(self, + mock_detach_encryptor, + mock_attach_encryptor, + mock_detach_volume, + mock_attach_volume, + mock_fetch_to_raw, + mock_get_connector_properties): + properties = {} + volume = tests_utils.create_volume( + self.context, status='available', + size=2, + encryption_key_id=fake.ENCRYPTION_KEY_ID) + volume_id = volume['id'] + volume = db.volume_get(context.get_admin_context(), volume_id) + image_service = fake_image.FakeImageService() + local_path = 'dev/sda' + attach_info = {'device': {'path': local_path}, + 'conn': {'driver_volume_type': 'iscsi', + 'data': {}, }} + + mock_get_connector_properties.return_value = properties + mock_attach_volume.return_value = [attach_info, volume] + + self.volume.driver.copy_image_to_encrypted_volume( + self.context, volume, image_service, fake.IMAGE_ID) + + encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} + mock_attach_volume.assert_called_once_with( + self.context, volume, properties) + mock_attach_encryptor.assert_called_once_with( + self.context, attach_info, encryption) + mock_fetch_to_raw.assert_called_once_with( + self.context, image_service, fake.IMAGE_ID, + local_path, '1M', size=2) + mock_detach_encryptor.assert_called_once_with( + attach_info, encryption) + mock_detach_volume.assert_called_once_with( + self.context, attach_info, volume, properties) + + @mock.patch.object(os_brick.initiator.connector, + 'get_connector_properties') + @mock.patch.object(image_utils, 'fetch_to_raw') + @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') + @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') + @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor') + @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor') + def test_copy_image_to_encrypted_volume_failed_attach_encryptor( + self, + mock_detach_encryptor, + mock_attach_encryptor, + mock_detach_volume, + mock_attach_volume, + mock_fetch_to_raw, + mock_get_connector_properties): + properties = {} + volume = tests_utils.create_volume( + self.context, status='available', + size=2, + encryption_key_id=fake.ENCRYPTION_KEY_ID) + volume_id = volume['id'] + volume = db.volume_get(context.get_admin_context(), volume_id) + image_service = fake_image.FakeImageService() + attach_info = {'device': {'path': 'dev/sda'}, + 'conn': {'driver_volume_type': 'iscsi', + 'data': {}, }} + + mock_get_connector_properties.return_value = properties + mock_attach_volume.return_value = [attach_info, volume] + raised_exception = os_brick.exception.VolumeEncryptionNotSupported( + volume_id = "123", + volume_type = "abc") + mock_attach_encryptor.side_effect = raised_exception + + self.assertRaises(os_brick.exception.VolumeEncryptionNotSupported, + self.volume.driver.copy_image_to_encrypted_volume, + self.context, volume, image_service, fake.IMAGE_ID) + + encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} + mock_attach_volume.assert_called_once_with( + self.context, volume, properties) + mock_attach_encryptor.assert_called_once_with( + self.context, attach_info, encryption) + self.assertFalse(mock_fetch_to_raw.called) + self.assertFalse(mock_detach_encryptor.called) + mock_detach_volume.assert_called_once_with( + self.context, attach_info, volume, properties) + + @mock.patch.object(os_brick.initiator.connector, + 'get_connector_properties') + @mock.patch.object(image_utils, 'fetch_to_raw') + 
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') + @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') + @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor') + @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor') + def test_copy_image_to_encrypted_volume_failed_fetch( + self, + mock_detach_encryptor, mock_attach_encryptor, + mock_detach_volume, mock_attach_volume, mock_fetch_to_raw, + mock_get_connector_properties): + properties = {} + volume = tests_utils.create_volume( + self.context, status='available', + size=2, + encryption_key_id=fake.ENCRYPTION_KEY_ID) + volume_id = volume['id'] + volume = db.volume_get(context.get_admin_context(), volume_id) + image_service = fake_image.FakeImageService() + local_path = 'dev/sda' + attach_info = {'device': {'path': local_path}, + 'conn': {'driver_volume_type': 'iscsi', + 'data': {}, }} + + mock_get_connector_properties.return_value = properties + mock_attach_volume.return_value = [attach_info, volume] + raised_exception = exception.ImageUnacceptable(reason='fake', + image_id=fake.IMAGE_ID) + mock_fetch_to_raw.side_effect = raised_exception + + encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} + self.assertRaises(exception.ImageUnacceptable, + self.volume.driver.copy_image_to_encrypted_volume, + self.context, volume, image_service, fake.IMAGE_ID) + + mock_attach_volume.assert_called_once_with( + self.context, volume, properties) + mock_attach_encryptor.assert_called_once_with( + self.context, attach_info, encryption) + mock_fetch_to_raw.assert_called_once_with( + self.context, image_service, fake.IMAGE_ID, + local_path, '1M', size=2) + mock_detach_encryptor.assert_called_once_with( + attach_info, encryption) + mock_detach_volume.assert_called_once_with( + self.context, attach_info, volume, properties) + class FibreChannelTestCase(DriverTestCase): """Test Case for FibreChannelDriver.""" @@ -6373,7 +6580,7 @@ class ImageVolumeCacheTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, **volume_params) with mock.patch.object( - volume_api.key_manager, 'delete_key') as key_del_mock: + volume_api.key_manager, 'delete') as key_del_mock: key_del_mock.side_effect = Exception("Key not found") volume_api.delete(self.context, volume) @@ -6384,8 +6591,6 @@ class DiscardFlagTestCase(BaseVolumeTestCase): def setUp(self): super(DiscardFlagTestCase, self).setUp() self.volume.driver = mock.MagicMock() - self.mock_db = mock.MagicMock() - self.volume.db = self.mock_db @ddt.data(dict(config_discard_flag=True, driver_discard_flag=None, @@ -6413,15 +6618,6 @@ class DiscardFlagTestCase(BaseVolumeTestCase): config_discard_flag, driver_discard_flag, expected_flag): - volume_properties = {'volume_type_id': None} - - def _get_item(key): - return volume_properties[key] - - mock_volume = mock.MagicMock() - mock_volume.__getitem__.side_effect = _get_item - self.mock_db.volume_get.return_value = mock_volume - self.mock_db.volume_update.return_value = mock_volume self.volume.driver.create_export.return_value = None connector = {'ip': 'IP', 'initiator': 'INITIATOR'} @@ -6444,7 +6640,13 @@ class DiscardFlagTestCase(BaseVolumeTestCase): self.volume.driver.configuration.safe_get.side_effect = _safe_get - conn_info = self.volume.initialize_connection(self.context, 'id', - connector) + with mock.patch.object(objects, 'Volume') as mock_vol: + volume = tests_utils.create_volume(self.context) + volume.volume_type_id = None + mock_vol.get_by_id.return_value = volume + + conn_info = 
self.volume.initialize_connection(self.context, + volume.id, + connector) self.assertEqual(expected_flag, conn_info['data'].get('discard')) diff --git a/cinder/tests/unit/test_volume_rpcapi.py b/cinder/tests/unit/test_volume_rpcapi.py index 4b3d9d858..2202cb707 100644 --- a/cinder/tests/unit/test_volume_rpcapi.py +++ b/cinder/tests/unit/test_volume_rpcapi.py @@ -22,6 +22,7 @@ import ddt from oslo_config import cfg from oslo_serialization import jsonutils +from cinder.common import constants from cinder import context from cinder import db from cinder import objects @@ -72,33 +73,57 @@ class VolumeRpcAPITestCase(test.TestCase): self.context, consistencygroup_id=source_group.id) - group = tests_utils.create_consistencygroup( + cg = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', host='fakehost@fakedrv#fakepool', cgsnapshot_id=cgsnapshot.id) - group2 = tests_utils.create_consistencygroup( + cg2 = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', host='fakehost@fakedrv#fakepool', source_cgid=source_group.id) - group = objects.ConsistencyGroup.get_by_id(self.context, group.id) - group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id) + generic_group = tests_utils.create_group( + self.context, + availability_zone=CONF.storage_availability_zone, + group_type_id='group_type1', + host='fakehost@fakedrv#fakepool') + + group_snapshot = tests_utils.create_group_snapshot( + self.context, + group_id=generic_group.id, + group_type_id='group_type1') + + cg = objects.ConsistencyGroup.get_by_id(self.context, cg.id) + cg2 = objects.ConsistencyGroup.get_by_id(self.context, cg2.id) cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id) self.fake_volume = jsonutils.to_primitive(volume) self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol) self.fake_volume_metadata = volume["volume_metadata"] self.fake_snapshot = snapshot self.fake_reservations = ["RESERVATION"] - self.fake_cg = group - self.fake_cg2 = group2 + self.fake_cg = cg + self.fake_cg2 = cg2 self.fake_src_cg = jsonutils.to_primitive(source_group) self.fake_cgsnap = cgsnapshot self.fake_backup_obj = fake_backup.fake_backup_obj(self.context) + self.fake_group = generic_group + self.fake_group_snapshot = group_snapshot + + self.addCleanup(self._cleanup) + + def _cleanup(self): + self.fake_snapshot.destroy() + self.fake_volume_obj.destroy() + self.fake_group_snapshot.destroy() + self.fake_group.destroy() + self.fake_cgsnap.destroy() + self.fake_cg2.destroy() + self.fake_cg.destroy() def test_serialized_volume_has_id(self): self.assertIn('id', self.fake_volume) @@ -172,7 +197,7 @@ class VolumeRpcAPITestCase(test.TestCase): host = kwargs['cgsnapshot'].consistencygroup.host target['server'] = utils.extract_host(host) - target['topic'] = '%s.%s' % (CONF.volume_topic, host) + target['topic'] = '%s.%s' % (constants.VOLUME_TOPIC, host) self.fake_args = None self.fake_kwargs = None @@ -188,8 +213,8 @@ class VolumeRpcAPITestCase(test.TestCase): if expected_retval: return expected_retval - self.stubs.Set(rpcapi.client, "prepare", _fake_prepare_method) - self.stubs.Set(rpcapi.client, rpc_method, _fake_rpc_method) + self.mock_object(rpcapi.client, "prepare", _fake_prepare_method) + self.mock_object(rpcapi.client, rpc_method, _fake_rpc_method) retval = getattr(rpcapi, method)(ctxt, **kwargs) @@ -223,6 +248,80 @@ class VolumeRpcAPITestCase(test.TestCase): else: 
                self.assertEqual(expected_msg[kwarg], value)
 
+    def _test_group_api(self, method, rpc_method, **kwargs):
+        ctxt = context.RequestContext('fake_user', 'fake_project')
+
+        if 'rpcapi_class' in kwargs:
+            rpcapi_class = kwargs['rpcapi_class']
+            del kwargs['rpcapi_class']
+        else:
+            rpcapi_class = volume_rpcapi.VolumeAPI
+        rpcapi = rpcapi_class()
+        expected_retval = 'foo' if method == 'call' else None
+
+        target = {
+            "version": kwargs.pop('version', rpcapi.RPC_API_VERSION)
+        }
+
+        if 'request_spec' in kwargs:
+            spec = jsonutils.to_primitive(kwargs['request_spec'])
+            kwargs['request_spec'] = spec
+
+        expected_msg = copy.deepcopy(kwargs)
+        if 'host' in expected_msg:
+            del expected_msg['host']
+        if 'group_snapshot' in expected_msg:
+            group_snapshot = expected_msg['group_snapshot']
+            if group_snapshot:
+                group_snapshot.group
+                kwargs['group_snapshot'].group
+
+        if 'host' in kwargs:
+            host = kwargs['host']
+        elif 'group' in kwargs:
+            host = kwargs['group']['host']
+        elif 'group_snapshot' in kwargs:
+            host = kwargs['group_snapshot'].group.host
+
+        target['server'] = utils.extract_host(host)
+        target['topic'] = '%s.%s' % (constants.VOLUME_TOPIC, host)
+
+        self.fake_args = None
+        self.fake_kwargs = None
+
+        def _fake_prepare_method(*args, **kwds):
+            for kwd in kwds:
+                self.assertEqual(kwds[kwd], target[kwd])
+            return rpcapi.client
+
+        def _fake_rpc_method(*args, **kwargs):
+            self.fake_args = args
+            self.fake_kwargs = kwargs
+            if expected_retval:
+                return expected_retval
+
+        self.mock_object(rpcapi.client, "prepare", _fake_prepare_method)
+        self.mock_object(rpcapi.client, rpc_method, _fake_rpc_method)
+
+        retval = getattr(rpcapi, method)(ctxt, **kwargs)
+        self.assertEqual(expected_retval, retval)
+        expected_args = [ctxt, method]
+
+        for arg, expected_arg in zip(self.fake_args, expected_args):
+            self.assertEqual(expected_arg, arg)
+
+        for kwarg, value in self.fake_kwargs.items():
+            if isinstance(value, objects.Group):
+                expected_group = expected_msg[kwarg].obj_to_primitive()
+                group = value.obj_to_primitive()
+                self.assertEqual(expected_group, group)
+            elif isinstance(value, objects.GroupSnapshot):
+                expected_grp_snap = expected_msg[kwarg].obj_to_primitive()
+                grp_snap = value.obj_to_primitive()
+                self.assertEqual(expected_grp_snap, grp_snap)
+            else:
+                self.assertEqual(expected_msg[kwarg], value)
+
     def test_create_consistencygroup(self):
         self._test_volume_api('create_consistencygroup', rpc_method='cast',
                               group=self.fake_cg, host='fake_host1',
@@ -245,7 +344,8 @@ class VolumeRpcAPITestCase(test.TestCase):
         self._test_volume_api('delete_cgsnapshot', rpc_method='cast',
                               cgsnapshot=self.fake_cgsnap, version='2.0')
 
-    def test_create_volume(self):
+    @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
+    def test_create_volume(self, can_send_version):
         self._test_volume_api('create_volume',
                               rpc_method='cast',
                               volume=self.fake_volume_obj,
@@ -253,9 +353,12 @@ class VolumeRpcAPITestCase(test.TestCase):
                               request_spec='fake_request_spec',
                               filter_properties='fake_properties',
                               allow_reschedule=True,
-                              version='2.0')
+                              version='2.4')
+        can_send_version.assert_has_calls([mock.call('2.4')])
 
-    def test_create_volume_serialization(self):
+    @mock.patch('oslo_messaging.RPCClient.can_send_version',
+                return_value=False)
+    def test_create_volume_serialization(self, can_send_version):
        request_spec = {"metadata": self.fake_volume_metadata}
         self._test_volume_api('create_volume',
                               rpc_method='cast',
@@ -265,6 +368,7 @@ class VolumeRpcAPITestCase(test.TestCase):
                               filter_properties='fake_properties',
allow_reschedule=True, version='2.0') + can_send_version.assert_has_calls([mock.call('2.4')]) def test_delete_volume(self): self._test_volume_api('delete_volume', @@ -341,10 +445,18 @@ class VolumeRpcAPITestCase(test.TestCase): 'disk_format': 'fake_type'}, version='2.0') - def test_initialize_connection(self): + @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) + def test_initialize_connection(self, mock_can_send_version): self._test_volume_api('initialize_connection', rpc_method='call', - volume=self.fake_volume, + volume=self.fake_volume_obj, + connector='fake_connector', + version='2.3') + + mock_can_send_version.return_value = False + self._test_volume_api('initialize_connection', + rpc_method='call', + volume=self.fake_volume_obj, connector='fake_connector', version='2.0') @@ -511,3 +623,34 @@ class VolumeRpcAPITestCase(test.TestCase): rpc_method='call', volume=self.fake_volume_obj, version='2.0') + + def test_create_group(self): + self._test_group_api('create_group', rpc_method='cast', + group=self.fake_group, host='fake_host1', + version='2.5') + + def test_delete_group(self): + self._test_group_api('delete_group', rpc_method='cast', + group=self.fake_group, version='2.5') + + def test_update_group(self): + self._test_group_api('update_group', rpc_method='cast', + group=self.fake_group, add_volumes=['vol1'], + remove_volumes=['vol2'], version='2.5') + + def test_create_group_from_src(self): + self._test_group_api('create_group_from_src', rpc_method='cast', + group=self.fake_group, + group_snapshot=self.fake_group_snapshot, + source_group=None, + version='2.6') + + def test_create_group_snapshot(self): + self._test_group_api('create_group_snapshot', rpc_method='cast', + group_snapshot=self.fake_group_snapshot, + version='2.6') + + def test_delete_group_snapshot(self): + self._test_group_api('delete_group_snapshot', rpc_method='cast', + group_snapshot=self.fake_group_snapshot, + version='2.6') diff --git a/cinder/tests/unit/test_volume_types.py b/cinder/tests/unit/test_volume_types.py index 677c87ac3..6ad216b61 100644 --- a/cinder/tests/unit/test_volume_types.py +++ b/cinder/tests/unit/test_volume_types.py @@ -511,3 +511,46 @@ class VolumeTypeTestCase(test.TestCase): volume_types.create(self.ctxt, "type-test", is_public=False) vtype = volume_types.get_volume_type_by_name(self.ctxt, 'type-test') self.assertIsNotNone(vtype.get('extra_specs', None)) + + @mock.patch('cinder.volume.volume_types.get_volume_type_encryption') + def _exec_volume_types_encryption_changed(self, enc1, enc2, + expected_result, + mock_get_encryption): + def _get_encryption(ctxt, type_id): + if enc1 and enc1['volume_type_id'] == type_id: + return enc1 + if enc2 and enc2['volume_type_id'] == type_id: + return enc2 + return None + + mock_get_encryption.side_effect = _get_encryption + actual_result = volume_types.volume_types_encryption_changed( + self.ctxt, fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID) + self.assertEqual(expected_result, actual_result) + + def test_volume_types_encryption_changed(self): + enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, + 'cipher': 'fake', + 'created_at': 'time1', } + enc2 = {'volume_type_id': fake.VOLUME_TYPE2_ID, + 'cipher': 'fake', + 'created_at': 'time2', } + self._exec_volume_types_encryption_changed(enc1, enc2, False) + + def test_volume_types_encryption_changed2(self): + enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, + 'cipher': 'fake1', + 'created_at': 'time1', } + enc2 = {'volume_type_id': fake.VOLUME_TYPE2_ID, + 'cipher': 'fake2', + 'created_at': 
'time1', } + self._exec_volume_types_encryption_changed(enc1, enc2, True) + + def test_volume_types_encryption_changed3(self): + self._exec_volume_types_encryption_changed(None, None, False) + + def test_volume_types_encryption_changed4(self): + enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, + 'cipher': 'fake1', + 'created_at': 'time1', } + self._exec_volume_types_encryption_changed(enc1, None, True) diff --git a/cinder/tests/unit/test_volume_utils.py b/cinder/tests/unit/test_volume_utils.py index 30e9a7df6..91966e48f 100644 --- a/cinder/tests/unit/test_volume_utils.py +++ b/cinder/tests/unit/test_volume_utils.py @@ -25,8 +25,10 @@ from oslo_concurrency import processutils from oslo_config import cfg from cinder import context +from cinder import db from cinder.db.sqlalchemy import models from cinder import exception +from cinder import keymgr from cinder.objects import fields from cinder import test from cinder.tests.unit.backup import fake_backup @@ -36,6 +38,8 @@ from cinder.tests.unit import fake_volume from cinder import utils from cinder.volume import throttling from cinder.volume import utils as volume_utils +from cinder.volume import volume_types + CONF = cfg.CONF @@ -437,24 +441,33 @@ class ClearVolumeTestCase(test.TestCase): @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.utils.CONF') def test_clear_volume_shred(self, mock_conf, mock_exec): + # 'shred' now uses 'dd'. Remove this test when + # support for 'volume_clear=shred' is removed. mock_conf.volume_clear = 'shred' mock_conf.volume_clear_size = 1 mock_conf.volume_clear_ionice = None + mock_conf.volume_dd_blocksize = '1M' output = volume_utils.clear_volume(1024, 'volume_path') self.assertIsNone(output) - mock_exec.assert_called_once_with( - 'shred', '-n3', '-s1MiB', "volume_path", run_as_root=True) + mock_exec.assert_called_with( + 'dd', 'if=/dev/zero', 'of=volume_path', 'count=1', 'bs=1M', + 'oflag=direct', run_as_root=True) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.utils.CONF') def test_clear_volume_shred_not_clear_size(self, mock_conf, mock_exec): + # 'shred' now uses 'dd'. Remove this test when + # support for 'volume_clear=shred' is removed. 
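+        # Note: volume_clear_size is reassigned to 1 below, overriding the
+        # initial None, so the dd fallback is asserted with 'count=1'.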
mock_conf.volume_clear = 'shred' mock_conf.volume_clear_size = None mock_conf.volume_clear_ionice = None + mock_conf.volume_dd_blocksize = '1M' + mock_conf.volume_clear_size = 1 output = volume_utils.clear_volume(1024, 'volume_path') self.assertIsNone(output) - mock_exec.assert_called_once_with( - 'shred', '-n3', "volume_path", run_as_root=True) + mock_exec.assert_called_with( + 'dd', 'if=/dev/zero', 'of=volume_path', 'count=1', 'bs=1M', + 'oflag=direct', run_as_root=True) @mock.patch('cinder.volume.utils.CONF') def test_clear_volume_invalid_opt(self, mock_conf): @@ -758,52 +771,116 @@ class VolumeUtilsTestCase(test.TestCase): host_2 = 'fake_host2@backend1' self.assertFalse(volume_utils.hosts_are_equivalent(host_1, host_2)) + @mock.patch('cinder.volume.utils.CONF') + def test_extract_id_from_volume_name_vol_id_pattern(self, conf_mock): + conf_mock.volume_name_template = 'volume-%s' + vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' + vol_name = conf_mock.volume_name_template % vol_id + result = volume_utils.extract_id_from_volume_name(vol_name) + self.assertEqual(vol_id, result) + + @mock.patch('cinder.volume.utils.CONF') + def test_extract_id_from_volume_name_vol_id_vol_pattern(self, conf_mock): + conf_mock.volume_name_template = 'volume-%s-volume' + vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' + vol_name = conf_mock.volume_name_template % vol_id + result = volume_utils.extract_id_from_volume_name(vol_name) + self.assertEqual(vol_id, result) + + @mock.patch('cinder.volume.utils.CONF') + def test_extract_id_from_volume_name_id_vol_pattern(self, conf_mock): + conf_mock.volume_name_template = '%s-volume' + vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' + vol_name = conf_mock.volume_name_template % vol_id + result = volume_utils.extract_id_from_volume_name(vol_name) + self.assertEqual(vol_id, result) + + @mock.patch('cinder.volume.utils.CONF') + def test_extract_id_from_volume_name_no_match(self, conf_mock): + conf_mock.volume_name_template = '%s-volume' + vol_name = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' + result = volume_utils.extract_id_from_volume_name(vol_name) + self.assertIsNone(result) + vol_name = 'blahblahblah' + result = volume_utils.extract_id_from_volume_name(vol_name) + self.assertIsNone(result) + @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) def test_check_managed_volume_already_managed(self, exists_mock): id_ = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - vol_id = 'volume-' + id_ - result = volume_utils.check_already_managed_volume(vol_id) - self.assertTrue(result) - exists_mock.assert_called_once_with(mock.ANY, models.Volume, id_) - - @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) - def test_check_already_managed_with_vol_id_vol_pattern(self, exists_mock): - template = 'volume-%s-volume' - self.override_config('volume_name_template', template) - id_ = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - vol_id = template % id_ - - result = volume_utils.check_already_managed_volume(vol_id) - self.assertTrue(result) - exists_mock.assert_called_once_with(mock.ANY, models.Volume, id_) - - @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) - def test_check_already_managed_with_id_vol_pattern(self, exists_mock): - template = '%s-volume' - self.override_config('volume_name_template', template) - id_ = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - vol_id = template % id_ - - result = volume_utils.check_already_managed_volume(vol_id) + result = volume_utils.check_already_managed_volume(id_) self.assertTrue(result) 
exists_mock.assert_called_once_with(mock.ANY, models.Volume, id_) @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=False) - def test_check_managed_volume_not_managed_cinder_like_name(self, - exists_mock): + def test_check_managed_volume_not_managed_proper_uuid(self, exists_mock): id_ = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - vol_id = 'volume-' + id_ - result = volume_utils.check_already_managed_volume(vol_id) + result = volume_utils.check_already_managed_volume(id_) self.assertFalse(result) exists_mock.assert_called_once_with(mock.ANY, models.Volume, id_) - def test_check_managed_volume_not_managed(self): - result = volume_utils.check_already_managed_volume('test-volume') + def test_check_managed_volume_not_managed_invalid_id(self): + result = volume_utils.check_already_managed_volume(1) + self.assertFalse(result) + result = volume_utils.check_already_managed_volume('not-a-uuid') self.assertFalse(result) - def test_check_managed_volume_not_managed_id_like_uuid(self): - result = volume_utils.check_already_managed_volume('volume-d8cd1fe') - self.assertFalse(result) + @mock.patch('cinder.volume.utils.CONF') + def test_extract_id_from_snapshot_name(self, conf_mock): + conf_mock.snapshot_name_template = '%s-snapshot' + snap_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' + snap_name = conf_mock.snapshot_name_template % snap_id + result = volume_utils.extract_id_from_snapshot_name(snap_name) + self.assertEqual(snap_id, result) + + @mock.patch('cinder.volume.utils.CONF') + def test_extract_id_from_snapshot_name_no_match(self, conf_mock): + conf_mock.snapshot_name_template = '%s-snapshot' + snap_name = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' + result = volume_utils.extract_id_from_snapshot_name(snap_name) + self.assertIsNone(result) + snap_name = 'blahblahblah' + result = volume_utils.extract_id_from_snapshot_name(snap_name) + self.assertIsNone(result) + + def test_paginate_entries_list_with_marker(self): + entries = [{'reference': {'name': 'vol03'}, 'size': 1}, + {'reference': {'name': 'vol01'}, 'size': 3}, + {'reference': {'name': 'vol02'}, 'size': 3}, + {'reference': {'name': 'vol04'}, 'size': 2}, + {'reference': {'name': 'vol06'}, 'size': 3}, + {'reference': {'name': 'vol07'}, 'size': 1}, + {'reference': {'name': 'vol05'}, 'size': 1}] + expected = [{'reference': {'name': 'vol04'}, 'size': 2}, + {'reference': {'name': 'vol03'}, 'size': 1}, + {'reference': {'name': 'vol05'}, 'size': 1}] + res = volume_utils.paginate_entries_list(entries, {'name': 'vol02'}, 3, + 1, ['size', 'reference'], + ['desc', 'asc']) + self.assertEqual(expected, res) + + def test_paginate_entries_list_without_marker(self): + entries = [{'reference': {'name': 'vol03'}, 'size': 1}, + {'reference': {'name': 'vol01'}, 'size': 3}, + {'reference': {'name': 'vol02'}, 'size': 3}, + {'reference': {'name': 'vol04'}, 'size': 2}, + {'reference': {'name': 'vol06'}, 'size': 3}, + {'reference': {'name': 'vol07'}, 'size': 1}, + {'reference': {'name': 'vol05'}, 'size': 1}] + expected = [{'reference': {'name': 'vol07'}, 'size': 1}, + {'reference': {'name': 'vol06'}, 'size': 3}, + {'reference': {'name': 'vol05'}, 'size': 1}] + res = volume_utils.paginate_entries_list(entries, None, 3, None, + ['reference'], ['desc']) + self.assertEqual(expected, res) + + def test_paginate_entries_list_marker_not_found(self): + entries = [{'reference': {'name': 'vol03'}, 'size': 1}, + {'reference': {'name': 'vol01'}, 'size': 3}] + self.assertRaises(exception.InvalidInput, + volume_utils.paginate_entries_list, + entries, {'name': 'vol02'}, 3, 
None, + ['size', 'reference'], ['desc', 'asc']) def test_convert_config_string_to_dict(self): test_string = "{'key-1'='val-1' 'key-2'='val-2' 'key-3'='val-3'}" @@ -812,3 +889,43 @@ class VolumeUtilsTestCase(test.TestCase): self.assertEqual( expected_dict, volume_utils.convert_config_string_to_dict(test_string)) + + @mock.patch('cinder.volume.volume_types.is_encrypted', return_value=False) + def test_create_encryption_key_unencrypted(self, is_encrypted): + result = volume_utils.create_encryption_key(mock.ANY, + mock.ANY, + fake.VOLUME_TYPE_ID) + self.assertIsNone(result) + + @mock.patch('cinder.volume.volume_types.is_encrypted', return_value=True) + @mock.patch('cinder.volume.volume_types.get_volume_type_encryption') + @mock.patch('cinder.keymgr.conf_key_mgr.ConfKeyManager.create_key') + def test_create_encryption_key_encrypted(self, create_key, + get_volume_type_encryption, + is_encryption): + enc_key = {'cipher': 'aes-xts-plain64', + 'key_size': 256, + 'provider': 'p1', + 'control_location': 'front-end', + 'encryption_id': 'uuid1'} + ctxt = context.get_admin_context() + type_ref1 = volume_types.create(ctxt, "type1") + encryption = db.volume_type_encryption_create( + ctxt, type_ref1['id'], enc_key) + get_volume_type_encryption.return_value = encryption + CONF.set_override( + 'api_class', + 'cinder.keymgr.conf_key_mgr.ConfKeyManager', + group='key_manager') + key_manager = keymgr.API() + volume_utils.create_encryption_key(ctxt, + key_manager, + fake.VOLUME_TYPE_ID) + is_encryption.assert_called_once_with(ctxt, + fake.VOLUME_TYPE_ID) + get_volume_type_encryption.assert_called_once_with( + ctxt, + fake.VOLUME_TYPE_ID) + create_key.assert_called_once_with(ctxt, + algorithm='aes', + length=256) diff --git a/cinder/tests/unit/utils.py b/cinder/tests/unit/utils.py index 5149e0d4a..0309cc94c 100644 --- a/cinder/tests/unit/utils.py +++ b/cinder/tests/unit/utils.py @@ -47,6 +47,7 @@ def create_volume(ctxt, replication_extended_status=None, replication_driver_data=None, consistencygroup_id=None, + group_id=None, previous_status=None, testcase_instance=None, **kwargs): @@ -65,6 +66,8 @@ def create_volume(ctxt, vol['availability_zone'] = availability_zone if consistencygroup_id: vol['consistencygroup_id'] = consistencygroup_id + if group_id: + vol['group_id'] = group_id if volume_type_id: vol['volume_type_id'] = volume_type_id for key in kwargs: @@ -166,6 +169,38 @@ def create_consistencygroup(ctxt, return cg +def create_group(ctxt, + host='test_host@fakedrv#fakepool', + name='test_group', + description='this is a test group', + status=fields.GroupStatus.AVAILABLE, + availability_zone='fake_az', + group_type_id=None, + volume_type_ids=None, + **kwargs): + """Create a group object in the DB.""" + + grp = objects.Group(ctxt) + grp.host = host + grp.user_id = ctxt.user_id or fake.USER_ID + grp.project_id = ctxt.project_id or fake.PROJECT_ID + grp.status = status + grp.name = name + grp.description = description + grp.availability_zone = availability_zone + if group_type_id: + grp.group_type_id = group_type_id + if volume_type_ids: + grp.volume_type_ids = volume_type_ids + new_id = kwargs.pop('id', None) + grp.update(kwargs) + grp.create() + if new_id and new_id != grp.id: + db.group_update(ctxt, grp.id, {'id': new_id}) + grp = objects.Group.get_by_id(ctxt, new_id) + return grp + + def create_cgsnapshot(ctxt, consistencygroup_id, name='test_cgsnapshot', @@ -207,8 +242,55 @@ def create_cgsnapshot(ctxt, return objects.CGSnapshot.get_by_id(ctxt, cgsnap.id) +def create_group_snapshot(ctxt, + group_id, + 
group_type_id=None, + name='test_group_snapshot', + description='this is a test group snapshot', + status='creating', + recursive_create_if_needed=True, + return_vo=True, + **kwargs): + """Create a group snapshot object in the DB.""" + values = { + 'user_id': ctxt.user_id or fake.USER_ID, + 'project_id': ctxt.project_id or fake.PROJECT_ID, + 'status': status, + 'name': name, + 'description': description, + 'group_id': group_id} + values.update(kwargs) + + if recursive_create_if_needed and group_id: + create_grp = False + try: + objects.Group.get_by_id(ctxt, + group_id) + create_vol = not db.volume_get_all_by_generic_group( + ctxt, group_id) + except exception.GroupNotFound: + create_grp = True + create_vol = True + if create_grp: + create_group(ctxt, id=group_id, group_type_id=group_type_id) + if create_vol: + create_volume(ctxt, group_id=group_id) + + if not return_vo: + return db.group_snapshot_create(ctxt, values) + else: + group_snapshot = objects.GroupSnapshot(ctxt) + new_id = values.pop('id', None) + group_snapshot.update(values) + group_snapshot.create() + if new_id and new_id != group_snapshot.id: + db.group_snapshot_update(ctxt, group_snapshot.id, {'id': new_id}) + group_snapshot = objects.GroupSnapshot.get_by_id(ctxt, new_id) + return group_snapshot + + def create_backup(ctxt, - volume_id, + volume_id=fake.VOLUME_ID, display_name='test_backup', display_description='This is a test backup', status=fields.BackupStatus.CREATING, @@ -216,27 +298,32 @@ def create_backup(ctxt, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, - data_timestamp=None): - backup = {} - backup['volume_id'] = volume_id - backup['user_id'] = ctxt.user_id - backup['project_id'] = ctxt.project_id - backup['host'] = socket.gethostname() - backup['availability_zone'] = '1' - backup['display_name'] = display_name - backup['display_description'] = display_description - backup['container'] = 'fake' - backup['status'] = status - backup['fail_reason'] = '' - backup['service'] = 'fake' - backup['parent_id'] = parent_id - backup['size'] = 5 * 1024 * 1024 - backup['object_count'] = 22 - backup['temp_volume_id'] = temp_volume_id - backup['temp_snapshot_id'] = temp_snapshot_id - backup['snapshot_id'] = snapshot_id - backup['data_timestamp'] = data_timestamp - return db.backup_create(ctxt, backup) + data_timestamp=None, + **kwargs): + """Create a backup object.""" + values = { + 'user_id': ctxt.user_id or fake.USER_ID, + 'project_id': ctxt.project_id or fake.PROJECT_ID, + 'volume_id': volume_id, + 'status': status, + 'display_name': display_name, + 'display_description': display_description, + 'container': 'fake', + 'availability_zone': 'fake', + 'service': 'fake', + 'size': 5 * 1024 * 1024, + 'object_count': 22, + 'host': socket.gethostname(), + 'parent_id': parent_id, + 'temp_volume_id': temp_volume_id, + 'temp_snapshot_id': temp_snapshot_id, + 'snapshot_id': snapshot_id, + 'data_timestamp': data_timestamp, } + + values.update(kwargs) + backup = objects.Backup(ctxt, **values) + backup.create() + return backup def create_message(ctxt, diff --git a/cinder/tests/unit/volume/drivers/dell/__init__.py b/cinder/tests/unit/volume/drivers/dell/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/tests/unit/test_dellfc.py b/cinder/tests/unit/volume/drivers/dell/test_dellfc.py similarity index 69% rename from cinder/tests/unit/test_dellfc.py rename to cinder/tests/unit/volume/drivers/dell/test_dellfc.py index 4faa37efc..0362d8797 100644 --- a/cinder/tests/unit/test_dellfc.py +++ 
b/cinder/tests/unit/volume/drivers/dell/test_dellfc.py
@@ -176,7 +176,7 @@ class DellSCSanFCDriverTestCase(test.TestCase):
                        'find_server',
                        return_value=None)
     @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
-                       'create_server_multiple_hbas',
+                       'create_server',
                        return_value=SCSERVER)
     @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                        'find_volume',
@@ -222,7 +222,74 @@ class DellSCSanFCDriverTestCase(test.TestCase):
         self.assertEqual(expected, res, 'Unexpected return data')
         # verify find_volume has been called and that is has been called twice
-        mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None)
+        mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False)
+        mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_server',
+                       return_value=SCSERVER)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'get_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'map_volume',
+                       return_value=MAPPING)
+    @mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
+                       '_is_live_vol')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_wwns')
+    @mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
+                       'initialize_secondary')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'get_live_volume')
+    def test_initialize_connection_live_vol(self,
+                                            mock_get_live_volume,
+                                            mock_initialize_secondary,
+                                            mock_find_wwns,
+                                            mock_is_live_volume,
+                                            mock_map_volume,
+                                            mock_get_volume,
+                                            mock_find_volume,
+                                            mock_find_server,
+                                            mock_close_connection,
+                                            mock_open_connection,
+                                            mock_init):
+        volume = {'id': fake.VOLUME_ID}
+        connector = self.connector
+        sclivevol = {'instanceId': '101.101',
+                     'secondaryVolume': {'instanceId': '102.101',
+                                         'instanceName': fake.VOLUME_ID},
+                     'secondaryScSerialNumber': 102}
+        mock_is_live_volume.return_value = True
+        mock_find_wwns.return_value = (
+            1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
+            {u'21000024FF30441C': [u'5000D31000FCBE35'],
+             u'21000024FF30441D': [u'5000D31000FCBE3D']})
+        mock_initialize_secondary.return_value = (
+            1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'],
+            {u'21000024FF30441E': [u'5000D31000FCBE36'],
+             u'21000024FF30441F': [u'5000D31000FCBE3E']})
+        mock_get_live_volume.return_value = (sclivevol, False)
+        res = self.driver.initialize_connection(volume, connector)
+        expected = {'data':
+                    {'discard': True,
+                     'initiator_target_map':
+                         {u'21000024FF30441C': [u'5000D31000FCBE35'],
+                          u'21000024FF30441D': [u'5000D31000FCBE3D'],
+                          u'21000024FF30441E': [u'5000D31000FCBE36'],
+                          u'21000024FF30441F': [u'5000D31000FCBE3E']},
+                     'target_discovered': True,
+                     'target_lun': 1,
+                     'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35',
+                                    u'5000D31000FCBE3E', u'5000D31000FCBE36']},
+                    'driver_volume_type': 'fibre_channel'}
+
+        self.assertEqual(expected, res, 'Unexpected return data')
+        # verify find_volume has been called exactly once
+        mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, True)
         mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
 
@@ -260,7 +327,7 @@ class DellSCSanFCDriverTestCase(test.TestCase):
                        'find_server',
                        return_value=None)
     @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
-                       'create_server_multiple_hbas',
+                       'create_server',
return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', @@ -342,6 +409,101 @@ class DellSCSanFCDriverTestCase(test.TestCase): volume, connector) + def test_initialize_secondary(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.map_secondary_volume = mock.MagicMock( + return_value=self.VOLUME) + find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], + {u'21000024FF30441C': [u'5000D31000FCBE35'], + u'21000024FF30441D': [u'5000D31000FCBE3D']}) + mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret) + mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) + ret = self.driver.initialize_secondary(mock_api, sclivevol, + ['wwn1', 'wwn2']) + + self.assertEqual(find_wwns_ret, ret) + + def test_initialize_secondary_create_server(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=None) + mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.map_secondary_volume = mock.MagicMock( + return_value=self.VOLUME) + find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], + {u'21000024FF30441C': [u'5000D31000FCBE35'], + u'21000024FF30441D': [u'5000D31000FCBE3D']}) + mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret) + mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) + ret = self.driver.initialize_secondary(mock_api, sclivevol, + ['wwn1', 'wwn2']) + self.assertEqual(find_wwns_ret, ret) + + def test_initialize_secondary_no_server(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=None) + mock_api.create_server = mock.MagicMock(return_value=None) + ret = self.driver.initialize_secondary(mock_api, sclivevol, + ['wwn1', 'wwn2']) + expected = (None, [], {}) + self.assertEqual(expected, ret) + + def test_initialize_secondary_map_fail(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.map_secondary_volume = mock.MagicMock(return_value=None) + ret = self.driver.initialize_secondary(mock_api, sclivevol, + ['wwn1', 'wwn2']) + expected = (None, [], {}) + self.assertEqual(expected, ret) + + def test_initialize_secondary_vol_not_found(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.map_secondary_volume = mock.MagicMock( + return_value=self.VOLUME) + 
mock_api.get_volume = mock.MagicMock(return_value=None) + ret = self.driver.initialize_secondary(mock_api, sclivevol, + ['wwn1', 'wwn2']) + expected = (None, [], {}) + self.assertEqual(expected, ret) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @@ -380,6 +542,56 @@ class DellSCSanFCDriverTestCase(test.TestCase): 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_server', + return_value=SCSERVER) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_volume', + return_value=VOLUME) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'unmap_volume', + return_value=True) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_wwns', + return_value=(1, + [u'5000D31000FCBE3D', + u'5000D31000FCBE35'], + {u'21000024FF30441C': + [u'5000D31000FCBE35'], + u'21000024FF30441D': + [u'5000D31000FCBE3D']})) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'get_volume_count', + return_value=1) + @mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver, + '_is_live_vol') + @mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver, + 'terminate_secondary') + def test_terminate_connection_live_vol(self, + mock_terminate_secondary, + mock_is_live_vol, + mock_get_volume_count, + mock_find_wwns, + mock_unmap_volume, + mock_find_volume, + mock_find_server, + mock_close_connection, + mock_open_connection, + mock_init): + volume = {'id': fake.VOLUME_ID} + connector = self.connector + mock_terminate_secondary.return_value = (None, [], {}) + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_is_live_vol.return_value = sclivevol + res = self.driver.terminate_connection(volume, connector) + mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) + expected = {'driver_volume_type': 'fibre_channel', + 'data': {}} + self.assertEqual(expected, res, 'Unexpected return data') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @@ -482,10 +694,6 @@ class DellSCSanFCDriverTestCase(test.TestCase): mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector - # self.assertRaises(exception.VolumeBackendAPIException, - # self.driver.terminate_connection, - # volume, - # connector) res = self.driver.terminate_connection(volume, connector) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} @@ -572,6 +780,24 @@ class DellSCSanFCDriverTestCase(test.TestCase): 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') + def test_terminate_secondary(self, + mock_close_connection, + mock_open_connection, + mock_init): + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) + mock_api.find_wwns = mock.MagicMock(return_value=(None, [], {})) + mock_api.unmap_volume = mock.MagicMock(return_value=True) + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + ret = self.driver.terminate_secondary(mock_api, sclivevol, + ['wwn1', 'wwn2']) + expected = (None, [], {}) + self.assertEqual(expected, ret) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_storage_usage', 
return_value={'availableSpace': 100, 'freeSpace': 50}) diff --git a/cinder/tests/unit/test_dellsc.py b/cinder/tests/unit/volume/drivers/dell/test_dellsc.py similarity index 80% rename from cinder/tests/unit/test_dellsc.py rename to cinder/tests/unit/volume/drivers/dell/test_dellsc.py index 94c481de1..c91ec851e 100644 --- a/cinder/tests/unit/test_dellsc.py +++ b/cinder/tests/unit/volume/drivers/dell/test_dellsc.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. +import eventlet import mock import uuid @@ -193,6 +194,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): IQN = 'iqn.2002-03.com.compellent:5000D31000000001' ISCSI_PROPERTIES = {'access_mode': 'rw', + 'discard': True, 'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', @@ -244,6 +246,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): self.driver.backends = None self.driver.replication_enabled = False + self.mock_sleep = self.mock_object(eventlet, 'sleep') + self.volid = str(uuid.uuid4()) self.volume_name = "volume" + self.volid self.connector = { @@ -274,6 +278,54 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): self.fake_iqn) } + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_sc') + def test_check_for_setup_error(self, + mock_find_sc, + mock_close_connection, + mock_open_connection, + mock_init): + # Fail, Fail due to repl partner not found, success. + mock_find_sc.side_effect = [exception.VolumeBackendAPIException(''), + 10000, + 12345, + exception.VolumeBackendAPIException(''), + 10000, + 12345, + 67890] + + # Find SC throws + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.check_for_setup_error) + # Replication enabled but one backend is down. + self.driver.replication_enabled = True + self.driver.backends = [{'target_device_id': '12345', + 'managed_backend_name': 'host@dell1', + 'qosnode': 'cinderqos'}, + {'target_device_id': '67890', + 'managed_backend_name': 'host@dell2', + 'qosnode': 'otherqos'}] + self.assertRaises(exception.InvalidHost, + self.driver.check_for_setup_error) + # Good run. Should run without exceptions. 
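+        # find_sc's remaining side_effect values (10000, 12345, 67890) let
+        # the primary SC and both replication targets resolve this time.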
+ self.driver.check_for_setup_error() + # failed over run + mock_find_sc.side_effect = None + mock_find_sc.reset_mock() + mock_find_sc.return_value = 10000 + self.driver.failed_over = True + self.driver.check_for_setup_error() + # find sc should be called exactly once + mock_find_sc.assert_called_once_with() + # No repl run + mock_find_sc.reset_mock() + mock_find_sc.return_value = 10000 + self.driver.failed_over = False + self.driver.replication_enabled = False + self.driver.backends = None + self.driver.check_for_setup_error() + mock_find_sc.assert_called_once_with() + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_get_volume_extra_specs') def test__create_replications(self, @@ -358,6 +410,71 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): self.assertEqual({}, res) self.driver.backends = backends + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_get_volume_extra_specs') + def test__create_replications_live_volume(self, + mock_get_volume_extra_specs, + mock_close_connection, + mock_open_connection, + mock_init): + backends = self.driver.backends + model_update = {'replication_status': 'enabled', + 'replication_driver_data': '12345'} + + vol = {'id': fake.VOLUME_ID, 'replication_driver_data': ''} + scvol = {'name': fake.VOLUME_ID} + + mock_api = mock.MagicMock() + mock_api.create_live_volume = mock.MagicMock( + return_value={'instanceId': '1'}) + # Live volume with two backends defined. + self.driver.backends = [{'target_device_id': '12345', + 'managed_backend_name': 'host@dell1', + 'qosnode': 'cinderqos', + 'remoteqos': 'remoteqos'}, + {'target_device_id': '67890', + 'managed_backend_name': 'host@dell2', + 'qosnode': 'otherqos', + 'remoteqos': 'remoteqos'}] + mock_get_volume_extra_specs.return_value = { + 'replication:activereplay': ' True', + 'replication_enabled': ' True', + 'replication:livevolume': ' True'} + self.assertRaises(exception.ReplicationError, + self.driver._create_replications, + mock_api, + vol, + scvol) + # Live volume + self.driver.backends = [{'target_device_id': '12345', + 'managed_backend_name': 'host@dell1', + 'qosnode': 'cinderqos', + 'diskfolder': 'ssd', + 'remoteqos': 'remoteqos'}] + res = self.driver._create_replications(mock_api, vol, scvol) + mock_api.create_live_volume.assert_called_once_with( + scvol, '12345', True, False, False, 'cinderqos', 'remoteqos') + self.assertEqual(model_update, res) + # Active replay False + mock_get_volume_extra_specs.return_value = { + 'replication_enabled': ' True', + 'replication:livevolume': ' True'} + res = self.driver._create_replications(mock_api, vol, scvol) + mock_api.create_live_volume.assert_called_with( + scvol, '12345', False, False, False, 'cinderqos', 'remoteqos') + self.assertEqual(model_update, res) + # Sync + mock_get_volume_extra_specs.return_value = { + 'replication_enabled': ' True', + 'replication:livevolume': ' True', + 'replication_type': ' sync'} + res = self.driver._create_replications(mock_api, vol, scvol) + mock_api.create_live_volume.assert_called_with( + scvol, '12345', False, True, False, 'cinderqos', 'remoteqos') + self.assertEqual(model_update, res) + + self.driver.backends = backends + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_get_volume_extra_specs') def test__delete_replications(self, @@ -388,6 +505,45 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): mock_api.delete_replication.assert_any_call(scvol, 67890) self.driver.backends = backends + 
@mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_get_volume_extra_specs') + def test__delete_live_volume(self, + mock_get_volume_extra_specs, + mock_close_connection, + mock_open_connection, + mock_init): + backends = self.driver.backends + vol = {'id': fake.VOLUME_ID} + mock_api = mock.MagicMock() + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api.get_live_volume = mock.MagicMock(return_value=(sclivevol, + False)) + # No replication driver data. + ret = self.driver._delete_live_volume(mock_api, vol) + self.assertFalse(ret) + # Bogus rdd + vol = {'id': fake.VOLUME_ID, 'replication_driver_data': ''} + ret = self.driver._delete_live_volume(mock_api, vol) + self.assertFalse(ret) + # Valid delete. + mock_api.delete_live_volume = mock.MagicMock(return_value=True) + vol = {'id': fake.VOLUME_ID, 'replication_driver_data': '102'} + ret = self.driver._delete_live_volume(mock_api, vol) + self.assertTrue(ret) + # Wrong ssn. + vol = {'id': fake.VOLUME_ID, 'replication_driver_data': '103'} + ret = self.driver._delete_live_volume(mock_api, vol) + self.assertFalse(ret) + # No live volume found. + mock_api.get_live_volume.return_value = (None, False) + ret = self.driver._delete_live_volume(mock_api, vol) + self.assertFalse(ret) + + self.driver.backends = backends + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_volume', return_value=VOLUME) @@ -530,7 +686,12 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume', return_value=True) + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_get_replication_specs', + return_value={'enabled': True, + 'live': False}) def test_delete_volume(self, + mock_get_replication_specs, mock_delete_volume, mock_delete_replications, mock_close_connection, @@ -547,12 +708,38 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): self.assertTrue(mock_delete_replications.called) self.assertEqual(2, mock_delete_replications.call_count) + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_delete_live_volume') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'delete_volume', + return_value=True) + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_get_replication_specs', + return_value={'enabled': True, + 'live': True}) + def test_delete_volume_live_volume(self, + mock_get_replication_specs, + mock_delete_volume, + mock_delete_live_volume, + mock_close_connection, + mock_open_connection, + mock_init): + volume = {'id': fake.VOLUME_ID, 'provider_id': '1.1'} + self.driver.delete_volume(volume) + mock_delete_volume.assert_called_with(fake.VOLUME_ID, '1.1') + self.assertTrue(mock_delete_live_volume.called) + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_delete_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume', return_value=False) + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_get_replication_specs', + return_value={'enabled': True, + 'live': False}) def test_delete_volume_failure(self, + mock_get_replication_specs, mock_delete_volume, mock_delete_replications, mock_close_connection, @@ -644,7 +831,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): # verify find_volume has been called and that is has been called twice 
         mock_find_volume.called_once_with(fake.VOLUME_ID, provider_id)
         mock_get_volume.called_once_with(provider_id)
-        props = self.ISCSI_PROPERTIES
+        props = self.ISCSI_PROPERTIES.copy()
         expected = {'data': props,
                     'driver_volume_type': 'iscsi'}
         self.assertEqual(expected, data, 'Unexpected return value')
@@ -767,6 +954,235 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                           volume,
                           connector)
 
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_server',
+                       return_value=None)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'create_server',
+                       return_value=SCSERVER)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'get_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'map_volume',
+                       return_value=MAPPINGS[0])
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_iscsi_properties',
+                       return_value=ISCSI_PROPERTIES)
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_is_live_vol')
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       'initialize_secondary')
+    def test_initialize_connection_live_volume(self,
+                                               mock_initialize_secondary,
+                                               mock_is_live_vol,
+                                               mock_find_iscsi_props,
+                                               mock_map_volume,
+                                               mock_get_volume,
+                                               mock_find_volume,
+                                               mock_create_server,
+                                               mock_find_server,
+                                               mock_close_connection,
+                                               mock_open_connection,
+                                               mock_init):
+        volume = {'id': fake.VOLUME_ID}
+        connector = self.connector
+        sclivevol = {'instanceId': '101.101',
+                     'secondaryVolume': {'instanceId': '102.101',
+                                         'instanceName': fake.VOLUME_ID},
+                     'secondaryScSerialNumber': 102}
+        mock_is_live_vol.return_value = sclivevol
+        lvol_properties = {'access_mode': 'rw',
+                           'target_discovered': False,
+                           'target_iqn':
+                               u'iqn:1',
+                           'target_iqns':
+                               [
+                                   u'iqn:1',
+                                   u'iqn:2'],
+                           'target_lun': 1,
+                           'target_luns': [1, 1],
+                           'target_portal': u'192.168.1.21:3260',
+                           'target_portals': [u'192.168.1.21:3260',
+                                              u'192.168.1.22:3260']}
+        mock_initialize_secondary.return_value = lvol_properties
+        props = self.ISCSI_PROPERTIES.copy()
+        # rebind, not '+=', so the shallow copy can't mutate ISCSI_PROPERTIES
+        props['target_iqns'] = (props['target_iqns'] +
+                                lvol_properties['target_iqns'])
+        props['target_luns'] = (props['target_luns'] +
+                                lvol_properties['target_luns'])
+        props['target_portals'] = (props['target_portals'] +
+                                   lvol_properties['target_portals'])
+        ret = self.driver.initialize_connection(volume, connector)
+        expected = {'data': props,
+                    'driver_volume_type': 'iscsi'}
+        self.assertEqual(expected, ret)
+
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_get_replication_specs',
+                       return_value={'enabled': True, 'live': True})
+    def test_is_live_vol(self,
+                         mock_get_replication_specs,
+                         mock_close_connection,
+                         mock_open_connection,
+                         mock_init):
+        volume = {'id': fake.VOLUME_ID,
+                  'provider_id': '101.1'}
+        ret = self.driver._is_live_vol(volume)
+        self.assertTrue(ret)
+
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_get_replication_specs',
+                       return_value={'enabled': True, 'live': False})
+    def test_is_live_vol_repl_not_live(self,
+                                       mock_get_replication_specs,
+                                       mock_close_connection,
+                                       mock_open_connection,
+                                       mock_init):
+        volume = {'id': fake.VOLUME_ID}
+        ret = self.driver._is_live_vol(volume)
+        self.assertFalse(ret)
+
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_get_replication_specs',
+                       return_value={'enabled': False, 'live': False})
+    def test_is_live_vol_no_repl(self,
+                                 mock_get_replication_specs,
+                                 mock_close_connection,
mock_open_connection, + mock_init): + volume = {'id': fake.VOLUME_ID} + ret = self.driver._is_live_vol(volume) + self.assertFalse(ret) + + def test_initialize_secondary(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.map_secondary_volume = mock.MagicMock( + return_value=self.VOLUME) + mock_api.find_iscsi_properties = mock.MagicMock( + return_value=self.ISCSI_PROPERTIES) + mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) + ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') + self.assertEqual(self.ISCSI_PROPERTIES, ret) + + def test_initialize_secondary_create_server(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=None) + mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.map_secondary_volume = mock.MagicMock( + return_value=self.VOLUME) + mock_api.find_iscsi_properties = mock.MagicMock( + return_value=self.ISCSI_PROPERTIES) + mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) + ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') + self.assertEqual(self.ISCSI_PROPERTIES, ret) + + def test_initialize_secondary_no_server(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=None) + mock_api.create_server = mock.MagicMock(return_value=None) + expected = {'target_discovered': False, + 'target_iqn': None, + 'target_iqns': [], + 'target_portal': None, + 'target_portals': [], + 'target_lun': None, + 'target_luns': [], + } + ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') + self.assertEqual(expected, ret) + + def test_initialize_secondary_map_fail(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.map_secondary_volume = mock.MagicMock(return_value=None) + expected = {'target_discovered': False, + 'target_iqn': None, + 'target_iqns': [], + 'target_portal': None, + 'target_portals': [], + 'target_lun': None, + 'target_luns': [], + } + ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') + self.assertEqual(expected, ret) + + def test_initialize_secondary_vol_not_found(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.map_secondary_volume = mock.MagicMock( + return_value=self.VOLUME) + mock_api.get_volume = mock.MagicMock(return_value=None) + 
expected = {'target_discovered': False, + 'target_iqn': None, + 'target_iqns': [], + 'target_portal': None, + 'target_portals': [], + 'target_lun': None, + 'target_luns': [], + } + ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') + self.assertEqual(expected, ret) + + def test_terminate_secondary(self, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_api = mock.MagicMock() + mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) + mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) + mock_api.unmap_volume = mock.MagicMock() + self.driver.terminate_secondary(mock_api, sclivevol, 'iqn') + mock_api.find_server.assert_called_once_with('iqn', 102) + mock_api.get_volume.assert_called_once_with('102.101') + mock_api.unmap_volume.assert_called_once_with(self.VOLUME, + self.SCSERVER) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @@ -789,6 +1201,44 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) self.assertIsNone(res, 'None expected') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_server', + return_value=SCSERVER) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_volume', + return_value=VOLUME) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'unmap_volume', + return_value=True) + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_is_live_vol') + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + 'terminate_secondary') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'get_live_volume') + def test_terminate_connection_live_volume(self, + mock_get_live_vol, + mock_terminate_secondary, + mock_is_live_vol, + mock_unmap_volume, + mock_find_volume, + mock_find_server, + mock_close_connection, + mock_open_connection, + mock_init): + volume = {'id': fake.VOLUME_ID} + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101', + 'instanceName': fake.VOLUME_ID}, + 'secondaryScSerialNumber': 102} + mock_is_live_vol.return_value = True + mock_get_live_vol.return_value = (sclivevol, False) + connector = self.connector + res = self.driver.terminate_connection(volume, connector) + mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) + self.assertIsNone(res, 'None expected') + self.assertTrue(mock_terminate_secondary.called) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @@ -1371,7 +1821,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): context = {} volume = {'id': fake.VOLUME_ID, 'provider_id': 'fake'} self.driver.ensure_export(context, volume) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake') + mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake', False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', @@ -1386,7 +1836,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): self.assertRaises(exception.VolumeBackendAPIException, self.driver.ensure_export, context, volume) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None) + mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', @@ -1400,7 +1850,7 @@ 
class DellSCSanISCSIDriverTestCase(test.TestCase):
         volume = {'id': fake.VOLUME_ID, 'provider_id': 'fake'}
         self.assertRaises(exception.VolumeBackendAPIException,
                           self.driver.ensure_export, context, volume)
-        mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake')
+        mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake', False)
 
     @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                        'find_volume',
@@ -2201,8 +2651,72 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
         self.assertIsNone(destssn)
         self.driver.backends = backends
 
-    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
-                       'break_replication')
+    def test__failover_live_volume(self,
+                                   mock_close_connection,
+                                   mock_open_connection,
+                                   mock_init):
+        mock_api = mock.MagicMock()
+        sclivevol = {'instanceId': '101.100',
+                     'primaryVolume': {'instanceId': '101.101',
+                                       'instanceName': fake.VOLUME2_ID},
+                     'secondaryVolume': {'instanceId': '102.101',
+                                         'instanceName': fake.VOLUME_ID},
+                     'secondaryScSerialNumber': 102}
+        postfail = {'instanceId': '101.100',
+                    'primaryVolume': {'instanceId': '102.101',
+                                      'instanceName': fake.VOLUME_ID},
+                    'secondaryVolume': {'instanceId': '101.101',
+                                        'instanceName': fake.VOLUME2_ID},
+                    'secondaryScSerialNumber': 102}
+        mock_api.get_live_volume = mock.MagicMock()
+        mock_api.get_live_volume.side_effect = [(sclivevol, False),
+                                                (postfail, True),
+                                                (sclivevol, False),
+                                                (sclivevol, False)
+                                                ]
+        # Good run.
+        mock_api.swap_roles_live_volume = mock.MagicMock(return_value=True)
+        model_update = {'provider_id': '102.101',
+                        'replication_status': 'failed-over'}
+        ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID,
+                                                '101.100')
+        self.assertEqual(model_update, ret)
+        # Swap fail
+        mock_api.swap_roles_live_volume.return_value = False
+        model_update = {'status': 'error'}
+        ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID,
+                                                '101.100')
+        self.assertEqual(model_update, ret)
+        # Can't find live volume.
+        # Clear the queued side_effect so (None, False) is actually returned.
+        mock_api.get_live_volume.side_effect = None
+        mock_api.get_live_volume.return_value = (None, False)
+        ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID,
+                                                '101.100')
+        self.assertEqual(model_update, ret)
+
+    def test__failover_replication(self,
+                                   mock_close_connection,
+                                   mock_open_connection,
+                                   mock_init):
+        rvol = {'instanceId': '102.101'}
+        mock_api = mock.MagicMock()
+        mock_api.break_replication = mock.MagicMock(return_value=rvol)
+        # Good run.
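+        # break_replication hands back the secondary volume; its instanceId
+        # becomes the failed-over volume's new provider_id.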
+ model_update = {'replication_status': 'failed-over', + 'provider_id': '102.101'} + ret = self.driver._failover_replication(mock_api, fake.VOLUME_ID, + '101.100', 102) + self.assertEqual(model_update, ret) + # break fail + mock_api.break_replication.return_value = None + model_update = {'status': 'error'} + ret = self.driver._failover_replication(mock_api, fake.VOLUME_ID, + '101.100', 102) + self.assertEqual(model_update, ret) + + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_failover_replication') @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_parse_secondary') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, @@ -2211,15 +2723,20 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): 'remove_mappings') @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, 'failback_volumes') + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_get_replication_specs') def test_failover_host(self, + mock_get_replication_specs, mock_failback_volumes, mock_remove_mappings, mock_find_volume, mock_parse_secondary, - mock_break_replication, + mock_failover_replication, mock_close_connection, mock_open_connection, mock_init): + mock_get_replication_specs.return_value = {'enabled': False, + 'live': False} self.driver.replication_enabled = False self.driver.failed_over = False volumes = [{'id': fake.VOLUME_ID, @@ -2236,12 +2753,133 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): '12345') # Good run self.driver.replication_enabled = True + mock_get_replication_specs.return_value = {'enabled': True, + 'live': False} mock_parse_secondary.return_value = 12345 expected_destssn = 12345 - mock_break_replication.side_effect = [{'instanceId': '2.1'}, # test1 - {'instanceId': '2.2'}, - {'instanceId': '2.1'}, # test2 - {'instanceId': '2.1'}] # test3 + mock_failover_replication.side_effect = [ + {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 1 + {'provider_id': '2.2', 'replication_status': 'failed-over'}, + {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 2 + {'provider_id': '2.1', 'replication_status': 'failed-over'}] # 3 + expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': + {'replication_status': 'failed-over', + 'provider_id': '2.1'}}, + {'volume_id': fake.VOLUME2_ID, 'updates': + {'replication_status': 'failed-over', + 'provider_id': '2.2'}}] + destssn, volume_update = self.driver.failover_host( + {}, volumes, '12345') + self.assertEqual(expected_destssn, destssn) + self.assertEqual(expected_volume_update, volume_update) + # Good run. Not all volumes replicated. + volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, + {'id': fake.VOLUME2_ID, 'replication_driver_data': ''}] + expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': + {'replication_status': 'failed-over', + 'provider_id': '2.1'}}, + {'volume_id': fake.VOLUME2_ID, 'updates': + {'status': 'error'}}] + self.driver.failed_over = False + self.driver.active_backend_id = None + destssn, volume_update = self.driver.failover_host( + {}, volumes, '12345') + self.assertEqual(expected_destssn, destssn) + self.assertEqual(expected_volume_update, volume_update) + # Good run. Not all volumes replicated. No replication_driver_data. 
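+        # The second volume has no replication_driver_data key at all, so
+        # failover can only mark it 'error'.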
+ volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, + {'id': fake.VOLUME2_ID}] + expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': + {'replication_status': 'failed-over', + 'provider_id': '2.1'}}, + {'volume_id': fake.VOLUME2_ID, 'updates': + {'status': 'error'}}] + self.driver.failed_over = False + self.driver.active_backend_id = None + destssn, volume_update = self.driver.failover_host( + {}, volumes, '12345') + self.assertEqual(expected_destssn, destssn) + self.assertEqual(expected_volume_update, volume_update) + # Good run. No volumes replicated. No replication_driver_data. + volumes = [{'id': fake.VOLUME_ID}, + {'id': fake.VOLUME2_ID}] + expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': + {'status': 'error'}}, + {'volume_id': fake.VOLUME2_ID, 'updates': + {'status': 'error'}}] + self.driver.failed_over = False + self.driver.active_backend_id = None + destssn, volume_update = self.driver.failover_host( + {}, volumes, '12345') + self.assertEqual(expected_destssn, destssn) + self.assertEqual(expected_volume_update, volume_update) + # Secondary not found. + mock_parse_secondary.return_value = None + self.driver.failed_over = False + self.driver.active_backend_id = None + self.assertRaises(exception.InvalidInput, + self.driver.failover_host, + {}, + volumes, + '54321') + # Already failed over. + self.driver.failed_over = True + self.driver.failover_host({}, volumes, 'default') + mock_failback_volumes.assert_called_once_with(volumes) + # Already failed over. + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.failover_host, {}, volumes, '67890') + self.driver.replication_enabled = False + + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_failover_live_volume') + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_parse_secondary') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_volume') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'remove_mappings') + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + 'failback_volumes') + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, + '_get_replication_specs') + def test_failover_host_live_volume(self, + mock_get_replication_specs, + mock_failback_volumes, + mock_remove_mappings, + mock_find_volume, + mock_parse_secondary, + mock_failover_live_volume, + mock_close_connection, + mock_open_connection, + mock_init): + mock_get_replication_specs.return_value = {'enabled': False, + 'live': False} + self.driver.replication_enabled = False + self.driver.failed_over = False + volumes = [{'id': fake.VOLUME_ID, + 'replication_driver_data': '12345', + 'provider_id': '1.1'}, + {'id': fake.VOLUME2_ID, + 'replication_driver_data': '12345', + 'provider_id': '1.2'}] + # No run. Not doing repl. Should raise. 
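+        # Replication is disabled on the driver at this point, so the
+        # failover request has nowhere to go and should raise.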
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.failover_host,
+                          {},
+                          volumes,
+                          '12345')
+        # Good run
+        self.driver.replication_enabled = True
+        mock_get_replication_specs.return_value = {'enabled': True,
+                                                   'live': True}
+        mock_parse_secondary.return_value = 12345
+        expected_destssn = 12345
+        mock_failover_live_volume.side_effect = [
+            {'provider_id': '2.1', 'replication_status': 'failed-over'},  # 1
+            {'provider_id': '2.2', 'replication_status': 'failed-over'},
+            {'provider_id': '2.1', 'replication_status': 'failed-over'},  # 2
+            {'provider_id': '2.1', 'replication_status': 'failed-over'}]  # 3
        expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates':
                                   {'replication_status': 'failed-over',
                                    'provider_id': '2.1'}},
                                  {'volume_id': fake.VOLUME2_ID, 'updates':
                                   {'replication_status': 'failed-over',
                                    'provider_id': '2.2'}}]
@@ -2531,6 +3169,77 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
        self.driver.failed_over = False
        self.driver.backends = backends
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_get_qos',
+                       return_value='cinderqos')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_volume')
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_update_backend')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'get_live_volume')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'swap_roles_live_volume')
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_get_replication_specs')
+    def test_failback_volumes_live_vol(self,
+                                       mock_get_replication_specs,
+                                       mock_swap_roles_live_volume,
+                                       mock_get_live_volume,
+                                       mock_update_backend,
+                                       mock_find_volume,
+                                       mock_get_qos,
+                                       mock_close_connection,
+                                       mock_open_connection,
+                                       mock_init):
+        self.driver.replication_enabled = True
+        self.driver.failed_over = True
+        self.driver.active_backend_id = 12345
+        self.driver.primaryssn = 11111
+        backends = self.driver.backends
+        self.driver.backends = [{'target_device_id': '12345',
+                                 'qosnode': 'cinderqos',
+                                 'remoteqos': 'remoteqos'}]
+        volumes = [{'id': fake.VOLUME_ID,
+                    'replication_driver_data': '12345',
+                    'provider_id': '12345.1'},
+                   {'id': fake.VOLUME2_ID,
+                    'replication_driver_data': '12345',
+                    'provider_id': '12345.2'}]
+        mock_get_live_volume.side_effect = [(
+            {'instanceId': '11111.101',
+             'secondaryVolume': {'instanceId': '11111.1001',
+                                 'instanceName': fake.VOLUME_ID},
+             'secondaryScSerialNumber': 11111}, True), (
+            {'instanceId': '11111.102',
+             'secondaryVolume': {'instanceId': '11111.1002',
+                                 'instanceName': fake.VOLUME2_ID},
+             'secondaryScSerialNumber': 11111}, True
+        )]
+        mock_get_replication_specs.return_value = {'enabled': True,
+                                                   'live': True}
+        mock_swap_roles_live_volume.side_effect = [True, True]
+        mock_find_volume.side_effect = [{'instanceId': '12345.1'},
+                                        {'instanceId': '12345.2'}]
+
+        # Run the failback and verify that each volume's update picks up
+        # the provider id of its swapped live volume secondary.
+ ret = self.driver.failback_volumes(volumes) + expected = [{'updates': {'provider_id': '11111.1001', + 'replication_status': 'enabled', + 'status': 'available'}, + 'volume_id': fake.VOLUME_ID}, + {'updates': {'provider_id': '11111.1002', + 'replication_status': 'enabled', + 'status': 'available'}, + 'volume_id': fake.VOLUME2_ID}] + + self.assertEqual(expected, ret) + + self.driver.replication_enabled = False + self.driver.failed_over = False + self.driver.backends = backends + @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_get_qos', return_value='cinderqos') @@ -3167,6 +3876,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): self.driver.failback_timeout = 1 self.driver._wait_for_replication(mock_api, items) self.assertEqual(expected, items) + calls = [mock.call(1)] * 5 + self.mock_sleep.assert_has_calls(calls) self.backends = backends @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, diff --git a/cinder/tests/unit/test_dellscapi.py b/cinder/tests/unit/volume/drivers/dell/test_dellscapi.py similarity index 91% rename from cinder/tests/unit/test_dellscapi.py rename to cinder/tests/unit/volume/drivers/dell/test_dellscapi.py index d24bdcf7c..f908b2bec 100644 --- a/cinder/tests/unit/test_dellscapi.py +++ b/cinder/tests/unit/volume/drivers/dell/test_dellscapi.py @@ -1916,7 +1916,7 @@ class DellSCSanAPITestCase(test.TestCase): False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', - self.configuration.dell_sc_volume_folder) + self.configuration.dell_sc_volume_folder, -1) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, @@ -1931,7 +1931,7 @@ class DellSCSanAPITestCase(test.TestCase): False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', - self.configuration.dell_sc_volume_folder) + self.configuration.dell_sc_volume_folder, -1) self.assertEqual(self.FLDR, res, 'Unexpected Folder') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, @@ -2003,7 +2003,7 @@ class DellSCSanAPITestCase(test.TestCase): True) mock_find_folder.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', - self.configuration.dell_sc_volume_folder) + self.configuration.dell_sc_volume_folder, -1) self.assertTrue(mock_create_folder_path.called) self.assertEqual(self.FLDR, res, 'Unexpected Folder') @@ -2546,7 +2546,7 @@ class DellSCSanAPITestCase(test.TestCase): res = self.scapi._find_server_folder(False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', - self.configuration.dell_sc_server_folder) + self.configuration.dell_sc_server_folder, 12345) self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, @@ -2566,7 +2566,7 @@ class DellSCSanAPITestCase(test.TestCase): res = self.scapi._find_server_folder(True) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', - self.configuration.dell_sc_server_folder) + self.configuration.dell_sc_server_folder, 12345) self.assertTrue(mock_create_folder_path.called) self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder') @@ -2583,7 +2583,7 @@ class DellSCSanAPITestCase(test.TestCase): False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', - self.configuration.dell_sc_volume_folder) + self.configuration.dell_sc_volume_folder, 12345) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.HttpClient, @@ -2674,20 
+2674,23 @@ class DellSCSanAPITestCase(test.TestCase): res = self.scapi._find_serveros('Red Hat Linux 6.x') self.assertIsNone(res, 'None expected') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_find_server_folder', + return_value=SVR_FLDR) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_hba', return_value=FC_HBA) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - 'create_server', + '_create_server', return_value=SCSERVER) def test_create_server_multiple_hbas(self, mock_create_server, mock_add_hba, + mock_find_server_folder, mock_close_connection, mock_open_connection, mock_init): - res = self.scapi.create_server_multiple_hbas( - self.WWNS) + res = self.scapi.create_server(self.WWNS) self.assertTrue(mock_create_server.called) self.assertTrue(mock_add_hba.called) self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') @@ -3417,93 +3420,6 @@ class DellSCSanAPITestCase(test.TestCase): 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_domains', - return_value=ISCSI_FLT_DOMAINS) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_mappings', - return_value=MAPPINGS) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_is_virtualport_mode', - return_value=True) - def test_find_iscsi_properties_by_address(self, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_domains, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find iSCSI mappings by IP Address & port - res = self.scapi.find_iscsi_properties( - self.VOLUME, '192.168.0.21', 3260) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_domains.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_domains', - return_value=ISCSI_FLT_DOMAINS) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_mappings', - return_value=MAPPINGS) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_is_virtualport_mode', - return_value=True) - def test_find_iscsi_properties_by_address_not_found( - self, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_domains, - mock_find_ctrl_port, - mock_find_active_ctrl, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find iSCSI mappings by IP Address & port are not found - res = self.scapi.find_iscsi_properties( - self.VOLUME, '192.168.1.21', 
3260) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_domains.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_ctrl.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=[]) @@ -3747,94 +3663,6 @@ class DellSCSanAPITestCase(test.TestCase): self.assertTrue(mock_find_controller_port_iscsi_config.called) self.assertTrue(mock_find_active_controller.called) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_mappings', - return_value=MAPPINGS) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_is_virtualport_mode', - return_value=False) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_controller_port_iscsi_config', - return_value=ISCSI_CONFIG) - def test_find_iscsi_properties_by_address_legacy( - self, - mock_find_controller_port_iscsi_config, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find iSCSI mappings by IP Address & port - res = self.scapi.find_iscsi_properties( - self.VOLUME, '192.168.0.21', 3260) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - self.assertTrue(mock_find_controller_port_iscsi_config.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_mappings', - return_value=MAPPINGS) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_is_virtualport_mode', - return_value=False) - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, - '_find_controller_port_iscsi_config', - return_value=ISCSI_CONFIG) - def test_find_iscsi_properties_by_address_not_found_legacy( - self, - mock_find_controller_port_iscsi_config, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_ctrl_port, - mock_find_active_ctrl, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find iSCSI mappings by IP Address & port are not found - res = self.scapi.find_iscsi_properties( - self.VOLUME, '192.168.1.21', 3260) - 
self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_ctrl.called) - self.assertTrue(mock_find_controller_port_iscsi_config.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.64702') @@ -4798,7 +4626,7 @@ class DellSCSanAPITestCase(test.TestCase): fake_scvolume = {'name': 'name', 'instanceId': 'id'} res = self.scapi.update_storage_profile(fake_scvolume, None) self.assertTrue(res) - self.assertTrue('fakeId' in repr(mock_put.call_args_list[0])) + self.assertIn('fakeId', repr(mock_put.call_args_list[0])) self.assertEqual(1, LOG.info.call_count) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, @@ -5965,6 +5793,133 @@ class DellSCSanAPITestCase(test.TestCase): self.scapi._find_qos, 'Cinder QoS') + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'put', + return_value=RESPONSE_400) + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'get', + return_value=RESPONSE_200) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json', + return_value=SCREPL) + def test_update_replicate_active_replay_fail(self, + mock_get_json, + mock_get, + mock_put, + mock_close_connection, + mock_open_connection, + mock_init): + ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, + True) + self.assertFalse(ret) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'get', + return_value=RESPONSE_200) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json', + return_value=SCREPL) + def test_update_replicate_active_replay_nothing_to_do( + self, mock_get_json, mock_get, mock_close_connection, + mock_open_connection, mock_init): + ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, + False) + self.assertTrue(ret) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'get', + return_value=RESPONSE_200) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json', + return_value=[]) + def test_update_replicate_active_replay_not_found(self, + mock_get_json, + mock_get, + mock_close_connection, + mock_open_connection, + mock_init): + ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, + True) + self.assertTrue(ret) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'get', + return_value=RESPONSE_400) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json', + return_value=[]) + def test_update_replicate_active_replay_not_found2(self, + mock_get_json, + mock_get, + mock_close_connection, + mock_open_connection, + mock_init): + ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, + True) + self.assertTrue(ret) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'post', + return_value=RESPONSE_200) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json', + return_value=[{'instanceId': '12345.1'}]) + def test_get_disk_folder(self, + mock_get_json, + mock_post, + mock_close_connection, + mock_open_connection, + mock_init): + ret = self.scapi._get_disk_folder(12345, 'name') + expected_payload 
= {'filter': {'filterType': 'AND', 'filters': [ + {'filterType': 'Equals', 'attributeName': 'scSerialNumber', + 'attributeValue': 12345}, + {'filterType': 'Equals', 'attributeName': 'name', + 'attributeValue': 'name'}]}} + mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', + expected_payload) + self.assertEqual({'instanceId': '12345.1'}, ret) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'post', + return_value=RESPONSE_400) + def test_get_disk_folder_fail(self, + mock_post, + mock_close_connection, + mock_open_connection, + mock_init): + ret = self.scapi._get_disk_folder(12345, 'name') + expected_payload = {'filter': {'filterType': 'AND', 'filters': [ + {'filterType': 'Equals', 'attributeName': 'scSerialNumber', + 'attributeValue': 12345}, + {'filterType': 'Equals', 'attributeName': 'name', + 'attributeValue': 'name'}]}} + mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', + expected_payload) + self.assertIsNone(ret) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'post', + return_value=RESPONSE_200) + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json') + def test_get_disk_folder_fail_bad_json(self, + mock_get_json, + mock_post, + mock_close_connection, + mock_open_connection, + mock_init): + mock_get_json.side_effect = (exception.VolumeBackendAPIException('')) + ret = self.scapi._get_disk_folder(12345, 'name') + expected_payload = {'filter': {'filterType': 'AND', 'filters': [ + {'filterType': 'Equals', 'attributeName': 'scSerialNumber', + 'attributeValue': 12345}, + {'filterType': 'Equals', 'attributeName': 'name', + 'attributeValue': 'name'}]}} + mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', + expected_payload) + self.assertIsNone(ret) + @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @@ -6272,9 +6227,7 @@ class DellSCSanAPITestCase(test.TestCase): None] # 4 # Much like find volume we do not gate on this. 
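        # Only two calls remain: the first finds a replication, the second
        # does not, and neither blocks the operation.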
mock_get_screplication.side_effect = [self.SCREPL[0], # 1 - None, # 2 - None, # 3 - None] # 4 + None] # 2 # This mock_find_repl_volume.side_effect = [self.VOLUME, # 1 self.VOLUME, # 2 @@ -6446,6 +6399,335 @@ class DellSCSanAPITestCase(test.TestCase): scvol, 'a,b') + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'get') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json') + def test_get_live_volume(self, + mock_get_json, + mock_get, + mock_close_connection, + mock_open_connection, + mock_init): + # Basic check + retlv, retswapped = self.scapi.get_live_volume(None) + self.assertIsNone(retlv) + self.assertFalse(retswapped) + lv1 = {'primaryVolume': {'instanceId': '12345.1'}, + 'secondaryVolume': {'instanceId': '67890.1'}} + lv2 = {'primaryVolume': {'instanceId': '12345.2'}} + mock_get_json.return_value = [lv1, lv2] + mock_get.return_value = self.RESPONSE_200 + # Good Run + retlv, retswapped = self.scapi.get_live_volume('12345.2') + self.assertEqual(lv2, retlv) + self.assertFalse(retswapped) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'get') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json') + def test_get_live_volume_not_found(self, + mock_get_json, + mock_get, + mock_close_connection, + mock_open_connection, + mock_init): + lv1 = {'primaryVolume': {'instanceId': '12345.1'}, + 'secondaryVolume': {'instanceId': '67890.1'}} + lv2 = {'primaryVolume': {'instanceId': '12345.2'}, + 'secondaryVolume': {'instanceId': '67890.2'}} + mock_get_json.return_value = [lv1, lv2] + mock_get.return_value = self.RESPONSE_200 + retlv, retswapped = self.scapi.get_live_volume('12345.3') + self.assertIsNone(retlv) + self.assertFalse(retswapped) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'get') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json') + def test_get_live_volume_swapped(self, + mock_get_json, + mock_get, + mock_close_connection, + mock_open_connection, + mock_init): + lv1 = {'primaryVolume': {'instanceId': '12345.1'}, + 'secondaryVolume': {'instanceId': '67890.1'}} + lv2 = {'primaryVolume': {'instanceId': '67890.2'}, + 'secondaryVolume': {'instanceId': '12345.2'}} + mock_get_json.return_value = [lv1, lv2] + mock_get.return_value = self.RESPONSE_200 + retlv, retswapped = self.scapi.get_live_volume('12345.2') + self.assertEqual(lv2, retlv) + self.assertTrue(retswapped) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'get') + def test_get_live_volume_error(self, + mock_get, + mock_close_connection, + mock_open_connection, + mock_init): + mock_get.return_value = self.RESPONSE_400 + retlv, retswapped = self.scapi.get_live_volume('12345.2') + self.assertIsNone(retlv) + self.assertFalse(retswapped) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'post') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_get_json') + def test_map_secondary_volume(self, + mock_get_json, + mock_post, + mock_close_connection, + mock_open_connection, + mock_init): + sclivevol = {'instanceId': '101.101', + 'secondaryVolume': {'instanceId': '102.101'}, + 'secondaryScSerialNumber': 102} + scdestsrv = {'instanceId': '102.1000'} + mock_post.return_value = self.RESPONSE_200 + mock_get_json.return_value = {'instanceId': '102.101.1'} + ret = self.scapi.map_secondary_volume(sclivevol, scdestsrv) + expected_payload = {'Server': '102.1000', + 'Advanced': {'MapToDownServerHbas': True}} + mock_post.assert_called_once_with( + 'StorageCenter/ScLiveVolume/101.101/MapSecondaryVolume', + 
expected_payload, True
+        )
+        self.assertEqual({'instanceId': '102.101.1'}, ret)
+
+    @mock.patch.object(dell_storagecenter_api.HttpClient,
+                       'post')
+    def test_map_secondary_volume_err(self,
+                                      mock_post,
+                                      mock_close_connection,
+                                      mock_open_connection,
+                                      mock_init):
+        sclivevol = {'instanceId': '101.101',
+                     'secondaryVolume': {'instanceId': '102.101'},
+                     'secondaryScSerialNumber': 102}
+        scdestsrv = {'instanceId': '102.1000'}
+        mock_post.return_value = self.RESPONSE_400
+        ret = self.scapi.map_secondary_volume(sclivevol, scdestsrv)
+        expected_payload = {'Server': '102.1000',
+                            'Advanced': {'MapToDownServerHbas': True}}
+        mock_post.assert_called_once_with(
+            'StorageCenter/ScLiveVolume/101.101/MapSecondaryVolume',
+            expected_payload, True
+        )
+        self.assertIsNone(ret)
+
+    @mock.patch.object(dell_storagecenter_api.HttpClient,
+                       'post')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       '_get_json')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       '_find_qos')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc')
+    def test_create_live_volume(self,
+                                mock_find_sc,
+                                mock_find_qos,
+                                mock_get_json,
+                                mock_post,
+                                mock_close_connection,
+                                mock_open_connection,
+                                mock_init):
+        scvol = {'instanceId': '101.1',
+                 'name': 'name'}
+        sclivevol = {'instanceId': '101.101',
+                     'secondaryVolume': {'instanceId': '102.101'},
+                     'secondaryScSerialNumber': 102}
+
+        remotessn = '102'
+        active = True
+        sync = False
+        primaryqos = 'fast'
+        secondaryqos = 'slow'
+        mock_find_sc.return_value = 102
+        mock_find_qos.side_effect = [{'instanceId': '101.1001'},
+                                     {'instanceId': '102.1001'}]
+        mock_post.return_value = self.RESPONSE_200
+        mock_get_json.return_value = sclivevol
+        ret = self.scapi.create_live_volume(scvol, remotessn, active, sync,
+                                            False, primaryqos, secondaryqos)
+        mock_find_sc.assert_called_once_with(102)
+        mock_find_qos.assert_any_call(primaryqos)
+        mock_find_qos.assert_any_call(secondaryqos, 102)
+        self.assertEqual(sclivevol, ret)
+
+    @mock.patch.object(dell_storagecenter_api.HttpClient,
+                       'post')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       '_get_json')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       '_find_qos')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc')
+    def test_create_live_volume_autofailover(self,
+                                             mock_find_sc,
+                                             mock_find_qos,
+                                             mock_get_json,
+                                             mock_post,
+                                             mock_close_connection,
+                                             mock_open_connection,
+                                             mock_init):
+        scvol = {'instanceId': '101.1',
+                 'name': 'name'}
+        sclivevol = {'instanceId': '101.101',
+                     'secondaryVolume': {'instanceId': '102.101'},
+                     'secondaryScSerialNumber': 102}
+
+        remotessn = '102'
+        active = True
+        sync = False
+        primaryqos = 'fast'
+        secondaryqos = 'slow'
+        mock_find_sc.return_value = 102
+        mock_find_qos.side_effect = [{'instanceId': '101.1001'},
+                                     {'instanceId': '102.1001'}]
+        mock_post.return_value = self.RESPONSE_200
+        mock_get_json.return_value = sclivevol
+        ret = self.scapi.create_live_volume(scvol, remotessn, active, sync,
+                                            True, primaryqos, secondaryqos)
+        mock_find_sc.assert_called_once_with(102)
+        mock_find_qos.assert_any_call(primaryqos)
+        mock_find_qos.assert_any_call(secondaryqos, 102)
+        self.assertEqual(sclivevol, ret)
+        # Make sure sync flipped and that we set HighAvailability.
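+        # sync came in as False, but the auto failover path should still
+        # request a Synchronous live volume in HighAvailability mode.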
+ expected = {'SyncMode': 'HighAvailability', + 'SwapRolesAutomaticallyEnabled': False, + 'SecondaryStorageCenter': 102, + 'FailoverAutomaticallyEnabled': True, + 'StorageCenter': 12345, + 'RestoreAutomaticallyEnabled': True, + 'SecondaryQosNode': '102.1001', + 'ReplicateActiveReplay': True, + 'PrimaryQosNode': '101.1001', + 'Type': 'Synchronous', + 'PrimaryVolume': '101.1', + 'SecondaryVolumeAttributes': + {'Notes': 'Created by Dell Cinder Driver', + 'CreateSourceVolumeFolderPath': True, + 'Name': 'name'} + } + mock_post.assert_called_once_with('StorageCenter/ScLiveVolume', + expected, True) + + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'post') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_find_qos') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_sc') + def test_create_live_volume_error(self, + mock_find_sc, + mock_find_qos, + mock_post, + mock_close_connection, + mock_open_connection, + mock_init): + scvol = {'instanceId': '101.1', + 'name': 'name'} + remotessn = '102' + active = True + sync = False + primaryqos = 'fast' + secondaryqos = 'slow' + mock_find_sc.return_value = 102 + mock_find_qos.side_effect = [{'instanceId': '101.1001'}, + {'instanceId': '102.1001'}] + mock_post.return_value = self.RESPONSE_400 + ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, + False, primaryqos, secondaryqos) + mock_find_sc.assert_called_once_with(102) + mock_find_qos.assert_any_call(primaryqos) + mock_find_qos.assert_any_call(secondaryqos, 102) + self.assertIsNone(ret) + + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_find_qos') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_sc') + def test_create_live_volume_no_dest(self, + mock_find_sc, + mock_find_qos, + mock_close_connection, + mock_open_connection, + mock_init): + scvol = {'instanceId': '101.1', + 'name': 'name'} + remotessn = '102' + active = True + sync = False + primaryqos = 'fast' + secondaryqos = 'slow' + mock_find_sc.return_value = 102 + mock_find_qos.return_value = {} + ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, + False, primaryqos, secondaryqos) + mock_find_sc.assert_called_once_with(102) + mock_find_qos.assert_any_call(primaryqos) + mock_find_qos.assert_any_call(secondaryqos, 102) + self.assertIsNone(ret) + + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_find_qos') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_sc') + def test_create_live_volume_no_qos(self, + mock_find_sc, + mock_find_qos, + mock_close_connection, + mock_open_connection, + mock_init): + scvol = {'instanceId': '101.1', + 'name': 'name'} + remotessn = '102' + active = True + sync = False + primaryqos = 'fast' + secondaryqos = 'slow' + mock_find_sc.return_value = 102 + mock_find_qos.return_value = None + ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, + False, primaryqos, secondaryqos) + mock_find_sc.assert_called_once_with(102) + mock_find_qos.assert_any_call(primaryqos) + mock_find_qos.assert_any_call(secondaryqos, 102) + self.assertIsNone(ret) + + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + '_find_qos') + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'find_sc') + def test_create_live_volume_no_secondary_qos(self, + mock_find_sc, + mock_find_qos, + mock_close_connection, + mock_open_connection, + mock_init): + scvol = {'instanceId': '101.1', + 'name': 'name'} + remotessn = '102' + active = True + sync = False + primaryqos = 'fast' + 
secondaryqos = 'slow' + mock_find_sc.return_value = 102 + mock_find_qos.side_effect = [{'instanceId': '101.1001'}, + None] + ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, + False, primaryqos, secondaryqos) + mock_find_sc.assert_called_once_with(102) + mock_find_qos.assert_any_call(primaryqos) + mock_find_qos.assert_any_call(secondaryqos, 102) + self.assertIsNone(ret) + @mock.patch.object(dell_storagecenter_api.HttpClient, 'put') def test_manage_replay(self, @@ -6707,6 +6989,22 @@ class DellSCSanAPITestCase(test.TestCase): self.assertIsNone(retbool) self.assertIsNone(retnum) + @mock.patch.object(dell_storagecenter_api.HttpClient, + 'delete') + def test_delete_live_volume(self, + mock_delete, + mock_close_connection, + mock_open_connection, + mock_init): + mock_delete.return_value = self.RESPONSE_200 + ret = self.scapi.delete_live_volume({'instanceId': '12345.101'}, + True) + self.assertTrue(ret) + mock_delete.return_value = self.RESPONSE_400 + ret = self.scapi.delete_live_volume({'instanceId': '12345.101'}, + True) + self.assertFalse(ret) + class DellSCSanAPIConnectionTestCase(test.TestCase): @@ -7009,37 +7307,215 @@ class DellHttpClientTestCase(test.TestCase): self.httpclient._wait_for_async_complete, self.ASYNCTASK) - @mock.patch.object(dell_storagecenter_api.HttpClient, - '_rest_ret', - return_value=RESPONSE_200) @mock.patch.object(requests.Session, 'get', return_value=RESPONSE_200) def test_get(self, - mock_get, - mock_rest_ret): - ret = self.httpclient.get('url', False) + mock_get): + ret = self.httpclient.get('url') self.assertEqual(self.RESPONSE_200, ret) - mock_rest_ret.assert_called_once_with(self.RESPONSE_200, False) expected_headers = self.httpclient.header.copy() mock_get.assert_called_once_with('https://localhost:3033/api/rest/url', headers=expected_headers, verify=False) - @mock.patch.object(dell_storagecenter_api.HttpClient, - '_rest_ret', - return_value=RESPONSE_200) - @mock.patch.object(requests.Session, - 'get', - return_value=RESPONSE_200) - def test_get_async(self, - mock_get, - mock_rest_ret): - ret = self.httpclient.get('url', True) - self.assertEqual(self.RESPONSE_200, ret) - mock_rest_ret.assert_called_once_with(self.RESPONSE_200, True) - expected_headers = self.httpclient.header.copy() - expected_headers['async'] = True - mock_get.assert_called_once_with('https://localhost:3033/api/rest/url', - headers=expected_headers, - verify=False) + +class DellStorageCenterApiHelperTestCase(test.TestCase): + + """DellStorageCenterApiHelper test case + + Class to test the Storage Center API helper using Mock. 
+ """ + + def setUp(self): + super(DellStorageCenterApiHelperTestCase, self).setUp() + + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'open_connection') + def test_setup_connection(self, + mock_open_connection): + config = mock.MagicMock() + config.dell_sc_ssn = 12345 + config.san_ip = '192.168.0.101' + config.san_login = 'username' + config.san_password = 'password' + config.dell_sc_volume_folder = 'a' + config.dell_sc_server_folder = 'a' + config.dell_sc_verify_cert = False + config.san_port = 3033 + helper = dell_storagecenter_api.StorageCenterApiHelper(config, None, + 'FC') + ret = helper._setup_connection() + self.assertEqual(12345, ret.primaryssn) + self.assertEqual(12345, ret.ssn) + self.assertEqual('FibreChannel', ret.protocol) + mock_open_connection.assert_called_once_with() + + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'open_connection') + def test_setup_connection_iscsi(self, + mock_open_connection): + config = mock.MagicMock() + config.dell_sc_ssn = 12345 + config.san_ip = '192.168.0.101' + config.san_login = 'username' + config.san_password = 'password' + config.dell_sc_volume_folder = 'a' + config.dell_sc_server_folder = 'a' + config.dell_sc_verify_cert = False + config.san_port = 3033 + helper = dell_storagecenter_api.StorageCenterApiHelper(config, None, + 'iSCSI') + ret = helper._setup_connection() + self.assertEqual(12345, ret.primaryssn) + self.assertEqual(12345, ret.ssn) + self.assertEqual('Iscsi', ret.protocol) + mock_open_connection.assert_called_once_with() + + @mock.patch.object(dell_storagecenter_api.StorageCenterApi, + 'open_connection') + def test_setup_connection_failover(self, + mock_open_connection): + config = mock.MagicMock() + config.dell_sc_ssn = 12345 + config.san_ip = '192.168.0.101' + config.san_login = 'username' + config.san_password = 'password' + config.dell_sc_volume_folder = 'a' + config.dell_sc_server_folder = 'a' + config.dell_sc_verify_cert = False + config.san_port = 3033 + helper = dell_storagecenter_api.StorageCenterApiHelper(config, '67890', + 'iSCSI') + ret = helper._setup_connection() + self.assertEqual(12345, ret.primaryssn) + self.assertEqual(67890, ret.ssn) + self.assertEqual('Iscsi', ret.protocol) + mock_open_connection.assert_called_once_with() + + @mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper, + '_setup_connection') + def test_open_connection(self, + mock_setup_connection): + config = mock.MagicMock() + config.dell_sc_ssn = 12345 + config.san_ip = '192.168.0.101' + config.san_login = 'username' + config.san_password = 'password' + config.san_port = 3033 + helper = dell_storagecenter_api.StorageCenterApiHelper(config, None, + 'FC') + mock_connection = mock.MagicMock() + mock_connection.apiversion = '3.1' + mock_setup_connection.return_value = mock_connection + ret = helper.open_connection() + self.assertEqual('3.1', ret.apiversion) + self.assertEqual('192.168.0.101', helper.san_ip) + self.assertEqual('username', helper.san_login) + self.assertEqual('password', helper.san_password) + + @mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper, + '_setup_connection') + def test_open_connection_fail_no_secondary(self, + mock_setup_connection): + + config = mock.MagicMock() + config.dell_sc_ssn = 12345 + config.san_ip = '192.168.0.101' + config.san_login = 'username' + config.san_password = 'password' + config.san_port = 3033 + config.secondary_san_ip = '' + helper = dell_storagecenter_api.StorageCenterApiHelper(config, None, + 'FC') + mock_setup_connection.side_effect = ( + 
exception.VolumeBackendAPIException('abc')) + self.assertRaises(exception.VolumeBackendAPIException, + helper.open_connection) + mock_setup_connection.assert_called_once_with() + self.assertEqual('192.168.0.101', helper.san_ip) + self.assertEqual('username', helper.san_login) + self.assertEqual('password', helper.san_password) + + @mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper, + '_setup_connection') + def test_open_connection_secondary(self, + mock_setup_connection): + + config = mock.MagicMock() + config.dell_sc_ssn = 12345 + config.san_ip = '192.168.0.101' + config.san_login = 'username' + config.san_password = 'password' + config.san_port = 3033 + config.secondary_san_ip = '192.168.0.102' + config.secondary_san_login = 'username2' + config.secondary_san_password = 'password2' + helper = dell_storagecenter_api.StorageCenterApiHelper(config, None, + 'FC') + mock_connection = mock.MagicMock() + mock_connection.apiversion = '3.1' + mock_setup_connection.side_effect = [ + (exception.VolumeBackendAPIException('abc')), mock_connection] + ret = helper.open_connection() + self.assertEqual('3.1', ret.apiversion) + self.assertEqual(2, mock_setup_connection.call_count) + self.assertEqual('192.168.0.102', helper.san_ip) + self.assertEqual('username2', helper.san_login) + self.assertEqual('password2', helper.san_password) + + @mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper, + '_setup_connection') + def test_open_connection_fail_partial_secondary_config( + self, mock_setup_connection): + + config = mock.MagicMock() + config.dell_sc_ssn = 12345 + config.san_ip = '192.168.0.101' + config.san_login = 'username' + config.san_password = 'password' + config.san_port = 3033 + config.secondary_san_ip = '192.168.0.102' + config.secondary_san_login = 'username2' + config.secondary_san_password = '' + helper = dell_storagecenter_api.StorageCenterApiHelper(config, None, + 'FC') + mock_setup_connection.side_effect = ( + exception.VolumeBackendAPIException('abc')) + self.assertRaises(exception.VolumeBackendAPIException, + helper.open_connection) + mock_setup_connection.assert_called_once_with() + self.assertEqual('192.168.0.101', helper.san_ip) + self.assertEqual('username', helper.san_login) + self.assertEqual('password', helper.san_password) + + @mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper, + '_setup_connection') + def test_open_connection_to_secondary_and_back(self, + mock_setup_connection): + + config = mock.MagicMock() + config.dell_sc_ssn = 12345 + config.san_ip = '192.168.0.101' + config.san_login = 'username' + config.san_password = 'password' + config.san_port = 3033 + config.secondary_san_ip = '192.168.0.102' + config.secondary_san_login = 'username2' + config.secondary_san_password = 'password2' + helper = dell_storagecenter_api.StorageCenterApiHelper(config, None, + 'FC') + mock_connection = mock.MagicMock() + mock_connection.apiversion = '3.1' + mock_setup_connection.side_effect = [ + (exception.VolumeBackendAPIException('abc')), mock_connection, + (exception.VolumeBackendAPIException('abc')), mock_connection] + helper.open_connection() + self.assertEqual('192.168.0.102', helper.san_ip) + self.assertEqual('username2', helper.san_login) + self.assertEqual('password2', helper.san_password) + self.assertEqual(2, mock_setup_connection.call_count) + helper.open_connection() + self.assertEqual('192.168.0.101', helper.san_ip) + self.assertEqual('username', helper.san_login) + self.assertEqual('password', helper.san_password) diff --git 
a/cinder/tests/unit/volume/drivers/emc/scaleio/__init__.py b/cinder/tests/unit/volume/drivers/emc/scaleio/__init__.py index ad6d43396..33e31bbd6 100644 --- a/cinder/tests/unit/volume/drivers/emc/scaleio/__init__.py +++ b/cinder/tests/unit/volume/drivers/emc/scaleio/__init__.py @@ -79,7 +79,8 @@ class TestScaleIODriver(test.TestCase): }, 500 ) - VOLUME_NOT_FOUND_ERROR = 78 + OLD_VOLUME_NOT_FOUND_ERROR = 78 + VOLUME_NOT_FOUND_ERROR = 79 HTTPS_MOCK_RESPONSES = {} __COMMON_HTTPS_MOCK_RESPONSES = { diff --git a/cinder/tests/unit/volume/drivers/emc/scaleio/mocks.py b/cinder/tests/unit/volume/drivers/emc/scaleio/mocks.py index 248d8deb5..4dee67db1 100644 --- a/cinder/tests/unit/volume/drivers/emc/scaleio/mocks.py +++ b/cinder/tests/unit/volume/drivers/emc/scaleio/mocks.py @@ -44,6 +44,8 @@ class ScaleIODriver(scaleio.ScaleIODriver): override='test_domain') configuration.set_override('sio_storage_pools', override='test_domain:test_pool') + configuration.set_override('max_over_subscription_ratio', + override=5.0) if 'san_thin_provision' in kwargs: configuration.set_override( 'san_thin_provision', diff --git a/cinder/tests/unit/volume/drivers/emc/scaleio/test_attach_detach_volume.py b/cinder/tests/unit/volume/drivers/emc/scaleio/test_attach_detach_volume.py new file mode 100644 index 000000000..20329c8ba --- /dev/null +++ b/cinder/tests/unit/volume/drivers/emc/scaleio/test_attach_detach_volume.py @@ -0,0 +1,45 @@ +# Copyright (c) 2016 EMC Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import context +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_volume +from cinder.tests.unit.volume.drivers.emc import scaleio + + +class TestAttachDetachVolume(scaleio.TestScaleIODriver): + + def setUp(self): + super(TestAttachDetachVolume, self).setUp() + ctx = context.RequestContext('fake', 'fake', auth_token=True) + self.fake_path = '/fake/path/vol-xx' + self.volume = fake_volume.fake_volume_obj( + ctx, **{'provider_id': fake.PROVIDER_ID}) + self.driver.connector = FakeConnector() + + def test_attach_volume(self): + path = self.driver._sio_attach_volume(self.volume) + self.assertEqual(self.fake_path, path) + + def test_detach_volume(self): + self.driver._sio_detach_volume(self.volume) + + +class FakeConnector(object): + def connect_volume(self, connection_properties): + return {'path': '/fake/path/vol-xx'} + + def disconnect_volume(self, connection_properties, volume): + return None diff --git a/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume.py b/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume.py index 73b7d2c8b..e60d64f38 100644 --- a/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume.py +++ b/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume.py @@ -12,12 +12,17 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+
+import ddt
+import mock
+
 from cinder import context
 from cinder import exception
 from cinder.tests.unit import fake_volume
 from cinder.tests.unit.volume.drivers.emc import scaleio

+@ddt.ddt
 class TestCreateVolume(scaleio.TestScaleIODriver):
     """Test cases for ``ScaleIODriver.create_volume()``"""
     def setUp(self):
@@ -118,3 +123,16 @@ class TestCreateVolume(scaleio.TestScaleIODriver):
         self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
         self.assertRaises(exception.VolumeBackendAPIException,
                           self.test_create_volume)
+
+    @ddt.data({'provisioning:type': 'thin'}, {'provisioning:type': 'thick'})
+    def test_create_thin_thick_volume(self, extraspecs):
+        self.driver._get_volumetype_extraspecs = mock.MagicMock()
+        self.driver._get_volumetype_extraspecs.return_value = extraspecs
+        self.driver.create_volume(self.volume)
+
+    def test_create_volume_bad_provisioning_type(self):
+        extraspecs = {'provisioning:type': 'other'}
+        self.driver._get_volumetype_extraspecs = mock.MagicMock()
+        self.driver._get_volumetype_extraspecs.return_value = extraspecs
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.test_create_volume)
diff --git a/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py b/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py
index 0965f0e05..2b135e968 100644
--- a/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py
+++ b/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py
@@ -68,7 +68,7 @@ class TestCreateVolumeFromSnapShot(scaleio.TestScaleIODriver):
             'instances/System/action/snapshotVolumes':
                 mocks.MockHTTPSResponse(
                     {
-                        'errorCode': self.VOLUME_NOT_FOUND_ERROR,
+                        'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR,
                         'message': 'BadStatus Volume Test',
                     }, 400
                 ),
diff --git a/cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_snapshot.py b/cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_snapshot.py
index 6e8b78dfc..3048799c5 100644
--- a/cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_snapshot.py
+++ b/cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_snapshot.py
@@ -63,14 +63,14 @@ class TestDeleteSnapShot(scaleio.TestScaleIODriver):
             'types/Volume/instances/getByName::' +
                 self.snapshot_name_2x_enc: mocks.MockHTTPSResponse(
                     {
-                        'errorCode': self.VOLUME_NOT_FOUND_ERROR,
+                        'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR,
                         'message': 'Test Delete Invalid Snapshot',
                     }, 400
                 ),
             'instances/Volume::{}/action/removeVolume'.format(
                 self.snapshot.provider_id): mocks.MockHTTPSResponse(
                     {
-                        'errorCode': self.VOLUME_NOT_FOUND_ERROR,
+                        'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR,
                         'message': 'Test Delete Invalid Snapshot',
                     }, 400,
                 )
diff --git a/cinder/tests/unit/volume/drivers/emc/scaleio/test_extend_volume.py b/cinder/tests/unit/volume/drivers/emc/scaleio/test_extend_volume.py
index b54b72093..bfff4e1ab 100644
--- a/cinder/tests/unit/volume/drivers/emc/scaleio/test_extend_volume.py
+++ b/cinder/tests/unit/volume/drivers/emc/scaleio/test_extend_volume.py
@@ -69,7 +69,7 @@ class TestExtendVolume(scaleio.TestScaleIODriver):
             'instances/Volume::{}/action/setVolumeSize'.format(
                 self.volume.provider_id): mocks.MockHTTPSResponse(
                     {
-                        'errorCode': self.VOLUME_NOT_FOUND_ERROR,
+                        'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR,
                         'message': 'BadStatus Volume Test',
                     }, 400
                 ),
diff --git a/cinder/tests/unit/volume/drivers/emc/scaleio/test_misc.py b/cinder/tests/unit/volume/drivers/emc/scaleio/test_misc.py
index b8dd9f30f..6d9ae1895 100644
---
a/cinder/tests/unit/volume/drivers/emc/scaleio/test_misc.py +++ b/cinder/tests/unit/volume/drivers/emc/scaleio/test_misc.py @@ -61,8 +61,11 @@ class TestMisc(scaleio.TestScaleIODriver): ): '"{}"'.format(self.POOL_NAME).encode('ascii', 'ignore'), 'types/StoragePool/instances/action/querySelectedStatistics': { '"{}"'.format(self.POOL_NAME): { - 'capacityInUseInKb': 502, - 'capacityLimitInKb': 1024, + 'capacityAvailableForVolumeAllocationInKb': 5000000, + 'capacityLimitInKb': 16000000, + 'spareCapacityInKb': 6000000, + 'thickCapacityInUseInKb': 266, + 'thinCapacityAllocatedInKm': 0, }, }, 'instances/Volume::{}/action/setVolumeName'.format( @@ -75,6 +78,13 @@ class TestMisc(scaleio.TestScaleIODriver): self.RESPONSE_MODE.BadStatus: { 'types/Domain/instances/getByName::' + self.domain_name_enc: self.BAD_STATUS_RESPONSE, + 'instances/Volume::{}/action/setVolumeName'.format( + self.volume['provider_id']): mocks.MockHTTPSResponse( + { + 'message': 'Invalid volume.', + 'httpStatusCode': 400, + 'errorCode': self.VOLUME_NOT_FOUND_ERROR + }, 400), }, self.RESPONSE_MODE.Invalid: { 'types/Domain/instances/getByName::' + @@ -192,6 +202,12 @@ class TestMisc(scaleio.TestScaleIODriver): self.new_volume['id'] ) + def test_rename_volume_non_sio(self): + self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) + rc = self.driver._rename_volume( + self.volume, self.new_volume['id']) + self.assertIsNone(rc) + def test_default_provisioning_type_unspecified(self): empty_storage_type = {} self.assertEqual( diff --git a/cinder/tests/unit/test_emc_vmax.py b/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py similarity index 95% rename from cinder/tests/unit/test_emc_vmax.py rename to cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py index 443bb6a82..3f0009c39 100644 --- a/cinder/tests/unit/test_emc_vmax.py +++ b/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py @@ -13,15 +13,15 @@ # License for the specific language governing permissions and limitations # under the License. 
-import ddt import os import shutil import tempfile import time +import unittest from xml.dom import minidom +import ddt import mock -from oslo_service import loopingcall from oslo_utils import units import six @@ -29,6 +29,7 @@ from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import test +from cinder.tests.unit import utils from cinder.volume import configuration as conf from cinder.volume.drivers.emc import emc_vmax_common @@ -157,6 +158,20 @@ class Fake_CIMProperty(object): cimproperty.value = '10.10.10.10' return cimproperty + def fake_getSupportedReplicationTypesCIMProperty(self, reptypes): + cimproperty = Fake_CIMProperty() + if reptypes == 'V3': + cimproperty.value = [6, 7] + elif reptypes == 'V3_SYNC': + cimproperty.value = [6] + elif reptypes == 'V3_ASYNC': + cimproperty.value = [7] + elif reptypes == 'V2': + cimproperty.value = [10] + else: + cimproperty.value = [2, 3, 4, 5] + return cimproperty + class Fake_CIM_TierPolicyServiceCapabilities(object): @@ -286,8 +301,11 @@ class EMCVMAXCommonData(object): poolname = 'gold' totalmanagedspace_bits = '1000000000000' subscribedcapacity_bits = '500000000000' + remainingmanagedspace_bits = '500000000000' + maxsubscriptionpercent = 150 totalmanagedspace_gbs = 931 - subscribedcapacity_gbs = 466 + subscribedcapacity_gbs = 465 + remainingmanagedspace_gbs = 465 fake_host = 'HostX@Backend#gold+1234567891011' fake_host_v3 = 'HostX@Backend#Bronze+SRP_1+1234567891011' fake_host_2_v3 = 'HostY@Backend#SRP_1+1234567891011' @@ -708,7 +726,7 @@ class FakeEcomConnection(object): return result def ModifyInstance(self, objectpath, PropertyList=None): - pass + pass def DeleteInstance(self, objectpath): pass @@ -1099,6 +1117,8 @@ class FakeEcomConnection(object): pool['SystemName'] = self.data.storage_system pool['TotalManagedSpace'] = self.data.totalmanagedspace_bits pool['EMCSubscribedCapacity'] = self.data.subscribedcapacity_bits + pool['RemainingManagedSpace'] = self.data.remainingmanagedspace_bits + pool['EMCMaxSubscriptionPercent'] = self.data.maxsubscriptionpercent return pool def _getinstance_replicationgroup(self, objectpath): @@ -1144,6 +1164,14 @@ class FakeEcomConnection(object): else: targetmaskinggroup['ElementName'] = ( self.data.storagegroupname) + if 'EMCMaximumIO' in objectpath: + targetmaskinggroup['EMCMaximumIO'] = objectpath['EMCMaximumIO'] + if 'EMCMaximumBandwidth' in objectpath: + targetmaskinggroup['EMCMaximumBandwidth'] = ( + objectpath['EMCMaximumBandwidth']) + if 'EMCMaxIODynamicDistributionType' in objectpath: + targetmaskinggroup['EMCMaxIODynamicDistributionType'] = ( + objectpath['EMCMaxIODynamicDistributionType']) return targetmaskinggroup def _getinstance_unit(self, objectpath): @@ -1704,24 +1732,23 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): configuration.append_config_values = mock.Mock(return_value=0) configuration.config_group = 'ISCSINoFAST' configuration.cinder_emc_config_file = self.config_file_path - self.stubs.Set(configuration, 'safe_get', - self.fake_safe_get({'driver_use_ssl': - True, - 'volume_backend_name': - 'ISCSINoFAST'})) - self.stubs.Set(emc_vmax_iscsi.EMCVMAXISCSIDriver, - 'smis_do_iscsi_discovery', - self.fake_do_iscsi_discovery) - self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) + self.mock_object(configuration, 'safe_get', + self.fake_safe_get({'driver_use_ssl': + True, + 'volume_backend_name': + 'ISCSINoFAST'})) + self.mock_object(emc_vmax_iscsi.EMCVMAXISCSIDriver, + 
'smis_do_iscsi_discovery', + self.fake_do_iscsi_discovery) + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) instancename = FakeCIMInstanceName() - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.stubs.Set(time, 'sleep', - self.fake_sleep) - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', - self.fake_is_v3) - + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + instancename.fake_getinstancename) + self.mock_object(time, 'sleep', + self.fake_sleep) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver @@ -1894,8 +1921,10 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): def fake_do_iscsi_discovery(self, volume): output = [] - item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80' - output.append(item) + properties = {} + properties['target_portal'] = '10.10.0.50:3260' + properties['target_iqn'] = 'iqn.1992-04.com.emc:50000973f006dd80' + output.append(properties) return output def fake_sleep(self, seconds): @@ -2310,7 +2339,8 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): vol['name'], extraSpecs) self.assertTrue(return_to_default) - def test_wait_for_job_complete(self): + @unittest.skip("Skip until bug #1578986 is fixed") + def _test_wait_for_job_complete(self): myjob = SE_ConcreteJob() myjob.classname = 'SE_ConcreteJob' myjob['InstanceID'] = '9999' @@ -2330,18 +2360,12 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): self.assertTrue(self.driver.utils._is_job_finished.return_value) self.driver.utils._is_job_finished.reset_mock() - # Save the original state and restore it after this test - loopingcall_orig = loopingcall.FixedIntervalLoopingCall - loopingcall.FixedIntervalLoopingCall = mock.Mock() rc, errordesc = self.driver.utils.wait_for_job_complete(conn, myjob) self.assertEqual(0, rc) self.assertIsNone(errordesc) - loopingcall.FixedIntervalLoopingCall.assert_called_once_with( - mock.ANY) - loopingcall.FixedIntervalLoopingCall.reset_mock() - loopingcall.FixedIntervalLoopingCall = loopingcall_orig - def test_wait_for_job_complete_bad_job_state(self): + @unittest.skip("Skip until bug #1578986 is fixed") + def _test_wait_for_job_complete_bad_job_state(self): myjob = SE_ConcreteJob() myjob.classname = 'SE_ConcreteJob' myjob['InstanceID'] = '9999' @@ -2358,12 +2382,14 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): self.assertEqual(-1, rc) self.assertEqual('Job finished with an error', errordesc) - def test_wait_for_sync(self): + @unittest.skip("Skip until bug #1578986 is fixed") + def _test_wait_for_sync(self): mysync = 'fakesync' conn = self.fake_ecom_connection() self.driver.utils._is_sync_complete = mock.Mock( return_value=True) + self.driver.utils._get_interval_in_secs = mock.Mock(return_value=0) rc = self.driver.utils.wait_for_sync(conn, mysync) self.assertIsNotNone(rc) self.driver.utils._is_sync_complete.assert_called_once_with( @@ -2371,16 +2397,10 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): self.assertTrue(self.driver.utils._is_sync_complete.return_value) self.driver.utils._is_sync_complete.reset_mock() - # Save the original state and restore it after this test - loopingcall_orig = loopingcall.FixedIntervalLoopingCall - loopingcall.FixedIntervalLoopingCall = mock.Mock() rc = self.driver.utils.wait_for_sync(conn, mysync) self.assertIsNotNone(rc) - 
loopingcall.FixedIntervalLoopingCall.assert_called_once_with( - mock.ANY) - loopingcall.FixedIntervalLoopingCall.reset_mock() - loopingcall.FixedIntervalLoopingCall = loopingcall_orig + @unittest.skip("Skip until bug #1578986 is fixed") def test_wait_for_sync_extra_specs(self): mysync = 'fakesync' conn = self.fake_ecom_connection() @@ -2396,6 +2416,7 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): self.driver.utils._is_sync_complete = mock.Mock( return_value=True) + self.driver.utils._get_interval_in_secs = mock.Mock(return_value=0) rc = self.driver.utils.wait_for_sync(conn, mysync, extraSpecs) self.assertIsNotNone(rc) self.driver.utils._is_sync_complete.assert_called_once_with( @@ -2407,15 +2428,8 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): self.driver.utils._get_interval_in_secs(extraSpecs)) self.driver.utils._is_sync_complete.reset_mock() - # Save the original state and restore it after this test - loopingcall_orig = loopingcall.FixedIntervalLoopingCall - loopingcall.FixedIntervalLoopingCall = mock.Mock() rc = self.driver.utils.wait_for_sync(conn, mysync) self.assertIsNotNone(rc) - loopingcall.FixedIntervalLoopingCall.assert_called_once_with( - mock.ANY) - loopingcall.FixedIntervalLoopingCall.reset_mock() - loopingcall.FixedIntervalLoopingCall = loopingcall_orig bExists = os.path.exists(file_name) if bExists: os.remove(file_name) @@ -2844,22 +2858,25 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): # Bug 1393555 - policy has been deleted by another process. def test_get_capacities_associated_to_policy(self): conn = self.fake_ecom_connection() - total_capacity_gb, free_capacity_gb = ( + (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, + array_max_over_subscription) = ( self.driver.common.fast.get_capacities_associated_to_policy( conn, self.data.storage_system, self.data.policyrule)) # The capacities associated to the policy have been found. self.assertEqual(self.data.totalmanagedspace_gbs, total_capacity_gb) - self.assertEqual(self.data.subscribedcapacity_gbs, free_capacity_gb) + self.assertEqual(self.data.remainingmanagedspace_gbs, free_capacity_gb) self.driver.common.fast.utils.get_existing_instance = mock.Mock( return_value=None) - total_capacity_gb_2, free_capacity_gb_2 = ( + (total_capacity_gb_2, free_capacity_gb_2, provisioned_capacity_gb_2, + array_max_over_subscription_2) = ( self.driver.common.fast.get_capacities_associated_to_policy( conn, self.data.storage_system, self.data.policyrule)) # The capacities have not been found as the policy has been # removed externally. self.assertEqual(0, total_capacity_gb_2) self.assertEqual(0, free_capacity_gb_2) + self.assertEqual(0, provisioned_capacity_gb_2) # Bug 1393555 - storage group has been deleted by another process. 
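The hunks above repeatedly retire the hand-rolled save/patch/restore of oslo_service.loopingcall.FixedIntervalLoopingCall in the wait_for_job_complete/wait_for_sync tests (skipping the brittle ones until bug #1578986 is fixed) in favour of patching the class once during setUp, as the later EMCVMAXISCSIDriverFastTestCase and EMCV3DriverTestCase setUps do with utils.ZeroIntervalLoopingCall. A minimal sketch of that pattern, assuming a zero-interval test double modelled on cinder.tests.unit.utils.ZeroIntervalLoopingCall — the local subclass below is illustrative, not the real helper:

    import unittest

    import mock
    from oslo_service import loopingcall


    class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall):
        """Illustrative test double: run iterations with no waiting."""

        def start(self, interval, **kwargs):
            # Force a zero interval and no initial delay so the looping
            # call runs back-to-back instead of pacing the test in real
            # time.
            kwargs['initial_delay'] = 0
            return super(ZeroIntervalLoopingCall, self).start(0, **kwargs)


    class LoopingCallPatchExample(unittest.TestCase):

        def setUp(self):
            super(LoopingCallPatchExample, self).setUp()
            # mock.patch(new=...) swaps the class in, and patcher.stop()
            # restores the original on cleanup, replacing the manual
            # save/restore of the module attribute that the hunks above
            # delete.
            patcher = mock.patch(
                'oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=ZeroIntervalLoopingCall)
            patcher.start()
            self.addCleanup(patcher.stop)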
def test_find_storage_masking_group(self): @@ -2995,7 +3012,7 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_pool_capacities', - return_value=(1234, 1200)) + return_value=(1234, 1200, 1200, 1)) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'is_tiering_policy_enabled', @@ -3326,6 +3343,7 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): self.driver.create_snapshot, self.data.test_volume) + @unittest.skip("Skip until bug #1578986 is fixed") @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', @@ -3594,6 +3612,7 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): add_volumes, remove_volumes) # Bug https://bugs.launchpad.net/cinder/+bug/1442376 + @unittest.skip("Skip until bug #1578986 is fixed") @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', @@ -3614,7 +3633,7 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_clone_with_different_meta_sizes( + def _test_create_clone_with_different_meta_sizes( self, mock_volume_type, mock_volume, mock_meta, mock_size, mock_pool): self.data.test_volume['volume_name'] = "vmax-1234567" @@ -3823,7 +3842,7 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_update_pool_stats', - return_value={1, 2, 3}) + return_value={1, 2, 3, 4, 5}) def test_ssl_support(self, pool_stats): self.driver.common.update_volume_stats() self.assertTrue(self.driver.common.ecomUseSSL) @@ -3853,21 +3872,25 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase): configuration.safe_get.return_value = 'ISCSIFAST' configuration.config_group = 'ISCSIFAST' - self.stubs.Set(emc_vmax_iscsi.EMCVMAXISCSIDriver, - 'smis_do_iscsi_discovery', - self.fake_do_iscsi_discovery) - self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) + self.mock_object(emc_vmax_iscsi.EMCVMAXISCSIDriver, + 'smis_do_iscsi_discovery', + self.fake_do_iscsi_discovery) + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) instancename = FakeCIMInstanceName() - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.stubs.Set(time, 'sleep', - self.fake_sleep) - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', - self.fake_is_v3) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + instancename.fake_getinstancename) + self.mock_object(time, 'sleep', + self.fake_sleep) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver + self.patcher = mock.patch( + 'oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + self.patcher.start() def create_fake_config_file_fast(self): @@ -3937,8 +3960,10 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase): def fake_do_iscsi_discovery(self, volume): output = [] - item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80' - output.append(item) + properties = {} + properties['target_portal'] = '10.10.0.50:3260' + properties['target_iqn'] = 'iqn.1992-04.com.emc:50000973f006dd80' + output.append(properties) return output def fake_sleep(self, seconds): @@ -3950,7 +3975,11 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase): 
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_capacities_associated_to_policy', - return_value=(1234, 1200)) + return_value=(1234, 1200, 1200, 1)) + @mock.patch.object( + emc_vmax_utils.EMCVMAXUtils, + 'get_pool_capacities', + return_value=(1234, 1200, 1200, 1)) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_tier_policy_by_name', @@ -3967,7 +3996,8 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase): mock_storage_system, mock_is_fast_enabled, mock_get_policy, - mock_capacity): + mock_pool_capacities, + mock_capacities_associated_to_policy): self.driver.get_volume_stats(True) @mock.patch.object( @@ -4231,6 +4261,11 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase): self.driver.create_snapshot, self.data.test_volume) + @unittest.skip("Skip until bug #1578986 is fixed") + @mock.patch.object( + emc_vmax_utils.EMCVMAXUtils, + 'wait_for_job_complete', + return_value=(0, 'success')) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', @@ -4248,7 +4283,8 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase): 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_create_volume_from_same_size_meta_snapshot( - self, mock_volume_type, mock_sync_sv, mock_meta, mock_size): + self, mock_volume_type, mock_sync_sv, mock_meta, mock_size, + mock_wait): self.data.test_volume['volume_name'] = "vmax-1234567" common = self.driver.common common.fast.is_volume_in_default_SG = mock.Mock(return_value=True) @@ -4493,15 +4529,15 @@ class EMCVMAXFCDriverNoFastTestCase(test.TestCase): configuration.safe_get.return_value = 'FCNoFAST' configuration.config_group = 'FCNoFAST' - self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) instancename = FakeCIMInstanceName() - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.stubs.Set(time, 'sleep', - self.fake_sleep) - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', - self.fake_is_v3) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + instancename.fake_getinstancename) + self.mock_object(time, 'sleep', + self.fake_sleep) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration) driver.db = FakeDB() @@ -4580,7 +4616,7 @@ class EMCVMAXFCDriverNoFastTestCase(test.TestCase): @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_pool_capacities', - return_value=(1234, 1200)) + return_value=(1234, 1200, 1200, 1)) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'is_tiering_policy_enabled', @@ -5054,21 +5090,21 @@ class EMCVMAXFCDriverFastTestCase(test.TestCase): self.create_fake_config_file_fast() self.addCleanup(self._cleanup) + self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') configuration = mock.Mock() configuration.cinder_emc_config_file = self.config_file_path configuration.safe_get.return_value = 'FCFAST' configuration.config_group = 'FCFAST' - self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) instancename = FakeCIMInstanceName() - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.stubs.Set(time, 'sleep', - self.fake_sleep) - 
self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', - self.fake_is_v3) - + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + instancename.fake_getinstancename) + self.mock_object(time, 'sleep', + self.fake_sleep) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration) driver.db = FakeDB() driver.common.conn = FakeEcomConnection() @@ -5152,7 +5188,11 @@ class EMCVMAXFCDriverFastTestCase(test.TestCase): @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_capacities_associated_to_policy', - return_value=(1234, 1200)) + return_value=(1234, 1200, 1200, 1)) + @mock.patch.object( + emc_vmax_utils.EMCVMAXUtils, + 'get_pool_capacities', + return_value=(1234, 1200, 1200, 1)) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_tier_policy_by_name', @@ -5169,7 +5209,8 @@ class EMCVMAXFCDriverFastTestCase(test.TestCase): mock_storage_system, mock_is_fast_enabled, mock_get_policy, - mock_capacity): + mock_pool_capacities, + mock_capacities_associated_to_policy): self.driver.get_volume_stats(True) @mock.patch.object( @@ -5450,6 +5491,7 @@ class EMCVMAXFCDriverFastTestCase(test.TestCase): self.driver.create_snapshot, self.data.test_volume) + @unittest.skip("Skip until bug #1578986 is fixed") @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', @@ -5710,6 +5752,7 @@ class EMCV3DriverTestCase(test.TestCase): self.data = EMCVMAXCommonData() self.data.storage_system = 'SYMMETRIX-+-000197200056' + self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') self.tempdir = tempfile.mkdtemp() super(EMCV3DriverTestCase, self).setUp() @@ -5721,18 +5764,22 @@ class EMCV3DriverTestCase(test.TestCase): def set_configuration(self): configuration = mock.Mock() configuration.cinder_emc_config_file = self.config_file_path - configuration.safe_get.return_value = 'V3' + configuration.safe_get.return_value = 3 configuration.config_group = 'V3' - self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) instancename = FakeCIMInstanceName() - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.stubs.Set(time, 'sleep', - self.fake_sleep) - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', - self.fake_is_v3) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + instancename.fake_getinstancename) + self.mock_object(time, 'sleep', + self.fake_sleep) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) + self.patcher = mock.patch( + 'oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + self.patcher.start() driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration) driver.db = FakeDB() @@ -6009,6 +6056,7 @@ class EMCV3DriverTestCase(test.TestCase): return_value=self.default_extraspec()) self.driver.delete_volume(self.data.test_volume_v3) + @unittest.skip("Skip until bug #1578986 is fixed") @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', @@ -6027,6 +6075,8 @@ class EMCV3DriverTestCase(test.TestCase): common = self.driver.common common.provisionv3.utils.get_v3_default_sg_instance_name = mock.Mock( return_value=(None, None, self.data.default_sg_instance_name)) + common.utils.is_clone_licensed = ( + mock.Mock(return_value=True)) common._initial_setup = mock.Mock( 
return_value=self.default_extraspec()) self.driver.create_snapshot(self.data.test_volume_v3) @@ -6045,6 +6095,7 @@ class EMCV3DriverTestCase(test.TestCase): return_value=self.default_extraspec()) self.driver.delete_snapshot(self.data.test_volume_v3) + @unittest.skip("Skip until bug #1578986 is fixed") @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', @@ -6073,6 +6124,8 @@ class EMCV3DriverTestCase(test.TestCase): cloneVol['BlockSize'] = self.data.block_size cloneVol['host'] = self.data.fake_host_v3 common = self.driver.common + common.utils.is_clone_licensed = ( + mock.Mock(return_value=True)) common._initial_setup = mock.Mock( return_value=self.default_extraspec()) common._get_or_create_storage_group_v3 = mock.Mock( @@ -6201,8 +6254,8 @@ class EMCV3DriverTestCase(test.TestCase): 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'V3_BE'}) def test_create_cgsnapshot_v3_success( - self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members, - mock_rg): + self, _mock_volume_type, _mock_storage, _mock_cg, + _mock_members, mock_rg): provisionv3 = self.driver.common.provisionv3 provisionv3.create_group_replica = mock.Mock(return_value=(0, None)) self.driver.create_cgsnapshot( @@ -6385,6 +6438,8 @@ class EMCV3DriverTestCase(test.TestCase): self.data.test_volume['volume_name'] = "vmax-1234567" e = exception.VolumeBackendAPIException('CreateElementReplica Ex') common = self.driver.common + common.utils.is_clone_licensed = ( + mock.Mock(return_value=True)) volumeDict = {'classname': u'Symm_StorageVolume', 'keybindings': EMCVMAXCommonData.keybindings} common._create_v3_volume = ( @@ -6405,6 +6460,9 @@ class EMCV3DriverTestCase(test.TestCase): targetInstance = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) deviceID = targetInstance['DeviceID'] + common._delete_from_pool_v3(storageConfigService, targetInstance, + targetInstance['Name'], deviceID, + extraSpecs) common._delete_from_pool_v3.assert_called_with(storageConfigService, targetInstance, targetInstance['Name'], @@ -6516,18 +6574,18 @@ class EMCV2MultiPoolDriverTestCase(test.TestCase): configuration.cinder_emc_config_file = self.config_file_path configuration.config_group = 'MULTI_POOL' - self.stubs.Set(emc_vmax_iscsi.EMCVMAXISCSIDriver, - 'smis_do_iscsi_discovery', - self.fake_do_iscsi_discovery) - self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) + self.mock_object(emc_vmax_iscsi.EMCVMAXISCSIDriver, + 'smis_do_iscsi_discovery', + self.fake_do_iscsi_discovery) + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) instancename = FakeCIMInstanceName() - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.stubs.Set(time, 'sleep', - self.fake_sleep) - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', - self.fake_is_v3) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + instancename.fake_getinstancename) + self.mock_object(time, 'sleep', + self.fake_sleep) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() @@ -6618,8 +6676,10 @@ class EMCV2MultiPoolDriverTestCase(test.TestCase): def fake_do_iscsi_discovery(self, volume): output = [] - item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80' - output.append(item) + properties = {} + properties['target_portal'] = '10.10.0.50:3260' + 
properties['target_iqn'] = 'iqn.1992-04.com.emc:50000973f006dd80' + output.append(properties) return output def fake_sleep(self, seconds): @@ -6803,15 +6863,15 @@ class EMCV3MultiSloDriverTestCase(test.TestCase): configuration.cinder_emc_config_file = self.config_file_path configuration.config_group = 'MULTI_SLO_V3' - self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) instancename = FakeCIMInstanceName() - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.stubs.Set(time, 'sleep', - self.fake_sleep) - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', - self.fake_is_v3) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + instancename.fake_getinstancename) + self.mock_object(time, 'sleep', + self.fake_sleep) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration) driver.db = FakeDB() @@ -7110,16 +7170,15 @@ class EMCV2MultiPoolDriverMultipleEcomsTestCase(test.TestCase): configuration.safe_get.return_value = 'MULTI_ECOM' configuration.config_group = 'MULTI_ECOM' - self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) instancename = FakeCIMInstanceName() - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.stubs.Set(time, 'sleep', - self.fake_sleep) - self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', - self.fake_is_v3) - + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + instancename.fake_getinstancename) + self.mock_object(time, 'sleep', + self.fake_sleep) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration) driver.db = FakeDB() driver.common.conn = FakeEcomConnection() @@ -7821,6 +7880,7 @@ class EMCVMAXFCTest(test.TestCase): self.assertEqual(0, len(mvInstances)) +@ddt.ddt class EMCVMAXUtilsTest(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() @@ -7884,6 +7944,124 @@ class EMCVMAXUtilsTest(test.TestCase): emc_vmax_provision.COPY_ON_WRITE, extraSpecs) self.assertIsNotNone(rsdInstance) + def getinstance_capability(self, reptypes): + repservicecap = CIM_ReplicationServiceCapabilities() + repservicecap['CreationClassName'] = ( + 'CIM_ReplicationServiceCapabilities') + + classcimproperty = Fake_CIMProperty() + supportedReplicationTypes = ( + classcimproperty.fake_getSupportedReplicationTypesCIMProperty( + reptypes)) + properties = {u'SupportedReplicationTypes': supportedReplicationTypes} + repservicecap.properties = properties + return repservicecap + + @ddt.data(('V3', True), ('V3_ASYNC', True), ('V3_SYNC', True), + ('V2', False)) + @ddt.unpack + def test_is_clone_licensed(self, reptypes, isV3): + conn = FakeEcomConnection() + capabilityInstanceName = self.getinstance_capability(reptypes) + conn.GetInstance = mock.Mock( + return_value=capabilityInstanceName) + self.assertTrue(self.driver.utils.is_clone_licensed( + conn, capabilityInstanceName, isV3)) + + def test_is_clone_licensed_false(self): + conn = FakeEcomConnection() + isV3 = True + reptypes = None + capabilityInstanceName = self.getinstance_capability(reptypes) + conn.GetInstance = 
mock.Mock( + return_value=capabilityInstanceName) + self.assertFalse(self.driver.utils.is_clone_licensed( + conn, capabilityInstanceName, isV3)) + + def test_get_pool_capacities(self): + conn = FakeEcomConnection() + + (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, + array_max_over_subscription) = ( + self.driver.utils.get_pool_capacities( + conn, self.data.poolname, self.data.storage_system)) + self.assertEqual(931, total_capacity_gb) + self.assertEqual(465, free_capacity_gb) + self.assertEqual(465, provisioned_capacity_gb) + self.assertEqual(1.5, array_max_over_subscription) + + def test_get_pool_capacities_none_array_max_oversubscription(self): + conn = FakeEcomConnection() + null_emcmaxsubscriptionpercent = { + 'TotalManagedSpace': '1000000000000', + 'ElementName': 'gold', + 'RemainingManagedSpace': '500000000000', + 'SystemName': 'SYMMETRIX+000195900551', + 'CreationClassName': 'Symm_VirtualProvisioningPool', + 'EMCSubscribedCapacity': '500000000000'} + conn.GetInstance = mock.Mock( + return_value=null_emcmaxsubscriptionpercent) + (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, + array_max_over_subscription) = ( + self.driver.utils.get_pool_capacities( + conn, self.data.poolname, self.data.storage_system)) + self.assertEqual(65534, array_max_over_subscription) + + def test_get_ratio_from_max_sub_per(self): + max_subscription_percent_float = ( + self.driver.utils.get_ratio_from_max_sub_per(150)) + self.assertEqual(1.5, max_subscription_percent_float) + + def test_get_ratio_from_max_sub_per_none_value(self): + max_subscription_percent_float = ( + self.driver.utils.get_ratio_from_max_sub_per(str(0))) + self.assertIsNone(max_subscription_percent_float) + + def test_update_storage_QOS(self): + conn = FakeEcomConnection() + pywbem = mock.Mock() + pywbem.cim_obj = mock.Mock() + pywbem.cim_obj.CIMInstance = mock.Mock() + emc_vmax_utils.pywbem = pywbem + + extraSpecs = {'volume_backend_name': 'V3_BE', + 'qos': { + 'maxIOPS': '6000', + 'maxMBPS': '6000', + 'DistributionType': 'Always' + }} + + storageGroupInstanceName = { + 'CreationClassName': 'CIM_DeviceMaskingGroup', + 'EMCMaximumIO': 6000, + 'EMCMaximumBandwidth': 5000, + 'EMCMaxIODynamicDistributionType': 1 + + } + modifiedstorageGroupInstance = { + 'CreationClassName': 'CIM_DeviceMaskingGroup', + 'EMCMaximumIO': 6000, + 'EMCMaximumBandwidth': 6000, + 'EMCMaxIODynamicDistributionType': 1 + + } + conn.ModifyInstance = ( + mock.Mock(return_value=modifiedstorageGroupInstance)) + self.driver.common.utils.update_storagegroup_qos( + conn, storageGroupInstanceName, extraSpecs) + + modifiedInstance = self.driver.common.utils.update_storagegroup_qos( + conn, storageGroupInstanceName, extraSpecs) + self.assertIsNotNone(modifiedInstance) + self.assertEqual( + 6000, modifiedInstance['EMCMaximumIO']) + self.assertEqual( + 6000, modifiedInstance['EMCMaximumBandwidth']) + self.assertEqual( + 1, modifiedInstance['EMCMaxIODynamicDistributionType']) + self.assertEqual('CIM_DeviceMaskingGroup', + modifiedInstance['CreationClassName']) + class EMCVMAXCommonTest(test.TestCase): def setUp(self): @@ -8017,3 +8195,56 @@ class EMCVMAXProvisionTest(test.TestCase): masking.provision.add_members_to_masking_group.assert_called_with( conn, controllerConfigService, storageGroupInstanceName, volumeInstanceName, volumeName, extraSpecs) + + +class EMCVMAXISCSITest(test.TestCase): + def setUp(self): + self.data = EMCVMAXCommonData() + + super(EMCVMAXISCSITest, self).setUp() + + configuration = mock.Mock() + configuration.safe_get.return_value 
= 'iSCSITests' + configuration.config_group = 'iSCSITests' + self.mock_object(emc_vmax_iscsi.EMCVMAXISCSIDriver, + 'smis_do_iscsi_discovery', + self.fake_do_iscsi_discovery) + emc_vmax_common.EMCVMAXCommon._gather_info = mock.Mock() + driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) + driver.db = FakeDB() + self.driver = driver + + def fake_do_iscsi_discovery(self, volume): + output = [] + properties = {} + properties['target_portal'] = '10.10.0.50:3260' + properties['target_iqn'] = 'iqn.1992-04.com.emc:50000973f006dd80' + output.append(properties) + properties = {} + properties['target_portal'] = '10.10.0.51:3260' + properties['target_iqn'] = 'iqn.1992-04.com.emc:50000973f006dd81' + output.append(properties) + return output + + def test_parse_target_list(self): + targets = ["10.10.10.31:3260,0 iqn.1f:29.ID2", + "10.10.10.32:3260,0 iqn.2f:29.ID2"] + out_targets = self.driver._parse_target_list(targets) + self.assertEqual('10.10.10.31:3260', out_targets[0]['target_portal']) + self.assertEqual('iqn.1f:29.ID2', out_targets[0]['target_iqn']) + self.assertEqual('10.10.10.32:3260', out_targets[1]['target_portal']) + self.assertEqual('iqn.2f:29.ID2', out_targets[1]['target_iqn']) + + def test_smis_get_iscsi_properties(self): + self.driver.iscsi_ip_addresses = ['10.10.0.50', '10.10.0.51'] + device_info = {'hostlunid': 1} + self.driver.common.find_device_number = ( + mock.Mock(return_value=device_info)) + properties = self.driver.smis_get_iscsi_properties( + self.data.test_volume, self.data.connector, True) + self.assertEqual([1, 1], properties['target_luns']) + self.assertEqual(['iqn.1992-04.com.emc:50000973f006dd80', + 'iqn.1992-04.com.emc:50000973f006dd81'], + properties['target_iqns']) + self.assertEqual(['10.10.0.50:3260', '10.10.0.51:3260'], + properties['target_portals']) diff --git a/cinder/tests/unit/test_emc_xtremio.py b/cinder/tests/unit/volume/drivers/emc/test_emc_xtremio.py similarity index 87% rename from cinder/tests/unit/test_emc_xtremio.py rename to cinder/tests/unit/volume/drivers/emc/test_emc_xtremio.py index e8ec06501..66dec560f 100644 --- a/cinder/tests/unit/test_emc_xtremio.py +++ b/cinder/tests/unit/volume/drivers/emc/test_emc_xtremio.py @@ -495,6 +495,33 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase): self.driver.client.handle_errors, response, '', '') + def test_update_migrated_volume(self, req): + original = self.data.test_volume + new = self.data.test_volume2 + update = (self.driver. + update_migrated_volume({}, + original, new, 'available')) + req.assert_called_once_with('volumes', 'PUT', + {'name': original['id']}, new['id'], + None, 'v2') + self.assertEqual({'_name_id': None, + 'provider_location': None}, update) + + def test_update_migrated_volume_failed_rename(self, req): + req.side_effect = exception.VolumeBackendAPIException( + data='failed rename') + original = self.data.test_volume + new = copy.deepcopy(self.data.test_volume2) + fake_provider = '__provider' + new['provider_location'] = fake_provider + new['_name_id'] = None + update = (self.driver. 
+ update_migrated_volume({}, + original, new, 'available')) + self.assertEqual({'_name_id': new['id'], + 'provider_location': fake_provider}, + update) + # ##### Connection ##### def test_no_portals_configured(self, req): req.side_effect = xms_request @@ -640,11 +667,13 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase): req.side_effect = xms_request xms_data['volumes'] = {1: {'name': 'unmanaged1', 'index': 1, - 'vol-size': '3', + 'vol-size': '1000000', }, } ref_vol = {"source-name": "unmanaged1"} - self.driver.manage_existing_get_size(self.data.test_volume, ref_vol) + size = self.driver.manage_existing_get_size(self.data.test_volume, + ref_vol) + self.assertEqual(1, size) def test_manage_volume_size_invalid_input(self, req): self.assertRaises(exception.ManageExistingInvalidReference, @@ -673,6 +702,65 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase): self.assertRaises(exception.VolumeNotFound, self.driver.unmanage, self.data.test_volume2) + def test_manage_snapshot(self, req): + req.side_effect = xms_request + vol_uid = self.data.test_snapshot.volume_id + xms_data['volumes'] = {1: {'name': vol_uid, + 'index': 1, + 'vol-size': '3', + }, + 2: {'name': 'unmanaged', + 'index': 2, + 'ancestor-vol-id': ['', vol_uid, 1], + 'vol-size': '3'} + } + ref_vol = {"source-name": "unmanaged"} + self.driver.manage_existing_snapshot(self.data.test_snapshot, ref_vol) + + def test_get_manage_snapshot_size(self, req): + req.side_effect = xms_request + vol_uid = self.data.test_snapshot.volume_id + xms_data['volumes'] = {1: {'name': vol_uid, + 'index': 1, + 'vol-size': '3', + }, + 2: {'name': 'unmanaged', + 'index': 2, + 'ancestor-vol-id': ['', vol_uid, 1], + 'vol-size': '3'} + } + ref_vol = {"source-name": "unmanaged"} + self.driver.manage_existing_snapshot_get_size(self.data.test_snapshot, + ref_vol) + + def test_manage_snapshot_invalid_snapshot(self, req): + req.side_effect = xms_request + xms_data['volumes'] = {1: {'name': 'unmanaged1', + 'index': 1, + 'vol-size': '3', + 'ancestor-vol-id': []} + } + ref_vol = {"source-name": "unmanaged1"} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_snapshot, + self.data.test_snapshot, ref_vol) + + def test_unmanage_snapshot(self, req): + req.side_effect = xms_request + vol_uid = self.data.test_snapshot.volume_id + xms_data['volumes'] = {1: {'name': vol_uid, + 'index': 1, + 'vol-size': '3', + }, + 2: {'name': 'unmanaged', + 'index': 2, + 'ancestor-vol-id': ['', vol_uid, 1], + 'vol-size': '3'} + } + ref_vol = {"source-name": "unmanaged"} + self.driver.manage_existing_snapshot(self.data.test_snapshot, ref_vol) + self.driver.unmanage_snapshot(self.data.test_snapshot) + # ##### Consistancy Groups ##### @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_create(self, get_all_for_cgsnapshot, req): @@ -702,6 +790,24 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase): remove_volumes=[d.test_volume2]) self.assertEqual(1, len(xms_data['consistency-group-volumes'])) + @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') + def test_create_cg(self, get_all_for_cgsnapshot, req): + req.side_effect = xms_request + d = self.data + snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) + snapshot_obj.consistencygroup_id = d.group['id'] + get_all_for_cgsnapshot.return_value = [snapshot_obj] + self.driver.create_consistencygroup(d.context, d.group) + self.driver.update_consistencygroup(d.context, d.group, + add_volumes=[d.test_volume, + d.test_volume2]) + 
self.driver.db = mock.Mock() + (self.driver.db. + volume_get_all_by_group.return_value) = [mock.MagicMock()] + res = self.driver.create_cgsnapshot(d.context, d.cgsnapshot, + [snapshot_obj]) + self.assertEqual((None, None), res) + @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_delete(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request @@ -716,7 +822,7 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase): self.driver.db = mock.Mock() (self.driver.db. volume_get_all_by_group.return_value) = [mock.MagicMock()] - self.driver.create_cgsnapshot(d.context, d.cgsnapshot, []) + self.driver.create_cgsnapshot(d.context, d.cgsnapshot, [snapshot_obj]) self.driver.delete_consistencygroup(d.context, d.group, []) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') @@ -739,7 +845,9 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase): 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} - self.driver.delete_cgsnapshot(d.context, d.cgsnapshot, []) + res = self.driver.delete_cgsnapshot(d.context, d.cgsnapshot, + [snapshot_obj]) + self.assertEqual((None, None), res) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_from_src_snapshot(self, get_all_for_cgsnapshot, req): @@ -767,9 +875,11 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase): snapshot1 = (fake_snapshot .fake_snapshot_obj (d.context, volume_id=d.test_volume['id'])) - self.driver.create_consistencygroup_from_src(d.context, cg_obj, - [new_vol1], - d.cgsnapshot, [snapshot1]) + res = self.driver.create_consistencygroup_from_src(d.context, cg_obj, + [new_vol1], + d.cgsnapshot, + [snapshot1]) + self.assertEqual((None, None), res) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_from_src_cg(self, get_all_for_cgsnapshot, req): diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/res_mock.py b/cinder/tests/unit/volume/drivers/emc/vnx/res_mock.py index 96d999868..3a3a25409 100644 --- a/cinder/tests/unit/volume/drivers/emc/vnx/res_mock.py +++ b/cinder/tests/unit/volume/drivers/emc/vnx/res_mock.py @@ -27,6 +27,7 @@ from cinder.tests.unit.volume.drivers.emc.vnx import utils from cinder.volume.drivers.emc.vnx import adapter from cinder.volume.drivers.emc.vnx import client from cinder.volume.drivers.emc.vnx import common +from cinder.volume.drivers.emc.vnx import driver from cinder.volume.drivers.emc.vnx import utils as vnx_utils SYMBOL_TYPE = '_type' @@ -365,7 +366,6 @@ def _build_client(): def patch_client(func): @six.wraps(func) - @utils.patch_looping_call def decorated(cls, *args, **kwargs): storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) @@ -388,7 +388,6 @@ PROTOCOL_MAPPING = { def patch_adapter_init(protocol): def inner_patch_adapter(func): @six.wraps(func) - @utils.patch_looping_call def decorated(cls, *args, **kwargs): storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) @@ -406,12 +405,12 @@ def _patch_adapter_prop(adapter, client): adapter.serial_number = client.get_serial() except KeyError: adapter.serial_number = 'faked_serial_number' + adapter.VERSION = driver.EMCVNXDriver.VERSION def patch_adapter(protocol): def inner_patch_adapter(func): @six.wraps(func) - @utils.patch_looping_call def decorated(cls, *args, **kwargs): storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_adapter.py 
b/cinder/tests/unit/volume/drivers/emc/vnx/test_adapter.py index 617c57440..47724f71a 100644 --- a/cinder/tests/unit/volume/drivers/emc/vnx/test_adapter.py +++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_adapter.py @@ -215,8 +215,8 @@ class TestCommonAdapter(test.TestCase): self.assertEqual(2, len(pool_stats)) for stat in pool_stats: self.assertTrue(stat['fast_cache_enabled']) - self.assertTrue(stat['pool_name'] in [pools[0].name, - pools[1].name]) + self.assertIn(stat['pool_name'], [pools[0].name, + pools[1].name]) self.assertFalse(stat['replication_enabled']) self.assertEqual([], stat['replication_targets']) @@ -270,9 +270,6 @@ class TestCommonAdapter(test.TestCase): def test_update_volume_stats(self, vnx_common, mocked): with mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'): stats = vnx_common.update_volume_stats() - self.assertEqual( - adapter.CommonAdapter.VERSION, stats['driver_version']) - self.assertEqual(adapter.CommonAdapter.VENDOR, stats['vendor_name']) pools_stats = stats['pools'] for stat in pools_stats: self.assertFalse(stat['replication_enabled']) diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_client.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_client.py index bb0b1fb7c..39f5ebd2e 100644 --- a/cinder/tests/unit/volume/drivers/emc/vnx/test_client.py +++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_client.py @@ -13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. +import unittest + from cinder import exception from cinder import test from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception \ @@ -45,9 +47,12 @@ class TestCondition(test.TestCase): class TestClient(test.TestCase): def setUp(self): super(TestClient, self).setUp() + self.origin_timeout = vnx_common.DEFAULT_TIMEOUT + vnx_common.DEFAULT_TIMEOUT = 0 def tearDown(self): super(TestClient, self).tearDown() + vnx_common.DEFAULT_TIMEOUT = self.origin_timeout @res_mock.patch_client def test_create_lun(self, client, mocked): @@ -100,6 +105,7 @@ class TestClient(test.TestCase): lun = client.vnx.get_lun() lun.migrate.assert_called_with(2, storops.VNXMigrationRate.HIGH) + @unittest.skip("Skip until bug #1578986 is fixed") @utils.patch_sleep @res_mock.patch_client def test_migrate_lun_with_retry(self, client, mocked, mock_sleep): @@ -129,16 +135,14 @@ class TestClient(test.TestCase): r = client.session_finished(lun) self.assertTrue(r) - @utils.patch_sleep @res_mock.patch_client - def test_migrate_lun_error(self, client, mocked, mock_sleep): + def test_migrate_lun_error(self, client, mocked): lun = client.vnx.get_lun() self.assertRaises(storops_ex.VNXMigrationError, client.migrate_lun, src_id=4, dst_id=5) lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH) - mock_sleep.assert_not_called() @res_mock.patch_client def test_verify_migration(self, client, mocked): @@ -237,9 +241,10 @@ class TestClient(test.TestCase): def test_expand_lun_already_expanded(self, client, _ignore): client.expand_lun('lun', 10) - @utils.patch_no_sleep + @unittest.skip("Skip until bug #1578986 is fixed") + @utils.patch_sleep @res_mock.patch_client - def test_expand_lun_not_ops_ready(self, client, _ignore): + def test_expand_lun_not_ops_ready(self, client, _ignore, sleep_mock): self.assertRaises(storops_ex.VNXLunPreparingError, client.expand_lun, 'lun', 10) lun = client.vnx.get_lun() @@ -290,19 +295,16 @@ class TestClient(test.TestCase): def test_modify_snapshot(self, client, mocked): client.modify_snapshot('snap_name', True, True) - 
@utils.patch_no_sleep @res_mock.patch_client def test_create_cg_snapshot(self, client, mocked): snap = client.create_cg_snapshot('cg_snap_name', 'cg_name') self.assertIsNotNone(snap) - @utils.patch_no_sleep @res_mock.patch_client def test_create_cg_snapshot_already_existed(self, client, mocked): snap = client.create_cg_snapshot('cg_snap_name', 'cg_name') self.assertIsNotNone(snap) - @utils.patch_no_sleep @res_mock.patch_client def test_delete_cg_snapshot(self, client, mocked): client.delete_cg_snapshot(cg_snap_name='test_snap') diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_common.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_common.py index 47483833b..5d8e8f561 100644 --- a/cinder/tests/unit/volume/drivers/emc/vnx/test_common.py +++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_common.py @@ -139,8 +139,8 @@ class TestExtraSpecs(test.TestCase): def test_get_raw_data(self): spec_obj = common.ExtraSpecs({'key1': 'value1'}) - self.assertTrue('key1' in spec_obj) - self.assertFalse('key2' in spec_obj) + self.assertIn('key1', spec_obj) + self.assertNotIn('key2', spec_obj) self.assertEqual('value1', spec_obj['key1']) @res_mock.mock_storage_resources diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_driver.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_driver.py index 53dc1df4e..0c602fe8d 100644 --- a/cinder/tests/unit/volume/drivers/emc/vnx/test_driver.py +++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_driver.py @@ -47,11 +47,13 @@ class TestEMCVNXDriver(test.TestCase): _driver = self._get_driver('iscsi') driver_name = str(_driver.adapter) self.assertIn('ISCSIAdapter', driver_name) + self.assertEqual(driver.EMCVNXDriver.VERSION, _driver.VERSION) def test_init_fc_driver(self): _driver = self._get_driver('FC') driver_name = str(_driver.adapter) self.assertIn('FCAdapter', driver_name) + self.assertEqual(driver.EMCVNXDriver.VERSION, _driver.VERSION) def test_create_volume(self): _driver = self._get_driver('iscsi') diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py index 74cd18f26..9fa244687 100644 --- a/cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py +++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py @@ -165,7 +165,7 @@ class TestTaskflow(test.TestCase): store=store_spec) engine.run() snap_name = engine.storage.fetch('new_cg_snap_name') - self.assertTrue(isinstance(snap_name, res_mock.StorageObjectMock)) + self.assertIsInstance(snap_name, res_mock.StorageObjectMock) @res_mock.patch_client def test_create_cg_snapshot_task_revert(self, client, mocked): diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_utils.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_utils.py index 7f4aa08fb..9d3547756 100644 --- a/cinder/tests/unit/volume/drivers/emc/vnx/test_utils.py +++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_utils.py @@ -36,36 +36,33 @@ class TestUtils(test.TestCase): super(TestUtils, self).tearDown() common.DEFAULT_TIMEOUT = self.origin_timeout - @ut_utils.patch_looping_call def test_wait_until(self): - mock_testmethod = mock.Mock(side_effect=[False, True]) - utils.wait_until(mock_testmethod) - mock_testmethod.assert_has_calls([mock.call(), mock.call()]) + mock_testmethod = mock.Mock(return_value=True) + utils.wait_until(mock_testmethod, interval=0) + mock_testmethod.assert_has_calls([mock.call()]) - @ut_utils.patch_looping_call def test_wait_until_with_exception(self): - mock_testmethod = mock.Mock(side_effect=[ - False, 
storops_ex.VNXAttachSnapError('Unknown error')]) + mock_testmethod = mock.Mock( + side_effect=storops_ex.VNXAttachSnapError('Unknown error')) mock_testmethod.__name__ = 'test_method' self.assertRaises(storops_ex.VNXAttachSnapError, utils.wait_until, mock_testmethod, - timeout=20, + timeout=1, + interval=0, reraise_arbiter=( lambda ex: not isinstance( ex, storops_ex.VNXCreateLunError))) - mock_testmethod.assert_has_calls([mock.call(), mock.call()]) + mock_testmethod.assert_has_calls([mock.call()]) - @ut_utils.patch_looping_call def test_wait_until_with_params(self): - mock_testmethod = mock.Mock(side_effect=[False, True]) - mock_testmethod.__name__ = 'test_method' + mock_testmethod = mock.Mock(return_value=True) utils.wait_until(mock_testmethod, param1=1, param2='test') mock_testmethod.assert_has_calls( - [mock.call(param1=1, param2='test'), - mock.call(param1=1, param2='test')]) + [mock.call(param1=1, param2='test')]) + mock_testmethod.assert_has_calls([mock.call(param1=1, param2='test')]) @res_mock.mock_driver_input def test_retype_need_migration_when_host_changed(self, driver_in): diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/utils.py b/cinder/tests/unit/volume/drivers/emc/vnx/utils.py index 508d9b754..7b208fd4f 100644 --- a/cinder/tests/unit/volume/drivers/emc/vnx/utils.py +++ b/cinder/tests/unit/volume/drivers/emc/vnx/utils.py @@ -20,24 +20,16 @@ import mock import six import yaml -from cinder.tests.unit import utils from cinder.volume.drivers.emc.vnx import client from cinder.volume.drivers.emc.vnx import common -patch_looping_call = mock.patch( - 'oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - patch_sleep = mock.patch('time.sleep') patch_vnxsystem = mock.patch('storops.VNXSystem') -patch_no_sleep = mock.patch('time.sleep', new=lambda x: None) - - def load_yaml(file_name): yaml_file = '{}/{}'.format(path.dirname( path.abspath(__file__)), file_name) diff --git a/cinder/tests/unit/volume/drivers/hitachi/__init__.py b/cinder/tests/unit/volume/drivers/hitachi/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/tests/unit/test_hitachi_hbsd_horcm_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_horcm_fc.py similarity index 100% rename from cinder/tests/unit/test_hitachi_hbsd_horcm_fc.py rename to cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_horcm_fc.py diff --git a/cinder/tests/unit/test_hitachi_hbsd_snm2_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_fc.py similarity index 100% rename from cinder/tests/unit/test_hitachi_hbsd_snm2_fc.py rename to cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_fc.py diff --git a/cinder/tests/unit/test_hitachi_hbsd_snm2_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_iscsi.py similarity index 100% rename from cinder/tests/unit/test_hitachi_hbsd_snm2_iscsi.py rename to cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_iscsi.py diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py new file mode 100644 index 000000000..ced2469d9 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py @@ -0,0 +1,874 @@ +# Copyright (c) 2014 Hitachi Data Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +import os +import paramiko +import time + +from oslo_concurrency import processutils as putils + +from cinder import exception +from cinder import test +from cinder import utils +from cinder.volume.drivers.hitachi import hnas_backend + + +evsfs_list = "\n\ +FS ID FS Label FS Permanent ID EVS ID EVS Label\n\ +----- ----------- ------------------ ------ ---------\n\ + 1026 gold 0xaadee0e035cfc0b7 1 EVS-Manila\n\ + 1029 test_hdp 0xaadee09634acfcac 1 EVS-Manila\n\ + 1030 fs-cinder 0xaadfcf742fba644e 2 EVS-Cinder\n\ + 1031 cinder2 0xaadfcf7e0769a6bc 3 EVS-Test\n\ + 1024 fs02-husvm 0xaac8715e2e9406cd 3 EVS-Test\n\ +\n" + +cluster_getmac = "cluster MAC: 83-68-96-AA-DA-5D" + +version = "\n\ +Model: HNAS 4040 \n\n\ +Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\n\ +Hardware: NAS Platform (M2SEKW1339109) \n\n\ +board MMB1 \n\ +mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\n\ +board MFB1 \n\ +mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \ + RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \ + WD v00E2 DI v001A FC v0002 \n\ +Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\n\ +board MCP \n\ +Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\ +\n" + +evsipaddr = "\n\ +EVS Type Label IP Address Mask Port \n\ +---------- --------------- ------------------ --------------- ------\n\ +admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\ +admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\ +evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\ +evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\ +evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\ +\n" + +df_f = "\n\ +ID Label EVS Size Used Snapshots Deduped Avail \ +Thin ThinSize ThinAvail FS Type\n\ +---- ---------- --- ------ ------------ --------- ------- ------------ \ +---- -------- --------- --------------------\n\ +1025 fs-cinder 2 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \ + No 32 KB,WFS-2,128 DSBs\n\ +\n" + +nfs_export = "\n\ +Export name: /export01-husvm \n\ +Export path: /export01-husvm \n\ +File system label: fs-cinder \n\ +File system size: 250 GB \n\ +File system free space: 228 GB \n\ +File system state: \n\ +formatted = Yes \n\ +mounted = Yes \n\ +failed = No \n\ +thin provisioned = No \n\ +Access snapshots: Yes \n\ +Display snapshots: Yes \n\ +Read Caching: Disabled \n\ +Disaster recovery setting: \n\ +Recovered = No \n\ +Transfer setting = Use file system default \n\n\ +Export configuration: \n\ +127.0.0.1 \n\ +\n" + +iscsi_one_target = "\n\ +Alias : cinder-default \n\ +Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\ +Comment : \n\ +Secret : pxr6U37LZZJBoMc \n\ +Authentication : Enabled \n\ +Logical units : No logical units. \n\ +\n\ + LUN Logical Unit \n\ + ---- -------------------------------- \n\ + 0 cinder-lu \n\ + 1 volume-99da7ae7-1e7f-4d57-8bf... 
\n\ +\n\ +Access configuration: \n\ +" + +df_f_single_evs = "\n\ +ID Label Size Used Snapshots Deduped Avail \ +Thin ThinSize ThinAvail FS Type\n\ +---- ---------- ------ ------------ --------- ------- ------------ \ +---- -------- --------- --------------------\n\ +1025 fs-cinder 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \ + No 32 KB,WFS-2,128 DSBs\n\ +\n" + +nfs_export_tb = "\n\ +Export name: /export01-husvm \n\ +Export path: /export01-husvm \n\ +File system label: fs-cinder \n\ +File system size: 250 TB \n\ +File system free space: 228 TB \n\ +\n" + +nfs_export_not_available = "\n\ +Export name: /export01-husvm \n\ +Export path: /export01-husvm \n\ +File system label: fs-cinder \n\ + *** not available *** \n\ +\n" + +evs_list = "\n\ +Node EVS ID Type Label Enabled Status IP Address Port \n\ +---- ------ ------- --------------- ------- ------ ------------------- ---- \n\ + 1 Cluster hnas4040 Yes Online 192.0.2.200 eth1 \n\ + 1 0 Admin hnas4040 Yes Online 192.0.2.2 eth1 \n\ + 172.24.44.15 eth0 \n\ + 172.24.49.101 ag2 \n\ + 1 1 Service EVS-Manila Yes Online 172.24.49.32 ag2 \n\ + 172.24.48.32 ag4 \n\ + 1 2 Service EVS-Cinder Yes Online 172.24.49.21 ag2 \n\ + 1 3 Service EVS-Test Yes Online 192.168.100.100 ag2 \n\ +\n" + +iscsilu_list = "Name : cinder-lu \n\ +Comment: \n\ +Path : /.cinder/cinder-lu.iscsi \n\ +Size : 2 GB \n\ +File System : fs-cinder \n\ +File System Mounted : YES \n\ +Logical Unit Mounted: No" + +iscsilu_list_tb = "Name : test-lu \n\ +Comment: \n\ +Path : /.cinder/test-lu.iscsi \n\ +Size : 2 TB \n\ +File System : fs-cinder \n\ +File System Mounted : YES \n\ +Logical Unit Mounted: No" + +add_targetsecret = "Target created successfully." + +iscsi_target_list = "\n\ +Alias : cinder-GoldIsh\n\ +Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-goldish\n\ +Comment :\n\ +Secret : None\n\ +Authentication : Enabled\n\ +Logical units : No logical units.\n\ +Access configuration :\n\ +\n\ +Alias : cinder-default\n\ +Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default\n\ +Comment :\n\ +Secret : pxr6U37LZZJBoMc\n\ +Authentication : Enabled\n\ +Logical units : Logical units :\n\ +\n\ + LUN Logical Unit\n\ + ---- --------------------------------\n\ + 0 cinder-lu\n\ + 1 volume-99da7ae7-1e7f-4d57-8bf...\n\ +\n\ +Access configuration :\n\ +" + +backend_opts = {'mgmt_ip0': '0.0.0.0', + 'cluster_admin_ip0': None, + 'ssh_port': '22', + 'username': 'supervisor', + 'password': 'supervisor', + 'ssh_private_key': 'test_key'} + +target_chap_disable = "\n\ +Alias : cinder-default \n\ +Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\ +Comment : \n\ +Secret : \n\ +Authentication : Disabled \n\ +Logical units : No logical units. \n\ +\n\ + LUN Logical Unit \n\ + ---- -------------------------------- \n\ + 0 cinder-lu \n\ + 1 volume-99da7ae7-1e7f-4d57-8bf... 
\n\ +\n\ +Access configuration: \n\ +" + +file_clone_stat = "Clone: /nfs_cinder/cinder-lu \n\ + SnapshotFile: FileHandle[00000000004010000d20116826ffffffffffffff] \n\ +\n\ + SnapshotFile: FileHandle[00000000004029000d81f26826ffffffffffffff] \n\ +" + +file_clone_stat_snap_file1 = "\ +FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\ +References: \n\ + Clone: /nfs_cinder/cinder-lu \n\ + Clone: /nfs_cinder/snapshot-lu-1 \n\ + Clone: /nfs_cinder/snapshot-lu-2 \n\ +" + +file_clone_stat_snap_file2 = "\ +FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\ +References: \n\ + Clone: /nfs_cinder/volume-not-used \n\ + Clone: /nfs_cinder/snapshot-1 \n\ + Clone: /nfs_cinder/snapshot-2 \n\ +" + +not_a_clone = "\ +file-clone-stat: failed to get predecessor snapshot-files: File is not a clone" + + +class HDSHNASBackendTest(test.TestCase): + + def __init__(self, *args, **kwargs): + super(HDSHNASBackendTest, self).__init__(*args, **kwargs) + + def setUp(self): + super(HDSHNASBackendTest, self).setUp() + self.hnas_backend = hnas_backend.HNASSSHBackend(backend_opts) + + def test_run_cmd(self): + self.mock_object(os.path, 'isfile', + mock.Mock(return_value=True)) + self.mock_object(utils, 'execute') + self.mock_object(time, 'sleep') + self.mock_object(paramiko, 'SSHClient') + self.mock_object(paramiko.RSAKey, 'from_private_key_file') + self.mock_object(putils, 'ssh_execute', + mock.Mock(return_value=(df_f, ''))) + + out, err = self.hnas_backend._run_cmd('ssh', '0.0.0.0', + 'supervisor', 'supervisor', + 'df', '-a') + + self.assertIn('fs-cinder', out) + self.assertIn('WFS-2,128 DSBs', out) + + def test_run_cmd_retry_exception(self): + self.hnas_backend.cluster_admin_ip0 = '172.24.44.11' + + exceptions = [putils.ProcessExecutionError(stderr='Connection reset'), + putils.ProcessExecutionError(stderr='Failed to establish' + ' SSC connection'), + putils.ProcessExecutionError(stderr='Connection reset'), + putils.ProcessExecutionError(stderr='Connection reset'), + putils.ProcessExecutionError(stderr='Connection reset')] + + self.mock_object(os.path, 'isfile', + mock.Mock(return_value=True)) + self.mock_object(utils, 'execute') + self.mock_object(time, 'sleep') + self.mock_object(paramiko, 'SSHClient') + self.mock_object(paramiko.RSAKey, 'from_private_key_file') + self.mock_object(putils, 'ssh_execute', + mock.Mock(side_effect=exceptions)) + + self.assertRaises(exception.HNASConnError, self.hnas_backend._run_cmd, + 'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', + '-a') + + def test_run_cmd_exception_without_retry(self): + self.mock_object(os.path, 'isfile', + mock.Mock(return_value=True)) + self.mock_object(utils, 'execute') + self.mock_object(time, 'sleep') + self.mock_object(paramiko, 'SSHClient') + self.mock_object(paramiko.RSAKey, 'from_private_key_file') + self.mock_object(putils, 'ssh_execute', + mock.Mock(side_effect=putils.ProcessExecutionError + (stderr='Error'))) + + self.assertRaises(putils.ProcessExecutionError, + self.hnas_backend._run_cmd, 'ssh', '0.0.0.0', + 'supervisor', 'supervisor', 'df', '-a') + + def test_get_targets_empty_list(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=('No targets', ''))) + + out = self.hnas_backend._get_targets('2') + self.assertEqual([], out) + + def test_get_targets_not_found(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(iscsi_target_list, ''))) + + out = self.hnas_backend._get_targets('2', 'fake-volume') + self.assertEqual([], out) + + def test__get_unused_luid_number_0(self): + 
tgt_info = { + 'alias': 'cinder-default', + 'secret': 'pxr6U37LZZJBoMc', + 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default', + 'lus': [ + {'id': '1', + 'name': 'cinder-lu2'}, + {'id': '2', + 'name': 'volume-test2'} + ], + 'auth': 'Enabled' + } + + out = self.hnas_backend._get_unused_luid(tgt_info) + + self.assertEqual(0, out) + + def test__get_unused_no_luns(self): + tgt_info = { + 'alias': 'cinder-default', + 'secret': 'pxr6U37LZZJBoMc', + 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default', + 'lus': [], + 'auth': 'Enabled' + } + + out = self.hnas_backend._get_unused_luid(tgt_info) + + self.assertEqual(0, out) + + def test_get_version(self): + expected_out = { + 'hardware': 'NAS Platform (M2SEKW1339109)', + 'mac': '83-68-96-AA-DA-5D', + 'version': '11.2.3319.14', + 'model': 'HNAS 4040', + 'serial': 'B1339745' + } + + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[ + (cluster_getmac, ''), + (version, '')])) + + out = self.hnas_backend.get_version() + + self.assertEqual(expected_out, out) + + def test_get_evs(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + out = self.hnas_backend.get_evs('fs-cinder') + + self.assertEqual('2', out) + + def test_get_export_list(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(nfs_export, ''), + (evsfs_list, ''), + (evs_list, '')])) + + out = self.hnas_backend.get_export_list() + + self.assertEqual('fs-cinder', out[0]['fs']) + self.assertEqual(250.0, out[0]['size']) + self.assertEqual(228.0, out[0]['free']) + self.assertEqual('/export01-husvm', out[0]['path']) + + def test_get_export_list_data_not_available(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(nfs_export_not_available, ''), + (evsfs_list, ''), + (evs_list, '')])) + + out = self.hnas_backend.get_export_list() + + self.assertEqual('fs-cinder', out[0]['fs']) + self.assertEqual('/export01-husvm', out[0]['path']) + self.assertEqual(-1, out[0]['size']) + self.assertEqual(-1, out[0]['free']) + + def test_get_export_list_tb(self): + size = float(250 * 1024) + free = float(228 * 1024) + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(nfs_export_tb, ''), + (evsfs_list, ''), + (evs_list, '')])) + + out = self.hnas_backend.get_export_list() + + self.assertEqual('fs-cinder', out[0]['fs']) + self.assertEqual(size, out[0]['size']) + self.assertEqual(free, out[0]['free']) + self.assertEqual('/export01-husvm', out[0]['path']) + + def test_file_clone(self): + path1 = '/.cinder/path1' + path2 = '/.cinder/path2' + + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + self.hnas_backend.file_clone('fs-cinder', path1, path2) + + calls = [mock.call('evsfs', 'list'), mock.call('console-context', + '--evs', '2', + 'file-clone-create', + '-f', 'fs-cinder', + path1, path2)] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_file_clone_wrong_fs(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + self.assertRaises(exception.InvalidParameterValue, + self.hnas_backend.file_clone, 'fs-fake', 'src', + 'dst') + + def test_get_evs_info(self): + expected_out = {'evs_number': '1'} + expected_out2 = {'evs_number': '2'} + + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsipaddr, ''))) + + out = self.hnas_backend.get_evs_info() + + self.hnas_backend._run_cmd.assert_called_with('evsipaddr', '-l') + 
self.assertEqual(expected_out, out['10.0.0.20']) + self.assertEqual(expected_out, out['172.24.44.20']) + self.assertEqual(expected_out2, out['172.24.44.21']) + + def test_get_fs_info(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(df_f, ''))) + + out = self.hnas_backend.get_fs_info('fs-cinder') + + self.assertEqual('2', out['evs_id']) + self.assertEqual('fs-cinder', out['label']) + self.assertEqual('228', out['available_size']) + self.assertEqual('250', out['total_size']) + self.hnas_backend._run_cmd.assert_called_with('df', '-af', 'fs-cinder') + + def test_get_fs_empty_return(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=('Not mounted', ''))) + + out = self.hnas_backend.get_fs_info('fs-cinder') + self.assertEqual({}, out) + + def test_get_fs_info_single_evs(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(df_f_single_evs, ''))) + + out = self.hnas_backend.get_fs_info('fs-cinder') + + self.assertEqual('fs-cinder', out['label']) + self.assertEqual('228', out['available_size']) + self.assertEqual('250', out['total_size']) + self.hnas_backend._run_cmd.assert_called_with('df', '-af', 'fs-cinder') + + def test_get_fs_tb(self): + available_size = float(228 * 1024 ** 2) + total_size = float(250 * 1024 ** 2) + + df_f_tb = "\n\ +ID Label EVS Size Used Snapshots Deduped Avail \ +Thin ThinSize ThinAvail FS Type\n\ +---- ---------- --- ------ ------------ --------- ------- ------------ \ +---- -------- --------- --------------------\n\ +1025 fs-cinder 2 250 TB 21.4 TB (9%) 0 B (0%) NA 228 TB (91%) \ + No 32 KB,WFS-2,128 DSBs\n\ +\n" + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(df_f_tb, ''))) + + out = self.hnas_backend.get_fs_info('fs-cinder') + + self.assertEqual('2', out['evs_id']) + self.assertEqual('fs-cinder', out['label']) + self.assertEqual(str(available_size), out['available_size']) + self.assertEqual(str(total_size), out['total_size']) + self.hnas_backend._run_cmd.assert_called_with('df', '-af', 'fs-cinder') + + def test_get_fs_single_evs_tb(self): + available_size = float(228 * 1024 ** 2) + total_size = float(250 * 1024 ** 2) + + df_f_tb = "\n\ +ID Label Size Used Snapshots Deduped Avail \ +Thin ThinSize ThinAvail FS Type\n\ +---- ---------- ------ ------------ --------- ------- ------------ \ +---- -------- --------- --------------------\n\ +1025 fs-cinder 250 TB 21.4 TB (9%) 0 B (0%) NA 228 TB (91%) \ + No 32 KB,WFS-2,128 DSBs\n\ +\n" + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(df_f_tb, ''))) + + out = self.hnas_backend.get_fs_info('fs-cinder') + + self.assertEqual('fs-cinder', out['label']) + self.assertEqual(str(available_size), out['available_size']) + self.assertEqual(str(total_size), out['total_size']) + self.hnas_backend._run_cmd.assert_called_with('df', '-af', 'fs-cinder') + + def test_create_lu(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + self.hnas_backend.create_lu('fs-cinder', '128', 'cinder-lu') + + calls = [mock.call('evsfs', 'list'), mock.call('console-context', + '--evs', '2', + 'iscsi-lu', 'add', + '-e', 'cinder-lu', + 'fs-cinder', + '/.cinder/cinder-lu.' 
+ 'iscsi', '128G')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_delete_lu(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + self.hnas_backend.delete_lu('fs-cinder', 'cinder-lu') + + calls = [mock.call('evsfs', 'list'), mock.call('console-context', + '--evs', '2', + 'iscsi-lu', 'del', '-d', + '-f', 'cinder-lu')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_extend_lu(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + self.hnas_backend.extend_lu('fs-cinder', '128', 'cinder-lu') + + calls = [mock.call('evsfs', 'list'), mock.call('console-context', + '--evs', '2', + 'iscsi-lu', 'expand', + 'cinder-lu', '128G')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_cloned_lu(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + self.hnas_backend.create_cloned_lu('cinder-lu', 'fs-cinder', 'snap') + + calls = [mock.call('evsfs', 'list'), mock.call('console-context', + '--evs', '2', + 'iscsi-lu', 'clone', + '-e', 'cinder-lu', + 'snap', + '/.cinder/snap.iscsi')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_get_existing_lu_info(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsilu_list, '')])) + + out = self.hnas_backend.get_existing_lu_info('cinder-lu', None, None) + + self.assertEqual('cinder-lu', out['name']) + self.assertEqual('fs-cinder', out['filesystem']) + self.assertEqual(2.0, out['size']) + + def test_get_existing_lu_info_tb(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsilu_list_tb, '')])) + + out = self.hnas_backend.get_existing_lu_info('test-lu', None, None) + + self.assertEqual('test-lu', out['name']) + self.assertEqual('fs-cinder', out['filesystem']) + self.assertEqual(2048.0, out['size']) + + def test_rename_existing_lu(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + self.hnas_backend.rename_existing_lu('fs-cinder', 'cinder-lu', + 'new-lu-name') + + calls = [mock.call('evsfs', 'list'), mock.call('console-context', + '--evs', '2', + 'iscsi-lu', 'mod', '-n', + "'new-lu-name'", + 'cinder-lu')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_check_lu(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsi_target_list, '')])) + + out = self.hnas_backend.check_lu('cinder-lu', 'fs-cinder') + + self.assertEqual('cinder-lu', out['tgt']['lus'][0]['name']) + self.assertEqual('pxr6U37LZZJBoMc', out['tgt']['secret']) + self.assertTrue(out['mapped']) + calls = [mock.call('evsfs', 'list'), mock.call('console-context', + '--evs', '2', + 'iscsi-target', 'list')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_check_lu_not_found(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsi_target_list, '')])) + + # passing a volume fake-volume not mapped + out = self.hnas_backend.check_lu('fake-volume', 'fs-cinder') + self.assertFalse(out['mapped']) + self.assertEqual(0, out['id']) + self.assertIsNone(out['tgt']) + + def test_add_iscsi_conn(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsi_target_list, ''), + 
(evsfs_list, '')])) + + out = self.hnas_backend.add_iscsi_conn('cinder-lu', 'fs-cinder', 3260, + 'cinder-default', 'initiator') + + self.assertEqual('cinder-lu', out['lu_name']) + self.assertEqual('fs-cinder', out['fs']) + self.assertEqual('0', out['lu_id']) + self.assertEqual(3260, out['port']) + calls = [mock.call('evsfs', 'list'), + mock.call('console-context', '--evs', '2', 'iscsi-target', + 'list')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_add_iscsi_conn_not_mapped_volume(self): + not_mapped = {'mapped': False, + 'id': 0, + 'tgt': None} + + self.mock_object(self.hnas_backend, 'check_lu', + mock.Mock(return_value=not_mapped)) + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsi_target_list, ''), + ('', '')])) + + out = self.hnas_backend.add_iscsi_conn('cinder-lu', 'fs-cinder', 3260, + 'cinder-default', 'initiator') + + self.assertEqual('cinder-lu', out['lu_name']) + self.assertEqual('fs-cinder', out['fs']) + self.assertEqual(2, out['lu_id']) + self.assertEqual(3260, out['port']) + calls = [mock.call('evsfs', 'list'), + mock.call('console-context', '--evs', '2', 'iscsi-target', + 'list')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_del_iscsi_conn(self): + iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default' + + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(iscsi_one_target, ''))) + + self.hnas_backend.del_iscsi_conn('2', iqn, '0') + + calls = [mock.call('console-context', '--evs', '2', 'iscsi-target', + 'list', iqn), + mock.call('console-context', '--evs', '2', 'iscsi-target', + 'dellu', '-f', iqn, '0')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_del_iscsi_conn_volume_not_found(self): + iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-fake' + + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(iscsi_one_target, ''))) + + self.hnas_backend.del_iscsi_conn('2', iqn, '10') + + self.hnas_backend._run_cmd.assert_called_with('console-context', + '--evs', '2', + 'iscsi-target', 'list', + iqn) + + def test_check_target(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsi_target_list, '')])) + + out = self.hnas_backend.check_target('fs-cinder', 'cinder-default') + + self.assertTrue(out['found']) + self.assertEqual('cinder-lu', out['tgt']['lus'][0]['name']) + self.assertEqual('cinder-default', out['tgt']['alias']) + self.assertEqual('pxr6U37LZZJBoMc', out['tgt']['secret']) + + def test_check_target_not_found(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsi_target_list, '')])) + + out = self.hnas_backend.check_target('fs-cinder', 'cinder-fake') + + self.assertFalse(out['found']) + self.assertIsNone(out['tgt']) + + def test_set_target_secret(self): + targetalias = 'cinder-default' + secret = 'pxr6U37LZZJBoMc' + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + self.hnas_backend.set_target_secret(targetalias, 'fs-cinder', secret) + + calls = [mock.call('evsfs', 'list'), + mock.call('console-context', '--evs', '2', 'iscsi-target', + 'mod', '-s', 'pxr6U37LZZJBoMc', '-a', 'enable', + 'cinder-default')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_set_target_secret_empty_target_list(self): + targetalias = 'cinder-default' + secret = 'pxr6U37LZZJBoMc' + + self.mock_object(self.hnas_backend, 
'_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + ('does not exist', ''), + ('', '')])) + + self.hnas_backend.set_target_secret(targetalias, 'fs-cinder', secret) + + calls = [mock.call('console-context', '--evs', '2', 'iscsi-target', + 'mod', '-s', 'pxr6U37LZZJBoMc', '-a', 'enable', + 'cinder-default')] + self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) + + def test_get_target_secret(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsi_one_target, '')])) + out = self.hnas_backend.get_target_secret('cinder-default', + 'fs-cinder') + + self.assertEqual('pxr6U37LZZJBoMc', out) + + self.hnas_backend._run_cmd.assert_called_with('console-context', + '--evs', '2', + 'iscsi-target', 'list', + 'cinder-default') + + def test_get_target_secret_chap_disabled(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (target_chap_disable, '')])) + out = self.hnas_backend.get_target_secret('cinder-default', + 'fs-cinder') + + self.assertEqual('', out) + + self.hnas_backend._run_cmd.assert_called_with('console-context', + '--evs', '2', + 'iscsi-target', 'list', + 'cinder-default') + + def test_get_target_iqn(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (iscsi_one_target, ''), + (add_targetsecret, '')])) + + out = self.hnas_backend.get_target_iqn('cinder-default', 'fs-cinder') + + self.assertEqual('iqn.2014-12.10.10.10.10:evstest1.cinder-default', + out) + + def test_create_target(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(return_value=(evsfs_list, ''))) + + self.hnas_backend.create_target('cinder-default', 'fs-cinder', + 'pxr6U37LZZJBoMc') + + def test_check_snapshot_parent_true(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock( + side_effect=[(evsfs_list, ''), + (file_clone_stat, ''), + (file_clone_stat_snap_file1, ''), + (file_clone_stat_snap_file2, '')])) + out = self.hnas_backend.check_snapshot_parent('cinder-lu', + 'snapshot-lu-1', + 'fs-cinder') + + self.assertTrue(out) + self.hnas_backend._run_cmd.assert_called_with('console-context', + '--evs', '2', + 'file-clone-stat' + '-snapshot-file', '-f', + 'fs-cinder', + '00000000004010000d2011' + '6826ffffffffffffff]') + + def test_check_snapshot_parent_false(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock( + side_effect=[(evsfs_list, ''), + (file_clone_stat, ''), + (file_clone_stat_snap_file1, ''), + (file_clone_stat_snap_file2, '')])) + out = self.hnas_backend.check_snapshot_parent('cinder-lu', + 'snapshot-lu-3', + 'fs-cinder') + + self.assertFalse(out) + self.hnas_backend._run_cmd.assert_called_with('console-context', + '--evs', '2', + 'file-clone-stat' + '-snapshot-file', '-f', + 'fs-cinder', + '00000000004029000d81f26' + '826ffffffffffffff]') + + def test_check_a_not_cloned_file(self): + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock( + side_effect=[(evsfs_list, ''), + (not_a_clone, '')])) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.hnas_backend.check_snapshot_parent, + 'cinder-lu', 'snapshot-name', 'fs-cinder') + + def test_get_export_path(self): + export_out = '/export01-husvm' + + self.mock_object(self.hnas_backend, '_run_cmd', + mock.Mock(side_effect=[(evsfs_list, ''), + (nfs_export, '')])) + + out = self.hnas_backend.get_export_path(export_out, 'fs-cinder') + + self.assertEqual(export_out, out) + self.hnas_backend._run_cmd.assert_called_with('console-context', + 
'--evs', '2', + 'nfs-export', 'list', + export_out) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_iscsi.py new file mode 100644 index 000000000..f8b4f980e --- /dev/null +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_iscsi.py @@ -0,0 +1,591 @@ +# Copyright (c) 2014 Hitachi Data Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock + +from oslo_concurrency import processutils as putils + +from cinder import context +from cinder import exception +from cinder import test +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder.volume import configuration as conf +from cinder.volume.drivers.hitachi.hnas_backend import HNASSSHBackend +from cinder.volume.drivers.hitachi import hnas_iscsi as iscsi +from cinder.volume.drivers.hitachi import hnas_utils +from cinder.volume import volume_types + + +# The following information is passed on to tests, when creating a volume +_VOLUME = {'name': 'volume-cinder', + 'id': fake.VOLUME_ID, + 'size': 128, + 'host': 'host1@hnas-iscsi-backend#default', + 'provider_location': '83-68-96-AA-DA-5D.volume-2dfe280e-470a-' + '4182-afb8-1755025c35b8'} + +_VOLUME2 = {'name': 'volume-clone', + 'id': fake.VOLUME2_ID, + 'size': 150, + 'host': 'host1@hnas-iscsi-backend#default', + 'provider_location': '83-68-96-AA-DA-5D.volume-8fe1802a-316b-' + '5237-1c57-c35b81755025'} + +_SNAPSHOT = { + 'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc', + 'id': fake.SNAPSHOT_ID, + 'size': 128, + 'volume_type': None, + 'provider_location': None, + 'volume_size': 128, + 'volume': _VOLUME, + 'volume_name': _VOLUME['name'], + 'host': 'host1@hnas-iscsi-backend#silver', + 'volume_type_id': fake.VOLUME_TYPE_ID, +} + + +class HNASiSCSIDriverTest(test.TestCase): + """Test HNAS iSCSI volume driver.""" + def setUp(self): + super(HNASiSCSIDriverTest, self).setUp() + self.context = context.get_admin_context() + self.volume = fake_volume.fake_volume_obj( + self.context, **_VOLUME) + self.volume_clone = fake_volume.fake_volume_obj( + self.context, **_VOLUME2) + self.snapshot = self.instantiate_snapshot(_SNAPSHOT) + + self.volume_type = fake_volume.fake_volume_type_obj( + None, + **{'name': 'silver'} + ) + + self.parsed_xml = { + 'username': 'supervisor', + 'password': 'supervisor', + 'hnas_cmd': 'ssc', + 'fs': {'fs2': 'fs2'}, + 'ssh_port': '22', + 'port': '3260', + 'services': { + 'default': { + 'hdp': 'fs2', + 'iscsi_ip': '172.17.39.132', + 'iscsi_port': '3260', + 'port': '22', + 'volume_type': 'default', + 'label': 'svc_0', + 'evs': '1', + 'tgt': { + 'alias': 'test', + 'secret': 'itEpgB5gPefGhW2' + } + }, + 'silver': { + 'hdp': 'fs3', + 'iscsi_ip': '172.17.39.133', + 'iscsi_port': '3260', + 'port': '22', + 'volume_type': 'silver', + 'label': 'svc_1', + 'evs': '2', + 'tgt': { + 'alias': 'iscsi-test', + 'secret': 'itEpgB5gPefGhW2' + } + } + }, + 
'cluster_admin_ip0': None, + 'ssh_private_key': None, + 'chap_enabled': True, + 'mgmt_ip0': '172.17.44.15', + 'ssh_enabled': None + } + + self.configuration = mock.Mock(spec=conf.Configuration) + self.configuration.hds_hnas_iscsi_config_file = 'fake.xml' + + self.mock_object(hnas_utils, 'read_cinder_conf', + mock.Mock(return_value=self.parsed_xml)) + + self.driver = iscsi.HNASISCSIDriver(configuration=self.configuration) + + @staticmethod + def instantiate_snapshot(snap): + snap = snap.copy() + snap['volume'] = fake_volume.fake_volume_obj( + None, **snap['volume']) + snapshot = fake_snapshot.fake_snapshot_obj( + None, expected_attrs=['volume'], **snap) + return snapshot + + def test_get_service_target_chap_enabled(self): + lu_info = {'mapped': False, + 'id': 1, + 'tgt': {'alias': 'iscsi-test', + 'secret': 'itEpgB5gPefGhW2'}} + tgt = {'found': True, + 'tgt': { + 'alias': 'cinder-default', + 'secret': 'pxr6U37LZZJBoMc', + 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default', + 'lus': [ + {'id': '0', + 'name': 'cinder-lu'}, + {'id': '1', + 'name': 'volume-99da7ae7-1e7f-4d57-8bf...'} + ], + 'auth': 'Enabled'}} + iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default' + + self.mock_object(HNASSSHBackend, 'get_evs', + mock.Mock(return_value='1')) + self.mock_object(HNASSSHBackend, 'check_lu', + mock.Mock(return_value=lu_info)) + self.mock_object(HNASSSHBackend, 'check_target', + mock.Mock(return_value=tgt)) + self.mock_object(HNASSSHBackend, 'get_target_secret', + mock.Mock(return_value='')) + self.mock_object(HNASSSHBackend, 'set_target_secret') + self.mock_object(HNASSSHBackend, 'get_target_iqn', + mock.Mock(return_value=iqn)) + + self.driver._get_service_target(self.volume) + + def test_get_service_target_chap_disabled(self): + lu_info = {'mapped': False, + 'id': 1, + 'tgt': {'alias': 'iscsi-test', + 'secret': 'itEpgB5gPefGhW2'}} + tgt = {'found': False, + 'tgt': { + 'alias': 'cinder-default', + 'secret': 'pxr6U37LZZJBoMc', + 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default', + 'lus': [ + {'id': '0', + 'name': 'cinder-lu'}, + {'id': '1', + 'name': 'volume-99da7ae7-1e7f-4d57-8bf...'} + ], + 'auth': 'Enabled'}} + iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default' + + self.driver.config['chap_enabled'] = False + + self.mock_object(HNASSSHBackend, 'get_evs', + mock.Mock(return_value='1')) + self.mock_object(HNASSSHBackend, 'check_lu', + mock.Mock(return_value=lu_info)) + self.mock_object(HNASSSHBackend, 'check_target', + mock.Mock(return_value=tgt)) + self.mock_object(HNASSSHBackend, 'get_target_iqn', + mock.Mock(return_value=iqn)) + self.mock_object(HNASSSHBackend, 'create_target') + + self.driver._get_service_target(self.volume) + + def test_get_service_target_no_more_targets_exception(self): + iscsi.MAX_HNAS_LUS_PER_TARGET = 4 + lu_info = {'mapped': False, 'id': 1, + 'tgt': {'alias': 'iscsi-test', 'secret': 'itEpgB5gPefGhW2'}} + tgt = {'found': True, + 'tgt': { + 'alias': 'cinder-default', 'secret': 'pxr6U37LZZJBoMc', + 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default', + 'lus': [ + {'id': '0', 'name': 'volume-0'}, + {'id': '1', 'name': 'volume-1'}, + {'id': '2', 'name': 'volume-2'}, + {'id': '3', 'name': 'volume-3'}, ], + 'auth': 'Enabled'}} + + self.mock_object(HNASSSHBackend, 'get_evs', + mock.Mock(return_value='1')) + self.mock_object(HNASSSHBackend, 'check_lu', + mock.Mock(return_value=lu_info)) + self.mock_object(HNASSSHBackend, 'check_target', + mock.Mock(return_value=tgt)) + + self.assertRaises(exception.NoMoreTargets, + self.driver._get_service_target, 
self.volume) + + def test_check_pool_and_fs(self): + self.mock_object(hnas_utils, 'get_pool', + mock.Mock(return_value='default')) + self.driver._check_pool_and_fs(self.volume, 'fs2') + + def test_check_pool_and_fs_mismatch(self): + self.mock_object(hnas_utils, 'get_pool', + mock.Mock(return_value='default')) + + self.assertRaises(exception.ManageExistingVolumeTypeMismatch, + self.driver._check_pool_and_fs, self.volume, + 'fs-cinder') + + def test_check_pool_and_fs_host_mismatch(self): + self.mock_object(hnas_utils, 'get_pool', + mock.Mock(return_value='silver')) + + self.assertRaises(exception.ManageExistingVolumeTypeMismatch, + self.driver._check_pool_and_fs, self.volume, + 'fs3') + + def test_do_setup(self): + evs_info = {'172.17.39.132': {'evs_number': 1}, + '172.17.39.133': {'evs_number': 2}, + '172.17.39.134': {'evs_number': 3}} + + version_info = { + 'mac': '83-68-96-AA-DA-5D', + 'model': 'HNAS 4040', + 'version': '12.4.3924.11', + 'hardware': 'NAS Platform', + 'serial': 'B1339109', + } + + self.mock_object(HNASSSHBackend, 'get_fs_info', + mock.Mock(return_value=True)) + self.mock_object(HNASSSHBackend, 'get_evs_info', + mock.Mock(return_value=evs_info)) + self.mock_object(HNASSSHBackend, 'get_version', + mock.Mock(return_value=version_info)) + + self.driver.do_setup(None) + + HNASSSHBackend.get_fs_info.assert_called_with('fs2') + self.assertTrue(HNASSSHBackend.get_evs_info.called) + + def test_do_setup_portal_not_found(self): + evs_info = {'172.17.48.132': {'evs_number': 1}, + '172.17.39.133': {'evs_number': 2}, + '172.17.39.134': {'evs_number': 3}} + + version_info = { + 'mac': '83-68-96-AA-DA-5D', + 'model': 'HNAS 4040', + 'version': '12.4.3924.11', + 'hardware': 'NAS Platform', + 'serial': 'B1339109', + } + + self.mock_object(HNASSSHBackend, 'get_fs_info', + mock.Mock(return_value=True)) + self.mock_object(HNASSSHBackend, 'get_evs_info', + mock.Mock(return_value=evs_info)) + self.mock_object(HNASSSHBackend, 'get_version', + mock.Mock(return_value=version_info)) + + self.assertRaises(exception.InvalidParameterValue, + self.driver.do_setup, None) + + def test_do_setup_umounted_filesystem(self): + self.mock_object(HNASSSHBackend, 'get_fs_info', + mock.Mock(return_value=False)) + + self.assertRaises(exception.ParameterNotFound, self.driver.do_setup, + None) + + def test_initialize_connection(self): + lu_info = {'mapped': True, + 'id': 1, + 'tgt': {'alias': 'iscsi-test', + 'secret': 'itEpgB5gPefGhW2'}} + + conn = {'lun_name': 'cinder-lu', + 'initiator': 'initiator', + 'hdp': 'fs-cinder', + 'lu_id': '0', + 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default', + 'port': 3260} + + connector = {'initiator': 'fake_initiator'} + + self.mock_object(HNASSSHBackend, 'get_evs', + mock.Mock(return_value=2)) + self.mock_object(HNASSSHBackend, 'check_lu', + mock.Mock(return_value=lu_info)) + self.mock_object(HNASSSHBackend, 'add_iscsi_conn', + mock.Mock(return_value=conn)) + + self.driver.initialize_connection(self.volume, connector) + + HNASSSHBackend.add_iscsi_conn.assert_called_with(self.volume.name, + 'fs2', '22', + 'iscsi-test', + connector[ + 'initiator']) + + def test_initialize_connection_command_error(self): + lu_info = {'mapped': True, + 'id': 1, + 'tgt': {'alias': 'iscsi-test', + 'secret': 'itEpgB5gPefGhW2'}} + + connector = {'initiator': 'fake_initiator'} + + self.mock_object(HNASSSHBackend, 'get_evs', + mock.Mock(return_value=2)) + self.mock_object(HNASSSHBackend, 'check_lu', + mock.Mock(return_value=lu_info)) + self.mock_object(HNASSSHBackend, 'add_iscsi_conn', + 
mock.Mock(side_effect=putils.ProcessExecutionError)) + + self.assertRaises(exception.ISCSITargetAttachFailed, + self.driver.initialize_connection, self.volume, + connector) + + def test_terminate_connection(self): + connector = {} + lu_info = {'mapped': True, + 'id': 1, + 'tgt': {'alias': 'iscsi-test', + 'secret': 'itEpgB5gPefGhW2'}} + + self.mock_object(HNASSSHBackend, 'get_evs', + mock.Mock(return_value=2)) + self.mock_object(HNASSSHBackend, 'check_lu', + mock.Mock(return_value=lu_info)) + self.mock_object(HNASSSHBackend, 'del_iscsi_conn') + + self.driver.terminate_connection(self.volume, connector) + + HNASSSHBackend.del_iscsi_conn.assert_called_with('1', + 'iscsi-test', + lu_info['id']) + + def test_get_volume_stats(self): + self.driver.pools = [{'pool_name': 'default', + 'service_label': 'svc_0', + 'fs': '172.17.39.132:/fs2'}, + {'pool_name': 'silver', + 'service_label': 'svc_1', + 'fs': '172.17.39.133:/fs3'}] + + fs_cinder = { + 'evs_id': '2', + 'total_size': '250', + 'label': 'fs-cinder', + 'available_size': '228', + 'used_size': '21.4', + 'id': '1025' + } + + self.mock_object(HNASSSHBackend, 'get_fs_info', + mock.Mock(return_value=fs_cinder)) + + stats = self.driver.get_volume_stats(refresh=True) + + self.assertEqual('5.0.0', stats['driver_version']) + self.assertEqual('Hitachi', stats['vendor_name']) + self.assertEqual('iSCSI', stats['storage_protocol']) + + def test_create_volume(self): + version_info = {'mac': '83-68-96-AA-DA-5D'} + expected_out = { + 'provider_location': version_info['mac'] + '.' + self.volume.name + } + + self.mock_object(HNASSSHBackend, 'create_lu') + self.mock_object(HNASSSHBackend, 'get_version', + mock.Mock(return_value=version_info)) + out = self.driver.create_volume(self.volume) + + self.assertEqual(expected_out, out) + HNASSSHBackend.create_lu.assert_called_with('fs2', u'128', + self.volume.name) + + def test_create_volume_missing_fs(self): + self.volume.host = 'host1@hnas-iscsi-backend#missing' + + self.assertRaises(exception.ParameterNotFound, + self.driver.create_volume, self.volume) + + def test_delete_volume(self): + self.mock_object(HNASSSHBackend, 'delete_lu') + + self.driver.delete_volume(self.volume) + + HNASSSHBackend.delete_lu.assert_called_once_with( + self.parsed_xml['fs']['fs2'], self.volume.name) + + def test_extend_volume(self): + new_size = 200 + self.mock_object(HNASSSHBackend, 'extend_lu') + + self.driver.extend_volume(self.volume, new_size) + + HNASSSHBackend.extend_lu.assert_called_once_with( + self.parsed_xml['fs']['fs2'], new_size, + self.volume.name) + + def test_create_cloned_volume(self): + clone_name = self.volume_clone.name + version_info = {'mac': '83-68-96-AA-DA-5D'} + expected_out = { + 'provider_location': + version_info['mac'] + '.' 
+ self.volume_clone.name + } + + self.mock_object(HNASSSHBackend, 'create_cloned_lu') + self.mock_object(HNASSSHBackend, 'get_version', + mock.Mock(return_value=version_info)) + self.mock_object(HNASSSHBackend, 'extend_lu') + + out = self.driver.create_cloned_volume(self.volume_clone, self.volume) + self.assertEqual(expected_out, out) + HNASSSHBackend.create_cloned_lu.assert_called_with(self.volume.name, + 'fs2', + clone_name) + + def test_functions_with_pass(self): + self.driver.check_for_setup_error() + self.driver.ensure_export(None, self.volume) + self.driver.create_export(None, self.volume, 'connector') + self.driver.remove_export(None, self.volume) + + def test_create_snapshot(self): + lu_info = {'lu_mounted': 'No', + 'name': 'cinder-lu', + 'fs_mounted': 'YES', + 'filesystem': 'FS-Cinder', + 'path': '/.cinder/cinder-lu.iscsi', + 'size': 2.0} + version_info = {'mac': '83-68-96-AA-DA-5D'} + expected_out = { + 'provider_location': version_info['mac'] + '.' + self.snapshot.name + } + + self.mock_object(HNASSSHBackend, 'get_existing_lu_info', + mock.Mock(return_value=lu_info)) + self.mock_object(volume_types, 'get_volume_type', + mock.Mock(return_value=self.volume_type)) + self.mock_object(HNASSSHBackend, 'create_cloned_lu') + self.mock_object(HNASSSHBackend, 'get_version', + mock.Mock(return_value=version_info)) + + out = self.driver.create_snapshot(self.snapshot) + self.assertEqual(expected_out, out) + + def test_delete_snapshot(self): + lu_info = {'filesystem': 'FS-Cinder'} + + self.mock_object(volume_types, 'get_volume_type', + mock.Mock(return_value=self.volume_type)) + self.mock_object(HNASSSHBackend, 'get_existing_lu_info', + mock.Mock(return_value=lu_info)) + self.mock_object(HNASSSHBackend, 'delete_lu') + + self.driver.delete_snapshot(self.snapshot) + + def test_create_volume_from_snapshot(self): + version_info = {'mac': '83-68-96-AA-DA-5D'} + expected_out = { + 'provider_location': version_info['mac'] + '.' 
+ self.snapshot.name + } + + self.mock_object(HNASSSHBackend, 'create_cloned_lu') + self.mock_object(HNASSSHBackend, 'get_version', + mock.Mock(return_value=version_info)) + + out = self.driver.create_volume_from_snapshot(self.volume, + self.snapshot) + self.assertEqual(expected_out, out) + HNASSSHBackend.create_cloned_lu.assert_called_with(self.snapshot.name, + 'fs2', + self.volume.name) + + def test_manage_existing_get_size(self): + existing_vol_ref = {'source-name': 'fs-cinder/volume-cinder'} + lu_info = { + 'name': 'volume-cinder', + 'comment': None, + 'path': ' /.cinder/volume-cinder', + 'size': 128, + 'filesystem': 'fs-cinder', + 'fs_mounted': 'Yes', + 'lu_mounted': 'Yes' + } + + self.mock_object(HNASSSHBackend, 'get_existing_lu_info', + mock.Mock(return_value=lu_info)) + + out = self.driver.manage_existing_get_size(self.volume, + existing_vol_ref) + + self.assertEqual(lu_info['size'], out) + HNASSSHBackend.get_existing_lu_info.assert_called_with( + 'volume-cinder', lu_info['filesystem']) + + def test_manage_existing_get_size_no_source_name(self): + existing_vol_ref = {} + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, self.volume, + existing_vol_ref) + + def test_manage_existing_get_size_wrong_source_name(self): + existing_vol_ref = {'source-name': 'fs-cinder/volume/cinder'} + + self.mock_object(HNASSSHBackend, 'get_existing_lu_info', + mock.Mock(return_value={})) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, self.volume, + existing_vol_ref) + + def test_manage_existing_get_size_volume_not_found(self): + existing_vol_ref = {'source-name': 'fs-cinder/volume-cinder'} + + self.mock_object(HNASSSHBackend, 'get_existing_lu_info', + mock.Mock(return_value={})) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, self.volume, + existing_vol_ref) + + def test_manage_existing(self): + self.volume.volume_type = self.volume_type + existing_vol_ref = {'source-name': 'fs2/volume-cinder'} + metadata = {'service_label': 'default'} + version_info = {'mac': '83-68-96-AA-DA-5D'} + expected_out = { + 'provider_location': version_info['mac'] + '.' + self.volume.name + } + self.mock_object(HNASSSHBackend, 'rename_existing_lu') + self.mock_object(volume_types, 'get_volume_type_extra_specs', + mock.Mock(return_value=metadata)) + self.mock_object(HNASSSHBackend, 'get_version', + mock.Mock(return_value=version_info)) + + out = self.driver.manage_existing(self.volume, existing_vol_ref) + + self.assertEqual(expected_out, out) + HNASSSHBackend.rename_existing_lu.assert_called_with('fs2', + 'volume-cinder', + self.volume.name) + + def test_unmanage(self): + self.mock_object(HNASSSHBackend, 'rename_existing_lu') + + self.driver.unmanage(self.volume) + + HNASSSHBackend.rename_existing_lu.assert_called_with( + self.parsed_xml['fs']['fs2'], + self.volume.name, 'unmanage-' + self.volume.name) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py new file mode 100644 index 000000000..8efc6190d --- /dev/null +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py @@ -0,0 +1,586 @@ +# Copyright (c) 2014 Hitachi Data Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +import os + +from oslo_concurrency import processutils as putils +import socket + +from cinder import context +from cinder import exception +from cinder.image import image_utils +from cinder import test +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume.drivers.hitachi import hnas_backend as backend +from cinder.volume.drivers.hitachi import hnas_nfs as nfs +from cinder.volume.drivers.hitachi import hnas_utils +from cinder.volume.drivers import nfs as base_nfs + +_VOLUME = {'name': 'cinder-volume', + 'id': fake.VOLUME_ID, + 'size': 128, + 'host': 'host1@hnas-nfs-backend#default', + 'volume_type': 'default', + 'provider_location': 'hnas'} + +_SNAPSHOT = { + 'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc', + 'id': fake.SNAPSHOT_ID, + 'size': 128, + 'volume_type': None, + 'provider_location': None, + 'volume_size': 128, + 'volume': _VOLUME, + 'volume_name': _VOLUME['name'], + 'host': 'host1@hnas-iscsi-backend#silver', + 'volume_type_id': fake.VOLUME_TYPE_ID, +} + + +class HNASNFSDriverTest(test.TestCase): + """Test HNAS NFS volume driver.""" + + def __init__(self, *args, **kwargs): + super(HNASNFSDriverTest, self).__init__(*args, **kwargs) + + def instantiate_snapshot(self, snap): + snap = snap.copy() + snap['volume'] = fake_volume.fake_volume_obj( + None, **snap['volume']) + snapshot = fake_snapshot.fake_snapshot_obj( + None, expected_attrs=['volume'], **snap) + return snapshot + + def setUp(self): + super(HNASNFSDriverTest, self).setUp() + self.context = context.get_admin_context() + + self.volume = fake_volume.fake_volume_obj( + self.context, + **_VOLUME) + + self.snapshot = self.instantiate_snapshot(_SNAPSHOT) + + self.volume_type = fake_volume.fake_volume_type_obj( + None, + **{'name': 'silver'} + ) + self.clone = fake_volume.fake_volume_obj( + None, + **{'id': fake.VOLUME2_ID, + 'size': 128, + 'host': 'host1@hnas-nfs-backend#default', + 'volume_type': 'default', + 'provider_location': 'hnas'}) + + # xml parsed from utils + self.parsed_xml = { + 'username': 'supervisor', + 'password': 'supervisor', + 'hnas_cmd': 'ssc', + 'ssh_port': '22', + 'services': { + 'default': { + 'hdp': '172.24.49.21:/fs-cinder', + 'volume_type': 'default', + 'label': 'svc_0', + 'ctl': '1', + 'export': { + 'fs': 'fs-cinder', + 'path': '/export-cinder/volume' + } + }, + }, + 'cluster_admin_ip0': None, + 'ssh_private_key': None, + 'chap_enabled': 'True', + 'mgmt_ip0': '172.17.44.15', + 'ssh_enabled': None + } + + self.configuration = mock.Mock(spec=conf.Configuration) + self.configuration.hds_hnas_nfs_config_file = 'fake.xml' + + self.mock_object(hnas_utils, 'read_cinder_conf', + mock.Mock(return_value=self.parsed_xml)) + + self.configuration = mock.Mock(spec=conf.Configuration) + self.configuration.max_over_subscription_ratio = 20.0 + self.configuration.reserved_percentage = 0 + self.configuration.hds_hnas_nfs_config_file = 'fake_config.xml' + self.configuration.nfs_shares_config = 
'fake_nfs_share.xml'
+        self.configuration.num_shell_tries = 2
+
+        self.driver = nfs.HNASNFSDriver(configuration=self.configuration)
+
+    def test_check_pool_and_share_mismatch_exception(self):
+        # passing a share that does not exist in the config should raise
+        # an exception
+        nfs_shares = '172.24.49.21:/nfs_share'
+
+        self.mock_object(hnas_utils, 'get_pool',
+                         mock.Mock(return_value='default'))
+
+        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
+                          self.driver._check_pool_and_share, self.volume,
+                          nfs_shares)
+
+    def test_check_pool_and_share_type_mismatch_exception(self):
+        nfs_shares = '172.24.49.21:/fs-cinder'
+        self.volume.host = 'host1@hnas-nfs-backend#gold'
+
+        # returning a pool different from 'default' should raise an exception
+        self.mock_object(hnas_utils, 'get_pool',
+                         mock.Mock(return_value='default'))
+
+        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
+                          self.driver._check_pool_and_share, self.volume,
+                          nfs_shares)
+
+    def test_do_setup(self):
+        version_info = {
+            'mac': '83-68-96-AA-DA-5D',
+            'model': 'HNAS 4040',
+            'version': '12.4.3924.11',
+            'hardware': 'NAS Platform',
+            'serial': 'B1339109',
+        }
+        export_list = [
+            {'fs': 'fs-cinder',
+             'name': '/fs-cinder',
+             'free': 228.0,
+             'path': '/fs-cinder',
+             'evs': ['172.24.49.21'],
+             'size': 250.0}
+        ]
+
+        showmount = "Export list for 172.24.49.21: \n\
+/fs-cinder * \n\
+/shares/9bcf0bcc-8cc8-437e38bcbda9 127.0.0.1,10.1.0.5,172.24.44.141 \n\
+"
+
+        self.mock_object(backend.HNASSSHBackend, 'get_version',
+                         mock.Mock(return_value=version_info))
+        self.mock_object(self.driver, '_load_shares_config')
+        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
+                         mock.Mock(return_value=export_list))
+        self.mock_object(self.driver, '_execute',
+                         mock.Mock(return_value=(showmount, '')))
+
+        self.driver.do_setup(None)
+
+        self.driver._execute.assert_called_with('showmount', '-e',
+                                                '172.24.49.21')
+        self.assertTrue(backend.HNASSSHBackend.get_export_list.called)
+
+    def test_do_setup_execute_exception(self):
+        version_info = {
+            'mac': '83-68-96-AA-DA-5D',
+            'model': 'HNAS 4040',
+            'version': '12.4.3924.11',
+            'hardware': 'NAS Platform',
+            'serial': 'B1339109',
+        }
+
+        export_list = [
+            {'fs': 'fs-cinder',
+             'name': '/fs-cinder',
+             'free': 228.0,
+             'path': '/fs-cinder',
+             'evs': ['172.24.49.21'],
+             'size': 250.0}
+        ]
+
+        self.mock_object(backend.HNASSSHBackend, 'get_version',
+                         mock.Mock(return_value=version_info))
+        self.mock_object(self.driver, '_load_shares_config')
+        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
+                         mock.Mock(return_value=export_list))
+        self.mock_object(self.driver, '_execute',
+                         mock.Mock(side_effect=putils.ProcessExecutionError))
+
+        self.assertRaises(putils.ProcessExecutionError, self.driver.do_setup,
+                          None)
+
+    def test_do_setup_missing_export(self):
+        version_info = {
+            'mac': '83-68-96-AA-DA-5D',
+            'model': 'HNAS 4040',
+            'version': '12.4.3924.11',
+            'hardware': 'NAS Platform',
+            'serial': 'B1339109',
+        }
+        export_list = [
+            {'fs': 'fs-cinder',
+             'name': '/wrong-fs',
+             'free': 228.0,
+             'path': '/fs-cinder',
+             'evs': ['172.24.49.21'],
+             'size': 250.0}
+        ]
+
+        showmount = "Export list for 172.24.49.21: \n\
+/fs-cinder * \n\
+"
+
+        self.mock_object(backend.HNASSSHBackend, 'get_version',
+                         mock.Mock(return_value=version_info))
+        self.mock_object(self.driver, '_load_shares_config')
+        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
+                         mock.Mock(return_value=export_list))
+        self.mock_object(self.driver, '_execute',
+                         mock.Mock(return_value=(showmount, '')))
+
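+        # the backend reports an export named '/wrong-fs', which never
+        # appears in the showmount output, so do_setup should fail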
+        self.assertRaises(exception.InvalidParameterValue,
+                          self.driver.do_setup, None)
+
+    def test_create_volume(self):
+        self.mock_object(self.driver, '_ensure_shares_mounted')
+        self.mock_object(self.driver, '_do_create_volume')
+
+        out = self.driver.create_volume(self.volume)
+
+        self.assertEqual('172.24.49.21:/fs-cinder', out['provider_location'])
+        self.assertTrue(self.driver._ensure_shares_mounted.called)
+
+    def test_create_volume_exception(self):
+        # pool 'original' does not exist in the configured services
+        self.volume.host = 'host1@hnas-nfs-backend#original'
+
+        self.mock_object(self.driver, '_ensure_shares_mounted')
+
+        self.assertRaises(exception.ParameterNotFound,
+                          self.driver.create_volume, self.volume)
+
+    def test_create_cloned_volume(self):
+        self.volume.size = 150
+
+        self.mock_object(self.driver, 'extend_volume')
+        self.mock_object(backend.HNASSSHBackend, 'file_clone')
+
+        out = self.driver.create_cloned_volume(self.volume, self.clone)
+
+        self.assertEqual('hnas', out['provider_location'])
+
+    def test_get_volume_stats(self):
+        self.driver.pools = [{'pool_name': 'default',
+                              'service_label': 'default',
+                              'fs': '172.24.49.21:/easy-stack'},
+                             {'pool_name': 'cinder_svc',
+                              'service_label': 'cinder_svc',
+                              'fs': '172.24.49.26:/MNT-CinderTest2'}]
+
+        self.mock_object(self.driver, '_update_volume_stats')
+        self.mock_object(self.driver, '_get_capacity_info',
+                         mock.Mock(return_value=(150, 50, 100)))
+
+        out = self.driver.get_volume_stats()
+
+        self.assertEqual('5.0.0', out['driver_version'])
+        self.assertEqual('Hitachi', out['vendor_name'])
+        self.assertEqual('NFS', out['storage_protocol'])
+
+    def test_create_volume_from_snapshot(self):
+        self.mock_object(backend.HNASSSHBackend, 'file_clone')
+
+        self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
+
+    def test_create_snapshot(self):
+        self.mock_object(backend.HNASSSHBackend, 'file_clone')
+        self.driver.create_snapshot(self.snapshot)
+
+    def test_delete_snapshot(self):
+        self.mock_object(self.driver, '_execute')
+
+        self.driver.delete_snapshot(self.snapshot)
+
+    def test_delete_snapshot_execute_exception(self):
+        self.mock_object(self.driver, '_execute',
+                         mock.Mock(side_effect=putils.ProcessExecutionError))
+
+        self.driver.delete_snapshot(self.snapshot)
+
+    def test_extend_volume(self):
+        share_mount_point = '/fs-cinder'
+        data = image_utils.imageutils.QemuImgInfo
+        data.virtual_size = 200 * 1024 ** 3
+
+        self.mock_object(self.driver, '_get_mount_point_for_share',
+                         mock.Mock(return_value=share_mount_point))
+        self.mock_object(image_utils, 'qemu_img_info',
+                         mock.Mock(return_value=data))
+
+        self.driver.extend_volume(self.volume, 200)
+
+        self.driver._get_mount_point_for_share.assert_called_with('hnas')
+
+    def test_extend_volume_resizing_exception(self):
+        share_mount_point = '/fs-cinder'
+        data = image_utils.imageutils.QemuImgInfo
+        data.virtual_size = 2048 ** 3
+
+        self.mock_object(self.driver, '_get_mount_point_for_share',
+                         mock.Mock(return_value=share_mount_point))
+        self.mock_object(image_utils, 'qemu_img_info',
+                         mock.Mock(return_value=data))
+
+        self.mock_object(image_utils, 'resize_image')
+
+        self.assertRaises(exception.InvalidResults,
+                          self.driver.extend_volume, self.volume, 200)
+
+    def test_manage_existing(self):
+        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
+        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}
+
+        self.mock_object(os.path, 'isfile', mock.Mock(return_value=True))
+        self.mock_object(self.driver, '_get_mount_point_for_share',
mock.Mock(return_value='/fs-cinder/cinder-volume')) + self.mock_object(utils, 'resolve_hostname', + mock.Mock(return_value='172.24.49.21')) + self.mock_object(self.driver, '_ensure_shares_mounted') + self.mock_object(self.driver, '_execute') + + out = self.driver.manage_existing(self.volume, existing_vol_ref) + + loc = {'provider_location': '172.24.49.21:/fs-cinder'} + self.assertEqual(loc, out) + + os.path.isfile.assert_called_once_with('/fs-cinder/cinder-volume/') + self.driver._get_mount_point_for_share.assert_called_once_with( + '172.24.49.21:/fs-cinder') + utils.resolve_hostname.assert_called_with('172.24.49.21') + self.driver._ensure_shares_mounted.assert_called_once_with() + + def test_manage_existing_name_matches(self): + self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] + existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} + + self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', + mock.Mock(return_value=('172.24.49.21:/fs-cinder', + '/mnt/silver', + self.volume.name))) + + out = self.driver.manage_existing(self.volume, existing_vol_ref) + + loc = {'provider_location': '172.24.49.21:/fs-cinder'} + self.assertEqual(loc, out) + + def test_manage_existing_exception(self): + existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} + + self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', + mock.Mock(return_value=('172.24.49.21:/fs-cinder', + '/mnt/silver', + 'cinder-volume'))) + self.mock_object(self.driver, '_execute', + mock.Mock(side_effect=putils.ProcessExecutionError)) + + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.manage_existing, self.volume, + existing_vol_ref) + + def test_manage_existing_missing_source_name(self): + # empty source-name should raise an exception + existing_vol_ref = {} + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, self.volume, + existing_vol_ref) + + def test_manage_existing_missing_volume_in_backend(self): + self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] + existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} + + self.mock_object(self.driver, '_ensure_shares_mounted') + self.mock_object(utils, 'resolve_hostname', + mock.Mock(side_effect=['172.24.49.21', + '172.24.49.22'])) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, self.volume, + existing_vol_ref) + + def test_manage_existing_get_size(self): + existing_vol_ref = { + 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', + } + self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] + expected_size = 1 + + self.mock_object(self.driver, '_ensure_shares_mounted') + self.mock_object(utils, 'resolve_hostname', + mock.Mock(return_value='172.24.49.21')) + self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', + mock.Mock(return_value='/mnt/silver')) + self.mock_object(os.path, 'isfile', + mock.Mock(return_value=True)) + self.mock_object(utils, 'get_file_size', + mock.Mock(return_value=expected_size)) + + out = self.driver.manage_existing_get_size(self.volume, + existing_vol_ref) + + self.assertEqual(1, out) + utils.get_file_size.assert_called_once_with( + '/mnt/silver/cinder-volume') + utils.resolve_hostname.assert_called_with('172.24.49.21') + + def test_manage_existing_get_size_exception(self): + existing_vol_ref = { + 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', + } + self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] + + self.mock_object(self.driver, 
'_get_share_mount_and_vol_from_vol_ref', + mock.Mock(return_value=('172.24.49.21:/fs-cinder', + '/mnt/silver', + 'cinder-volume'))) + + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.manage_existing_get_size, self.volume, + existing_vol_ref) + + def test_manage_existing_get_size_resolving_hostname_exception(self): + existing_vol_ref = { + 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', + } + + self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] + + self.mock_object(self.driver, '_ensure_shares_mounted') + self.mock_object(utils, 'resolve_hostname', + mock.Mock(side_effect=socket.gaierror)) + + self.assertRaises(socket.gaierror, + self.driver.manage_existing_get_size, self.volume, + existing_vol_ref) + + def test_unmanage(self): + path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' + vol_str = 'volume-' + self.volume.id + vol_path = os.path.join(path, vol_str) + new_path = os.path.join(path, 'unmanage-' + vol_str) + + self.mock_object(self.driver, '_get_mount_point_for_share', + mock.Mock(return_value=path)) + self.mock_object(self.driver, '_execute') + + self.driver.unmanage(self.volume) + + self.driver._execute.assert_called_with('mv', vol_path, new_path, + run_as_root=False, + check_exit_code=True) + self.driver._get_mount_point_for_share.assert_called_with( + self.volume.provider_location) + + def test_unmanage_volume_exception(self): + path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' + + self.mock_object(self.driver, '_get_mount_point_for_share', + mock.Mock(return_value=path)) + self.mock_object(self.driver, '_execute', + mock.Mock(side_effect=ValueError)) + + self.driver.unmanage(self.volume) + + def test_manage_existing_snapshot(self): + nfs_share = "172.24.49.21:/fs-cinder" + nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID + path = "unmanage-snapshot-" + fake.SNAPSHOT_ID + loc = {'provider_location': '172.24.49.21:/fs-cinder'} + existing_ref = {'source-name': '172.24.49.21:/fs-cinder/' + + fake.SNAPSHOT_ID} + + self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', + mock.Mock(return_value=(nfs_share, nfs_mount, path))) + self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent', + mock.Mock(return_value=True)) + self.mock_object(self.driver, '_execute') + self.mock_object(backend.HNASSSHBackend, 'get_export_path', + mock.Mock(return_value='fs-cinder')) + + out = self.driver.manage_existing_snapshot(self.snapshot, + existing_ref) + + self.assertEqual(loc, out) + + def test_manage_existing_snapshot_not_parent_exception(self): + nfs_share = "172.24.49.21:/fs-cinder" + nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID + path = "unmanage-snapshot-" + fake.SNAPSHOT_ID + + existing_ref = {'source-name': '172.24.49.21:/fs-cinder/' + + fake.SNAPSHOT_ID} + + self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', + mock.Mock(return_value=(nfs_share, nfs_mount, path))) + self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent', + mock.Mock(return_value=False)) + self.mock_object(backend.HNASSSHBackend, 'get_export_path', + mock.Mock(return_value='fs-cinder')) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_snapshot, self.snapshot, + existing_ref) + + def test_manage_existing_snapshot_get_size(self): + existing_ref = { + 'source-name': '172.24.49.21:/fs-cinder/cinder-snapshot', + } + self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] + expected_size = 1 + + self.mock_object(self.driver, 
'_ensure_shares_mounted') + self.mock_object(utils, 'resolve_hostname', + mock.Mock(return_value='172.24.49.21')) + self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', + mock.Mock(return_value='/mnt/silver')) + self.mock_object(os.path, 'isfile', + mock.Mock(return_value=True)) + self.mock_object(utils, 'get_file_size', + mock.Mock(return_value=expected_size)) + + out = self.driver.manage_existing_snapshot_get_size( + self.snapshot, existing_ref) + + self.assertEqual(1, out) + utils.get_file_size.assert_called_once_with( + '/mnt/silver/cinder-snapshot') + utils.resolve_hostname.assert_called_with('172.24.49.21') + + def test_unmanage_snapshot(self): + path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' + snapshot_name = 'snapshot-' + self.snapshot.id + old_path = os.path.join(path, snapshot_name) + new_path = os.path.join(path, 'unmanage-' + snapshot_name) + + self.mock_object(self.driver, '_get_mount_point_for_share', + mock.Mock(return_value=path)) + self.mock_object(self.driver, '_execute') + + self.driver.unmanage_snapshot(self.snapshot) + + self.driver._execute.assert_called_with('mv', old_path, new_path, + run_as_root=False, + check_exit_code=True) + self.driver._get_mount_point_for_share.assert_called_with( + self.snapshot.provider_location) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py new file mode 100644 index 000000000..094d075e1 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py @@ -0,0 +1,327 @@ +# Copyright (c) 2016 Hitachi Data Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+#
+
+import copy
+import ddt
+import mock
+import os
+
+from xml.etree import ElementTree as ETree
+
+from cinder import context
+from cinder import exception
+from cinder import test
+from cinder.tests.unit import fake_constants
+from cinder.tests.unit import fake_volume
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.hitachi import hnas_iscsi
+from cinder.volume.drivers.hitachi import hnas_utils
+from cinder.volume import volume_types
+
+_VOLUME = {'name': 'cinder-volume',
+           'id': fake_constants.VOLUME_ID,
+           'size': 128,
+           'host': 'host1@hnas-nfs-backend#default',
+           'volume_type': 'default',
+           'provider_location': 'hnas'}
+
+service_parameters = ['volume_type', 'hdp']
+optional_parameters = ['ssc_cmd', 'cluster_admin_ip0', 'iscsi_ip']
+
+config_from_cinder_conf = {
+    'username': 'supervisor',
+    'fs': {'easy-stack': 'easy-stack',
+           'silver': 'silver'},
+    'ssh_port': 22,
+    'chap_enabled': True,
+    'cluster_admin_ip0': None,
+    'ssh_private_key': None,
+    'mgmt_ip0': '172.24.44.15',
+    'ssc_cmd': 'ssc',
+    'services': {
+        'default': {
+            'label': u'svc_0',
+            'volume_type': 'default',
+            'hdp': 'easy-stack'},
+        'FS-CinderDev1': {
+            'label': u'svc_1',
+            'volume_type': 'FS-CinderDev1',
+            'hdp': 'silver'}},
+    'password': 'supervisor'}
+
+valid_XML_str = '''
+<config>
+  <mgmt_ip0>172.24.44.15</mgmt_ip0>
+  <username>supervisor</username>
+  <password>supervisor</password>
+  <ssh_enabled>False</ssh_enabled>
+  <ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
+  <svc_0>
+    <volume_type>default</volume_type>
+    <iscsi_ip>172.24.49.21</iscsi_ip>
+    <hdp>easy-stack</hdp>
+  </svc_0>
+  <svc_1>
+    <volume_type>silver</volume_type>
+    <iscsi_ip>172.24.49.32</iscsi_ip>
+    <hdp>FS-CinderDev1</hdp>
+  </svc_1>
+</config>
+'''
+
+XML_no_authentication = '''
+<config>
+  <mgmt_ip0>172.24.44.15</mgmt_ip0>
+  <username>supervisor</username>
+  <ssh_enabled>False</ssh_enabled>
+</config>
+'''
+
+XML_empty_authentication_param = '''
+<config>
+  <mgmt_ip0>172.24.44.15</mgmt_ip0>
+  <username>supervisor</username>
+  <password></password>
+  <ssh_enabled>False</ssh_enabled>
+  <ssh_private_key></ssh_private_key>
+  <svc_0>
+    <volume_type>default</volume_type>
+    <iscsi_ip>172.24.49.21</iscsi_ip>
+    <hdp>easy-stack</hdp>
+  </svc_0>
+</config>
+'''
+
+# missing mgmt_ip0
+XML_without_mandatory_params = '''
+<config>
+  <username>supervisor</username>
+  <password>supervisor</password>
+  <ssh_enabled>False</ssh_enabled>
+  <svc_0>
+    <volume_type>default</volume_type>
+    <iscsi_ip>172.24.49.21</iscsi_ip>
+    <hdp>easy-stack</hdp>
+  </svc_0>
+</config>
+'''
+
+XML_no_services_configured = '''
+<config>
+  <mgmt_ip0>172.24.44.15</mgmt_ip0>
+  <username>supervisor</username>
+  <password>supervisor</password>
+  <ssh_port>10</ssh_port>
+  <ssh_enabled>False</ssh_enabled>
+  <ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
+</config>
+'''
+
+parsed_xml = {'username': 'supervisor', 'password': 'supervisor',
+              'ssc_cmd': 'ssc', 'iscsi_ip': None, 'ssh_port': 22,
+              'fs': {'easy-stack': 'easy-stack',
+                     'FS-CinderDev1': 'FS-CinderDev1'},
+              'cluster_admin_ip0': None,
+              'ssh_private_key': '/home/ubuntu/.ssh/id_rsa',
+              'services': {
+                  'default': {'hdp': 'easy-stack', 'volume_type': 'default',
+                              'label': 'svc_0'},
+                  'silver': {'hdp': 'FS-CinderDev1', 'volume_type': 'silver',
+                             'label': 'svc_1'}},
+              'mgmt_ip0': '172.24.44.15'}
+
+valid_XML_etree = ETree.XML(valid_XML_str)
+invalid_XML_etree_no_authentication = ETree.XML(XML_no_authentication)
+invalid_XML_etree_empty_parameter = ETree.XML(XML_empty_authentication_param)
+invalid_XML_etree_no_mandatory_params = ETree.XML(XML_without_mandatory_params)
+invalid_XML_etree_no_service = ETree.XML(XML_no_services_configured)
+
+
+@ddt.ddt
+class HNASUtilsTest(test.TestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(HNASUtilsTest, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(HNASUtilsTest, self).setUp()
+
+        self.fake_conf = conf.Configuration(hnas_utils.CONF)
+        self.fake_conf.append_config_values(hnas_iscsi.iSCSI_OPTS)
+
+        self.override_config('hnas_username', 'supervisor')
+        self.override_config('hnas_password', 'supervisor')
+        self.override_config('hnas_mgmt_ip0', '172.24.44.15')
+        self.override_config('hnas_svc0_volume_type', 'default')
+        self.override_config('hnas_svc0_hdp', 'easy-stack')
+        self.override_config('hnas_svc0_iscsi_ip', '172.24.49.21')
+        self.override_config('hnas_svc1_volume_type', 'FS-CinderDev1')
+        self.override_config('hnas_svc1_hdp',
'silver') + self.override_config('hnas_svc1_iscsi_ip', '172.24.49.32') + + self.context = context.get_admin_context() + self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME) + self.volume_type = (fake_volume.fake_volume_type_obj(None, **{ + 'id': fake_constants.VOLUME_TYPE_ID, 'name': 'silver'})) + + def test_read_xml_config(self): + self.mock_object(os, 'access', mock.Mock(return_value=True)) + self.mock_object(ETree, 'parse', + mock.Mock(return_value=ETree.ElementTree)) + self.mock_object(ETree.ElementTree, 'getroot', + mock.Mock(return_value=valid_XML_etree)) + + xml_path = 'xml_file_found' + out = hnas_utils.read_xml_config(xml_path, + service_parameters, + optional_parameters) + + self.assertEqual(parsed_xml, out) + + def test_read_xml_config_parser_error(self): + xml_file = 'hnas_nfs.xml' + self.mock_object(os, 'access', mock.Mock(return_value=True)) + self.mock_object(ETree, 'parse', + mock.Mock(side_effect=ETree.ParseError)) + + self.assertRaises(exception.ConfigNotFound, hnas_utils.read_xml_config, + xml_file, service_parameters, optional_parameters) + + def test_read_xml_config_not_found(self): + self.mock_object(os, 'access', mock.Mock(return_value=False)) + + xml_path = 'xml_file_not_found' + self.assertRaises(exception.NotFound, hnas_utils.read_xml_config, + xml_path, service_parameters, optional_parameters) + + def test_read_xml_config_without_services_configured(self): + xml_file = 'hnas_nfs.xml' + + self.mock_object(os, 'access', mock.Mock(return_value=True)) + self.mock_object(ETree, 'parse', + mock.Mock(return_value=ETree.ElementTree)) + self.mock_object(ETree.ElementTree, 'getroot', + mock.Mock(return_value=invalid_XML_etree_no_service)) + + self.assertRaises(exception.ParameterNotFound, + hnas_utils.read_xml_config, xml_file, + service_parameters, optional_parameters) + + def test_read_xml_config_empty_authentication_parameter(self): + xml_file = 'hnas_nfs.xml' + + self.mock_object(os, 'access', mock.Mock(return_value=True)) + self.mock_object(ETree, 'parse', + mock.Mock(return_value=ETree.ElementTree)) + self.mock_object(ETree.ElementTree, 'getroot', + mock.Mock(return_value= + invalid_XML_etree_empty_parameter)) + + self.assertRaises(exception.ParameterNotFound, + hnas_utils.read_xml_config, xml_file, + service_parameters, optional_parameters) + + def test_read_xml_config_mandatory_parameters_missing(self): + xml_file = 'hnas_nfs.xml' + + self.mock_object(os, 'access', mock.Mock(return_value=True)) + self.mock_object(ETree, 'parse', + mock.Mock(return_value=ETree.ElementTree)) + self.mock_object(ETree.ElementTree, 'getroot', + mock.Mock(return_value= + invalid_XML_etree_no_mandatory_params)) + + self.assertRaises(exception.ParameterNotFound, + hnas_utils.read_xml_config, xml_file, + service_parameters, optional_parameters) + + def test_read_config_xml_without_authentication_parameter(self): + xml_file = 'hnas_nfs.xml' + + self.mock_object(os, 'access', mock.Mock(return_value=True)) + self.mock_object(ETree, 'parse', + mock.Mock(return_value=ETree.ElementTree)) + self.mock_object(ETree.ElementTree, 'getroot', + mock.Mock(return_value= + invalid_XML_etree_no_authentication)) + + self.assertRaises(exception.ConfigNotFound, hnas_utils.read_xml_config, + xml_file, service_parameters, optional_parameters) + + def test_get_pool_with_vol_type(self): + self.mock_object(volume_types, 'get_volume_type_extra_specs', + mock.Mock(return_value={'service_label': 'silver'})) + + self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID + self.volume.volume_type = 
self.volume_type + + out = hnas_utils.get_pool(parsed_xml, self.volume) + + self.assertEqual('silver', out) + + def test_get_pool_with_vol_type_id_none(self): + self.volume.volume_type_id = None + self.volume.volume_type = self.volume_type + + out = hnas_utils.get_pool(parsed_xml, self.volume) + + self.assertEqual('default', out) + + def test_get_pool_with_missing_service_label(self): + self.mock_object(volume_types, 'get_volume_type_extra_specs', + mock.Mock(return_value={'service_label': 'gold'})) + + self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID + self.volume.volume_type = self.volume_type + + out = hnas_utils.get_pool(parsed_xml, self.volume) + + self.assertEqual('default', out) + + def test_get_pool_without_vol_type(self): + out = hnas_utils.get_pool(parsed_xml, self.volume) + self.assertEqual('default', out) + + def test_read_cinder_conf_nfs(self): + out = hnas_utils.read_cinder_conf(self.fake_conf, 'nfs') + + self.assertEqual(config_from_cinder_conf, out) + + def test_read_cinder_conf_iscsi(self): + local_config = copy.deepcopy(config_from_cinder_conf) + + local_config['services']['FS-CinderDev1']['iscsi_ip'] = '172.24.49.32' + local_config['services']['default']['iscsi_ip'] = '172.24.49.21' + + out = hnas_utils.read_cinder_conf(self.fake_conf, 'iscsi') + + self.assertEqual(local_config, out) + + def test_read_cinder_conf_break(self): + self.override_config('hnas_username', None) + self.override_config('hnas_password', None) + self.override_config('hnas_mgmt_ip0', None) + out = hnas_utils.read_cinder_conf(self.fake_conf, 'nfs') + self.assertIsNone(out) + + @ddt.data('hnas_username', 'hnas_password', + 'hnas_mgmt_ip0', 'hnas_svc0_iscsi_ip', 'hnas_svc0_volume_type', + 'hnas_svc0_hdp', ) + def test_init_invalid_conf_parameters(self, attr_name): + self.override_config(attr_name, None) + + self.assertRaises(exception.InvalidParameterValue, + hnas_utils.read_cinder_conf, self.fake_conf, 'iscsi') diff --git a/cinder/tests/unit/volume/drivers/hpe/__init__.py b/cinder/tests/unit/volume/drivers/hpe/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/tests/unit/fake_hpe_3par_client.py b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_3par_client.py similarity index 89% rename from cinder/tests/unit/fake_hpe_3par_client.py rename to cinder/tests/unit/volume/drivers/hpe/fake_hpe_3par_client.py index b4567efd5..ebe402689 100644 --- a/cinder/tests/unit/fake_hpe_3par_client.py +++ b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_3par_client.py @@ -19,7 +19,8 @@ import sys import mock -from cinder.tests.unit import fake_hpe_client_exceptions as hpeexceptions +from cinder.tests.unit.volume.drivers.hpe \ + import fake_hpe_client_exceptions as hpeexceptions hpe3par = mock.Mock() hpe3par.version = "4.2.0" diff --git a/cinder/tests/unit/fake_hpe_client_exceptions.py b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_client_exceptions.py similarity index 95% rename from cinder/tests/unit/fake_hpe_client_exceptions.py rename to cinder/tests/unit/volume/drivers/hpe/fake_hpe_client_exceptions.py index c496e07bf..f753eb3ed 100644 --- a/cinder/tests/unit/fake_hpe_client_exceptions.py +++ b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_client_exceptions.py @@ -78,8 +78,10 @@ class HTTPConflict(ClientException): message = "Conflict" def __init__(self, error=None): - if error and 'message' in error: - self._error_desc = error['message'] + if error: + super(HTTPConflict, self).__init__(error) + if 'message' in error: + self._error_desc = error['message'] def 
get_description(self): return self._error_desc diff --git a/cinder/tests/unit/fake_hpe_lefthand_client.py b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_lefthand_client.py similarity index 90% rename from cinder/tests/unit/fake_hpe_lefthand_client.py rename to cinder/tests/unit/volume/drivers/hpe/fake_hpe_lefthand_client.py index 2360de097..d4f05ee20 100644 --- a/cinder/tests/unit/fake_hpe_lefthand_client.py +++ b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_lefthand_client.py @@ -19,7 +19,8 @@ import sys import mock -from cinder.tests.unit import fake_hpe_client_exceptions as hpeexceptions +from cinder.tests.unit.volume.drivers.hpe \ + import fake_hpe_client_exceptions as hpeexceptions hpelefthand = mock.Mock() hpelefthand.version = "2.1.0" diff --git a/cinder/tests/unit/test_hpe3par.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py similarity index 99% rename from cinder/tests/unit/test_hpe3par.py rename to cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py index df72641e9..b21a76427 100644 --- a/cinder/tests/unit/test_hpe3par.py +++ b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py @@ -18,14 +18,14 @@ import mock import ast -from oslo_config import cfg from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder import test -from cinder.tests.unit import fake_hpe_3par_client as hpe3parclient +from cinder.tests.unit.volume.drivers.hpe \ + import fake_hpe_3par_client as hpe3parclient from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon from cinder.volume.drivers.hpe import hpe_3par_fc as hpefcdriver from cinder.volume.drivers.hpe import hpe_3par_iscsi as hpedriver @@ -35,7 +35,6 @@ from cinder.volume import volume_types hpeexceptions = hpe3parclient.hpeexceptions -CONF = cfg.CONF HPE3PAR_CPG = 'OpenStackCPG' HPE3PAR_CPG2 = 'fakepool' @@ -78,6 +77,8 @@ HPE3PAR_CPG_MAP = 'OpenStackCPG:DestOpenStackCPG fakepool:destfakepool' SYNC_MODE = 1 PERIODIC_MODE = 2 SYNC_PERIOD = 900 +# EXISTENT_PATH error code returned from hpe3parclient +EXISTENT_PATH = 73 class Comment(object): @@ -2294,6 +2295,7 @@ class HPE3PARBaseDriver(object): self.driver.create_volume_from_snapshot(self.volume, self.snapshot) ex = hpeexceptions.HTTPConflict("In use") + ex._error_code = 32 mock_client.deleteVolume = mock.Mock(side_effect=ex) # Deleting the snapshot that a volume is dependent on should fail @@ -5479,6 +5481,67 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver, test.TestCase): self.assertEqual('fakehost.foo', host['name']) + def test_concurrent_create_host(self): + # tests concurrent requests to create host + # setup_mock_client driver with default configuration + # and return the mock HTTP 3PAR client + mock_client = self.setup_driver() + mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} + mock_client.getCPG.return_value = {} + mock_client.queryHost.side_effect = [ + None, + {'members': [{'name': self.FAKE_HOST}] + }] + mock_client.createHost.side_effect = [ + hpeexceptions.HTTPConflict( + {'code': EXISTENT_PATH, + 'desc': 'host WWN/iSCSI name already used by another host'})] + mock_client.getHost.side_effect = [ + hpeexceptions.HTTPNotFound('fake'), + {'name': self.FAKE_HOST, + 'FCPaths': [{'driverVersion': None, + 'firmwareVersion': None, + 'hostSpeed': 0, + 'model': None, + 'portPos': {'cardPort': 1, 'node': 1, + 'slot': 2}, + 'vendor': None, + 'wwn': self.wwn[0]}, + {'driverVersion': None, + 'firmwareVersion': None, + 'hostSpeed': 0, + 'model': None, + 'portPos': {'cardPort': 1, 'node': 0, + 'slot': 
2}, + 'vendor': None, + 'wwn': self.wwn[1]}]}] + + with mock.patch.object(hpecommon.HPE3PARCommon, + '_create_client') as mock_create_client: + mock_create_client.return_value = mock_client + common = self.driver._login() + host = self.driver._create_host( + common, + self.volume, + self.connector) + expected = [ + mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), + mock.call.getCPG(HPE3PAR_CPG), + mock.call.getHost(self.FAKE_HOST), + mock.call.queryHost(wwns=['123456789012345', + '123456789054321']), + mock.call.createHost( + self.FAKE_HOST, + FCWwns=['123456789012345', '123456789054321'], + optional={'domain': None, 'persona': 2}), + mock.call.queryHost(wwns=['123456789012345', + '123456789054321']), + mock.call.getHost(self.FAKE_HOST)] + + mock_client.assert_has_calls(expected) + + self.assertEqual(self.FAKE_HOST, host['name']) + def test_create_modify_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client diff --git a/cinder/tests/unit/test_hpe_xp_fc.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe_xp_fc.py similarity index 100% rename from cinder/tests/unit/test_hpe_xp_fc.py rename to cinder/tests/unit/volume/drivers/hpe/test_hpe_xp_fc.py diff --git a/cinder/tests/unit/test_hpelefthand.py b/cinder/tests/unit/volume/drivers/hpe/test_hpelefthand.py similarity index 99% rename from cinder/tests/unit/test_hpelefthand.py rename to cinder/tests/unit/volume/drivers/hpe/test_hpelefthand.py index 3e90e972c..be8437b89 100644 --- a/cinder/tests/unit/test_hpelefthand.py +++ b/cinder/tests/unit/volume/drivers/hpe/test_hpelefthand.py @@ -24,7 +24,8 @@ from cinder import context from cinder import exception from cinder.objects import fields from cinder import test -from cinder.tests.unit import fake_hpe_lefthand_client as hpelefthandclient +from cinder.tests.unit.volume.drivers.hpe \ + import fake_hpe_lefthand_client as hpelefthandclient from cinder.volume.drivers.hpe import hpe_lefthand_iscsi from cinder.volume import volume_types diff --git a/cinder/tests/unit/volume/drivers/huawei/__init__.py b/cinder/tests/unit/volume/drivers/huawei/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/tests/unit/test_huawei_drivers.py b/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py similarity index 94% rename from cinder/tests/unit/test_huawei_drivers.py rename to cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py index ec96f12b7..c83ae9620 100644 --- a/cinder/tests/unit/test_huawei_drivers.py +++ b/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
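# The test_concurrent_create_host case added to the 3PAR FC tests above
# encodes a create-then-requery pattern: when two requests race to create
# the same host, the loser receives an HTTPConflict carrying the
# EXISTENT_PATH error code (73) and is expected to re-run queryHost/getHost
# instead of failing. A minimal, self-contained sketch of that pattern
# follows; the client calls mirror the mocks above, but create_or_find_host
# and the local HTTPConflict class are illustrative, not the driver's
# actual helpers.

EXISTENT_PATH = 73  # 3PAR error code: WWN/iSCSI name already used


class HTTPConflict(Exception):
    """Stand-in for the client's conflict exception (sketch only)."""
    def __init__(self, code):
        super(HTTPConflict, self).__init__('Conflict')
        self.code = code


def create_or_find_host(client, host_name, wwns):
    found = client.queryHost(wwns=wwns)
    if not found:
        try:
            client.createHost(host_name, FCWwns=wwns)
        except HTTPConflict as ex:
            if ex.code != EXISTENT_PATH:
                raise
            # Another request won the race; the host exists now, so fall
            # back to looking it up rather than treating this as an error.
            found = client.queryHost(wwns=wwns)
    name = found['members'][0]['name'] if found else host_name
    return client.getHost(name)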
"""Tests for huawei drivers.""" +import collections import copy import ddt import json @@ -20,6 +21,7 @@ import mock import re import tempfile import time +import unittest from xml.dom import minidom from cinder import context @@ -44,6 +46,9 @@ from cinder.volume import volume_types admin_contex = context.get_admin_context() +vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') +Volume = collections.namedtuple('Volume', vol_attrs) + PROVIDER_LOCATION = '11' HOST = 'ubuntu001@backend001#OpenStack_Pool' ID = '21ec7341-9256-497b-97d9-ef48edcf0635' @@ -276,7 +281,8 @@ FAKE_GET_LOGIN_STORAGE_RESPONSE = """ "data": { "username": "admin", "iBaseToken": "2001031430", - "deviceid": "210235G7J20000000000" + "deviceid": "210235G7J20000000000", + "accountstate": 2 } } """ @@ -303,7 +309,10 @@ FAKE_STORAGE_POOL_RESPONSE = """ "USERFREECAPACITY": "985661440", "ID": "0", "NAME": "OpenStack_Pool", - "USERTOTALCAPACITY": "985661440" + "USERTOTALCAPACITY": "985661440", + "TIER0CAPACITY": "100", + "TIER1CAPACITY": "0", + "TIER2CAPACITY": "0" }] } """ @@ -484,6 +493,18 @@ FAKE_GET_SNAPSHOT_INFO_RESPONSE = """ } """ +FAKE_SNAPSHOT_COUNT_RESPONSE = """ +{ + "data":{ + "COUNT":"2" + }, + "error":{ + "code":0, + "description":"0" + } +} +""" + # A fake response of get iscsi response FAKE_GET_ISCSI_INFO_RESPONSE = """ @@ -1208,6 +1229,14 @@ MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition?ID=1' '/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) +MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/associate?TYPE=27&ASSOCIATEOBJTYPE=21' + '&ASSOCIATEOBJID=1/GET'] = ( + FAKE_COMMON_SUCCESS_RESPONSE) + +MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/associate?TYPE=27&ASSOCIATEOBJTYPE=256' + '&ASSOCIATEOBJID=11/GET'] = ( + FAKE_COMMON_SUCCESS_RESPONSE) + MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup?range=[0-8191]/GET'] = ( FAKE_QUERY_LUN_GROUP_INFO_RESPONSE) @@ -1236,10 +1265,26 @@ MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) +MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=27' + '&ASSOCIATEOBJID=11/DELETE'] = ( + FAKE_COMMON_SUCCESS_RESPONSE) + MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_LUN_COUNT_RESPONSE) +MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/count?TYPE=27&ASSOCIATEOBJTYPE=256' + '&ASSOCIATEOBJID=1/GET'] = ( + FAKE_SNAPSHOT_COUNT_RESPONSE) + +MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/count?TYPE=27&ASSOCIATEOBJTYPE=256' + '&ASSOCIATEOBJID=11/GET'] = ( + FAKE_SNAPSHOT_COUNT_RESPONSE) + +MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=27' + '&ASSOCIATEOBJID=11/GET'] = ( + FAKE_LUN_ASSOCIATE_RESPONSE) + MAP_COMMAND_TO_FAKE_RESPONSE['/lun/expand/PUT'] = ( FAKE_LUN_INFO_RESPONSE) @@ -2104,6 +2149,9 @@ class HuaweiTestBase(test.TestCase): def setUp(self): super(HuaweiTestBase, self).setUp() + self.configuration = mock.Mock(spec=conf.Configuration) + self.driver = FakeISCSIStorage(configuration=self.configuration) + self.driver.do_setup() self.volume = fake_volume.fake_volume_obj( admin_contex, host=HOST, provider_location=PROVIDER_LOCATION, @@ -2144,6 +2192,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): super(HuaweiISCSIDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.hypermetro_devices = hypermetro_devices + self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') self.stubs.Set(time, 'sleep', Fake_sleep) self.driver = 
FakeISCSIStorage(configuration=self.configuration) self.driver.do_setup() @@ -2182,6 +2231,32 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): device_id = self.driver.client.login() self.assertEqual('210235G7J20000000000', device_id) + @ddt.data(constants.PWD_EXPIRED, constants.PWD_RESET) + def test_login_password_expires_and_reset_fail(self, state): + with mock.patch.object(self.driver.client, 'logout') as mock_logout: + self.mock_object(FakeClient, 'do_call', + mock.Mock(return_value={"error": {"code": 0}, + "data": { + "username": "admin", + "iBaseToken": "2001031430", + "deviceid": "210235G7J20000000000", + "accountstate": state}})) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.client.login) + mock_logout.assert_called_once_with() + + def test_login_logout_fail(self): + login_info = {"error": {"code": 0}, + "data": {"username": "admin", + "iBaseToken": "2001031430", + "deviceid": "210235G7J20000000000", + "accountstate": 3}} + logout_info = {"error": {"code": 1}, "data": {}} + self.mock_object(FakeClient, 'do_call', + mock.Mock(side_effect=[login_info, logout_info])) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.client.login) + def test_check_volume_exist_on_array(self): self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', mock.Mock(return_value=None)) @@ -2216,6 +2291,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): def test_delete_snapshot_success(self): self.driver.delete_snapshot(self.snapshot) + @unittest.skip("Skip until bug #1578986 is fixed") def test_create_volume_from_snapsuccess(self): self.mock_object( huawei_driver.HuaweiBaseDriver, @@ -2232,6 +2308,19 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.assertEqual(driver_data, model_update['replication_driver_data']) self.assertEqual('available', model_update['replication_status']) + @mock.patch.object(huawei_driver.HuaweiISCSIDriver, + 'initialize_connection', + return_value={"data": {'target_lun': 1}}) + def test_initialize_connection_snapshot_success(self, mock_iscsi_init): + iscsi_properties = self.driver.initialize_connection_snapshot( + self.snapshot, FakeConnector) + volume = Volume(id=self.snapshot.id, + provider_location=self.snapshot.provider_location, + lun_type='27', + metadata=None) + self.assertEqual(1, iscsi_properties['data']['target_lun']) + mock_iscsi_init.assert_called_with(volume, FakeConnector) + def test_initialize_connection_success_multipath_portgroup(self): temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True @@ -2361,12 +2450,23 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): driver.initialize_connection, self.volume, temp_connector) + @mock.patch.object(huawei_driver.HuaweiISCSIDriver, + 'terminate_connection') + def test_terminate_connection_snapshot_success(self, mock_iscsi_term): + self.driver.terminate_connection_snapshot(self.snapshot, + FakeConnector) + volume = Volume(id=self.snapshot.id, + provider_location=self.snapshot.provider_location, + lun_type='27', + metadata=None) + mock_iscsi_term.assert_called_with(volume, FakeConnector) + def test_terminate_connection_success(self): self.driver.terminate_connection(self.volume, FakeConnector) def test_get_volume_status(self): data = self.driver.get_volume_stats() - self.assertEqual('2.0.7', data['driver_version']) + self.assertEqual(self.driver.VERSION, data['driver_version']) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={"CAPACITY": 6291456}) @@ -2489,20 +2589,31 @@ class 
HuaweiISCSIDriverTestCase(HuaweiTestBase): "ID": "0", "USERFREECAPACITY": "36", "USERTOTALCAPACITY": "48", - "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE}, + "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE, + "TIER0CAPACITY": "48", + "TIER1CAPACITY": "0", + "TIER2CAPACITY": "0"}, {"NAME": "test002", "ID": "1", "USERFREECAPACITY": "37", "USERTOTALCAPACITY": "49", - "USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE}, + "USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE, + "TIER0CAPACITY": "0", + "TIER1CAPACITY": "49", + "TIER2CAPACITY": "0"}, {"NAME": "test003", "ID": "0", "USERFREECAPACITY": "36", "DATASPACE": "35", "USERTOTALCAPACITY": "48", - "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE}] + "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE, + "TIER0CAPACITY": "0", + "TIER1CAPACITY": "0", + "TIER2CAPACITY": "48"}] pool_name = 'test001' - test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48'} + test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48', + 'TIER0CAPACITY': '48', 'TIER1CAPACITY': '0', + 'TIER2CAPACITY': '0'} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) @@ -2517,7 +2628,9 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.assertEqual(test_info, pool_info) pool_name = 'test003' - test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48'} + test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48', + 'TIER0CAPACITY': '0', 'TIER1CAPACITY': '0', + 'TIER2CAPACITY': '48'} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) @@ -3028,7 +3141,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', - return_value={'USERCAPACITY': 2097152}) + return_value={'USERCAPACITY': 3097152}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_get_size_success(self, @@ -3038,17 +3151,17 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): 'source-id': 'ID1'} size = self.driver.manage_existing_snapshot_get_size(self.snapshot, external_ref) - self.assertEqual(1, size) + self.assertEqual(2, size) external_ref = {'source-name': 'test1'} size = self.driver.manage_existing_snapshot_get_size(self.snapshot, external_ref) - self.assertEqual(1, size) + self.assertEqual(2, size) external_ref = {'source-id': 'ID1'} size = self.driver.manage_existing_snapshot_get_size(self.snapshot, external_ref) - self.assertEqual(1, size) + self.assertEqual(2, size) @mock.patch.object(rest_client.RestClient, 'rename_snapshot') def test_unmanage_snapshot(self, mock_rename): @@ -3141,6 +3254,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): mock.Mock(return_value=False)) self.driver.delete_volume(self.replica_volume) + @unittest.skip("Skip until bug #1578986 is fixed") def test_wait_volume_online(self): replica = FakeReplicaPairManager(self.driver.client, self.driver.replica_client, @@ -3159,6 +3273,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.driver.client, lun_info) + @unittest.skip("Skip until bug #1578986 is fixed") def test_wait_second_access(self): pair_id = '1' access_ro = constants.REPLICA_SECOND_RO @@ -3175,8 +3290,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.assertRaises(exception.VolumeBackendAPIException, common_driver.wait_second_access, pair_id, access_rw) - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) + @unittest.skip("Skip until bug #1578986 is fixed") def 
test_wait_replica_ready(self): normal_status = { 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL, @@ -3401,6 +3515,9 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.assertEqual(self.replica_volume.id, v_id) self.assertEqual('error', v_update['replication_status']) + @unittest.skip("Skip until bug #1578986 is fixed") + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) @mock.patch.object(replication.PairOp, 'is_primary', side_effect=[False, True]) @mock.patch.object(replication.ReplicaCommonDriver, 'split') @@ -3454,6 +3571,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): common_driver.protect_second(replica_id) common_driver.unprotect_second(replica_id) + @unittest.skip("Skip until bug #1578986 is fixed") def test_replication_driver_sync(self): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) @@ -3614,6 +3732,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): def setUp(self): super(HuaweiFCDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) + self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') self.huawei_conf = FakeHuaweiConf(self.configuration, 'FC') self.configuration.hypermetro_devices = hypermetro_devices self.stubs.Set(time, 'sleep', Fake_sleep) @@ -3633,7 +3752,8 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): def test_delete_volume_success(self): self.driver.delete_volume(self.volume) - def test_create_snapshot_success(self): + @mock.patch.object(rest_client, 'RestClient') + def test_create_snapshot_success(self, mock_client): lun_info = self.driver.create_snapshot(self.snapshot) self.assertEqual(11, lun_info['provider_location']) @@ -3645,11 +3765,25 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): def test_delete_snapshot_success(self): self.driver.delete_snapshot(self.snapshot) + @unittest.skip("Skip until bug #1578986 is fixed") def test_create_volume_from_snapsuccess(self): lun_info = self.driver.create_volume_from_snapshot(self.volume, self.volume) self.assertEqual('1', lun_info['provider_location']) + @mock.patch.object(huawei_driver.HuaweiFCDriver, + 'initialize_connection', + return_value={"data": {'target_lun': 1}}) + def test_initialize_connection_snapshot_success(self, mock_fc_init): + iscsi_properties = self.driver.initialize_connection_snapshot( + self.snapshot, FakeConnector) + volume = Volume(id=self.snapshot.id, + provider_location=self.snapshot.provider_location, + lun_type='27', + metadata=None) + self.assertEqual(1, iscsi_properties['data']['target_lun']) + mock_fc_init.assert_called_with(volume, FakeConnector) + def test_initialize_connection_success(self): iscsi_properties = self.driver.initialize_connection(self.volume, FakeConnector) @@ -3683,6 +3817,17 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): FakeConnector) self.assertEqual(1, fc_properties['data']['target_lun']) + @mock.patch.object(huawei_driver.HuaweiFCDriver, + 'terminate_connection') + def test_terminate_connection_snapshot_success(self, mock_fc_term): + self.driver.terminate_connection_snapshot(self.snapshot, + FakeConnector) + volume = Volume(id=self.snapshot.id, + provider_location=self.snapshot.provider_location, + lun_type='27', + metadata=None) + mock_fc_term.assert_called_with(volume, FakeConnector) + def test_terminate_connection_success(self): self.driver.client.terminateFlag = True self.driver.terminate_connection(self.volume, FakeConnector) @@ -3715,7 +3860,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): 'get_remote_device_by_wwn', 
mock.Mock(return_value=remote_device_info)) data = self.driver.get_volume_stats() - self.assertEqual('2.0.7', data['driver_version']) + self.assertEqual(self.driver.VERSION, data['driver_version']) self.assertTrue(data['pools'][0]['replication_enabled']) self.assertListEqual(['sync', 'async'], data['pools'][0]['replication_type']) @@ -3732,9 +3877,60 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): 'try_get_remote_wwn', mock.Mock(return_value={})) data = self.driver.get_volume_stats() - self.assertEqual('2.0.7', data['driver_version']) + self.assertEqual(self.driver.VERSION, data['driver_version']) self.assertNotIn('replication_enabled', data['pools'][0]) + @ddt.data({'TIER0CAPACITY': '100', + 'TIER1CAPACITY': '0', + 'TIER2CAPACITY': '0', + 'disktype': 'ssd'}, + {'TIER0CAPACITY': '0', + 'TIER1CAPACITY': '100', + 'TIER2CAPACITY': '0', + 'disktype': 'sas'}, + {'TIER0CAPACITY': '0', + 'TIER1CAPACITY': '0', + 'TIER2CAPACITY': '100', + 'disktype': 'nl_sas'}, + {'TIER0CAPACITY': '100', + 'TIER1CAPACITY': '100', + 'TIER2CAPACITY': '100', + 'disktype': 'mix'}, + {'TIER0CAPACITY': '0', + 'TIER1CAPACITY': '0', + 'TIER2CAPACITY': '0', + 'disktype': ''}) + def test_get_volume_disk_type(self, disk_type_value): + response_dict = json.loads(FAKE_STORAGE_POOL_RESPONSE) + storage_pool_sas = copy.deepcopy(response_dict) + storage_pool_sas['data'][0]['TIER0CAPACITY'] = ( + disk_type_value['TIER0CAPACITY']) + storage_pool_sas['data'][0]['TIER1CAPACITY'] = ( + disk_type_value['TIER1CAPACITY']) + storage_pool_sas['data'][0]['TIER2CAPACITY'] = ( + disk_type_value['TIER2CAPACITY']) + driver = FakeISCSIStorage(configuration=self.configuration) + driver.do_setup() + driver.replica = None + + self.mock_object(rest_client.RestClient, 'get_all_pools', + mock.Mock(return_value=storage_pool_sas['data'])) + data = driver.get_volume_stats() + if disk_type_value['disktype']: + self.assertEqual(disk_type_value['disktype'], + data['pools'][0]['disk_type']) + else: + self.assertIsNone(data['pools'][0].get('disk_type')) + + def test_get_disk_type_pool_info_none(self): + driver = FakeISCSIStorage(configuration=self.configuration) + driver.do_setup() + driver.replica = None + self.mock_object(rest_client.RestClient, 'get_pool_info', + mock.Mock(return_value=None)) + data = driver.get_volume_stats() + self.assertIsNone(data['pools'][0].get('disk_type')) + def test_extend_volume(self): self.driver.extend_volume(self.volume, 3) @@ -3780,7 +3976,8 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): '12') self.assertFalse(result) - @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') + @unittest.skip("Skip until bug #1578986 is fixed") + @mock.patch.object(rest_client, 'RestClient') def test_migrate_volume_success(self, mock_add_lun_to_partition): # Migrate volume without new type. 
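# The @ddt.data cases in test_get_volume_disk_type above pin down how a
# Huawei pool's disk_type is derived from its three tier capacities:
# exactly one non-zero tier maps to that tier's media ('ssd', 'sas' or
# 'nl_sas'), several non-zero tiers report 'mix', and all-zero tiers yield
# no disk_type at all. A standalone sketch of that mapping, inferred from
# the test data rather than copied from the driver source:

def disk_type_from_tiers(pool):
    tiers = (('ssd', pool.get('TIER0CAPACITY', '0')),
             ('sas', pool.get('TIER1CAPACITY', '0')),
             ('nl_sas', pool.get('TIER2CAPACITY', '0')))
    present = [name for name, capacity in tiers if int(capacity) > 0]
    if not present:
        return None        # no tier information -> no disk_type reported
    if len(present) == 1:
        return present[0]  # a single tier -> that tier's media type
    return 'mix'


assert disk_type_from_tiers({'TIER0CAPACITY': '100', 'TIER1CAPACITY': '0',
                             'TIER2CAPACITY': '0'}) == 'ssd'
assert disk_type_from_tiers({'TIER0CAPACITY': '100', 'TIER1CAPACITY': '100',
                             'TIER2CAPACITY': '100'}) == 'mix'
assert disk_type_from_tiers({'TIER0CAPACITY': '0', 'TIER1CAPACITY': '0',
                             'TIER2CAPACITY': '0'}) is None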
empty_dict = {} @@ -3912,7 +4109,8 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): test_new_type, None, test_host) self.assertTrue(retype) - @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') + @unittest.skip("Skip until bug #1578986 is fixed") + @mock.patch.object(rest_client, 'RestClient') @mock.patch.object( huawei_driver.HuaweiBaseDriver, '_get_volume_type', @@ -4169,6 +4367,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): self.volume, FakeConnector) + @unittest.skip("Skip until bug #1578986 is fixed") def test_wait_volume_ready_success(self): flag = self.driver.metro._wait_volume_ready("11") self.assertIsNone(flag) diff --git a/cinder/tests/unit/test_huawei_drivers_compatibility.py b/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers_compatibility.py similarity index 100% rename from cinder/tests/unit/test_huawei_drivers_compatibility.py rename to cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers_compatibility.py diff --git a/cinder/tests/unit/volume/drivers/ibm/__init__.py b/cinder/tests/unit/volume/drivers/ibm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/tests/unit/test_ibm_flashsystem.py b/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem.py similarity index 92% rename from cinder/tests/unit/test_ibm_flashsystem.py rename to cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem.py index ae589b630..11383abed 100644 --- a/cinder/tests/unit/test_ibm_flashsystem.py +++ b/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem.py @@ -476,6 +476,24 @@ class FlashSystemManagementSimulator(object): return ('Virtual Disk, id [%s], successfully created' % (vdisk_info['id']), '') + def _cmd_chvdisk(self, **kwargs): + """chvdisk command + + svctask chvdisk -name -udid + -open_access_scsi_id + """ + + if 'obj' not in kwargs: + return self._errors['CMMVC50000'] + + source_name = kwargs['obj'].strip('\'\"') + dest_name = kwargs['name'].strip('\'\"') + vol = self._volumes_list[source_name] + vol['name'] = dest_name + del self._volumes_list[source_name] + self._volumes_list[dest_name] = vol + return ('', '') + def _cmd_rmvdisk(self, **kwargs): """svctask rmvdisk -force """ @@ -1270,3 +1288,78 @@ class FlashSystemDriverTestCase(test.TestCase): # case 4: If there is no vdisk mapped to host, host should be removed self.assertIsNone(self.driver._get_host_from_connector(self.connector)) + + def test_flashsystem_manage_existing(self): + # case 1: manage a vdisk good path + kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} + self.sim._cmd_mkvdisk(**kwargs) + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + self.driver.manage_existing(vol1, existing_ref) + self.driver.delete_volume(vol1) + + # case 2: manage a vdisk not exist + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, vol1, existing_ref) + + # case 3: manage a vdisk without name and uid + kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} + self.sim._cmd_mkvdisk(**kwargs) + vol1 = self._generate_vol_info(None) + existing_ref = {} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, vol1, existing_ref) + vdisk1 = {'obj': u'unmanage-vol-01'} + self.sim._cmd_rmvdisk(**vdisk1) + + @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, + '_get_vdiskhost_mappings') + def test_flashsystem_manage_existing_get_size_mapped( 
self, + _get_vdiskhost_mappings_mock): + # manage a vdisk with mappings + _get_vdiskhost_mappings_mock.return_value = {'mapped': u'yes'} + kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} + self.sim._cmd_mkvdisk(**kwargs) + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, + vol1, + existing_ref) + + # clean environment + vdisk1 = {'obj': u'unmanage-vol-01'} + self.sim._cmd_rmvdisk(**vdisk1) + + def test_flashsystem_manage_existing_get_size_bad_ref(self): + # bad existing_ref + vol1 = self._generate_vol_info(None, None) + existing_ref = {} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, vol1, + existing_ref) + + def test_flashsystem_manage_existing_get_size_vdisk_not_exist(self): + # vdisk not exist + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, + vol1, + existing_ref) + + def test_flashsystem_manage_existing_get_size(self): + # good path + kwargs = {'name': u'unmanage-vol-01', 'size': u'10001', 'unit': 'gb'} + self.sim._cmd_mkvdisk(**kwargs) + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + vdisk_size = self.driver.manage_existing_get_size(vol1, existing_ref) + self.assertEqual(10001, vdisk_size) + + # clean environment + vdisk1 = {'obj': u'unmanage-vol-01'} + self.sim._cmd_rmvdisk(**vdisk1) diff --git a/cinder/tests/unit/test_ibm_flashsystem_iscsi.py b/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem_iscsi.py similarity index 79% rename from cinder/tests/unit/test_ibm_flashsystem_iscsi.py rename to cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem_iscsi.py index dfc1148db..352220b39 100644 --- a/cinder/tests/unit/test_ibm_flashsystem_iscsi.py +++ b/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem_iscsi.py @@ -26,7 +26,8 @@ import random from cinder import context from cinder import exception from cinder import test -from cinder.tests.unit import test_ibm_flashsystem as fscommon +from cinder.tests.unit.volume.drivers.ibm \ + import test_ibm_flashsystem as fscommon from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import flashsystem_iscsi @@ -343,3 +344,78 @@ class FlashSystemISCSIDriverTestCase(test.TestCase): self.driver._delete_host(host2) self.sim.set_protocol('iSCSI') self._reset_flags() + + def test_flashsystem_manage_existing(self): + # case 1: manage a vdisk good path + kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} + self.sim._cmd_mkvdisk(**kwargs) + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + self.driver.manage_existing(vol1, existing_ref) + self.driver.delete_volume(vol1) + + # case 2: manage a vdisk not exist + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, vol1, existing_ref) + + # case 3: manage a vdisk without name and uid + kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} + self.sim._cmd_mkvdisk(**kwargs) + vol1 = self._generate_vol_info(None) + existing_ref = {} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, vol1, existing_ref) + vdisk1 
= {'obj': u'unmanage-vol-01'} + self.sim._cmd_rmvdisk(**vdisk1) + + @mock.patch.object(flashsystem_iscsi.FlashSystemISCSIDriver, + '_get_vdiskhost_mappings') + def test_flashsystem_manage_existing_get_size_mapped( + self, + _get_vdiskhost_mappings_mock): + # case 2: manage a vdisk with mappings + _get_vdiskhost_mappings_mock.return_value = {'mapped': u'yes'} + kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} + self.sim._cmd_mkvdisk(**kwargs) + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, + vol1, + existing_ref) + + # clean environment + vdisk1 = {'obj': u'unmanage-vol-01'} + self.sim._cmd_rmvdisk(**vdisk1) + + def test_flashsystem_manage_existing_get_size_bad_ref(self): + # bad existing_ref + vol1 = self._generate_vol_info(None, None) + existing_ref = {} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, vol1, + existing_ref) + + def test_flashsystem_manage_existing_get_size_vdisk_not_exist(self): + # vdisk not exist + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, + vol1, + existing_ref) + + def test_flashsystem_manage_existing_get_size(self): + # good path + kwargs = {'name': u'unmanage-vol-01', 'size': u'10001', 'unit': 'gb'} + self.sim._cmd_mkvdisk(**kwargs) + vol1 = self._generate_vol_info(None) + existing_ref = {'source-name': u'unmanage-vol-01'} + vdisk_size = self.driver.manage_existing_get_size(vol1, existing_ref) + self.assertEqual(10001, vdisk_size) + + # clean environment + vdisk1 = {'obj': u'unmanage-vol-01'} + self.sim._cmd_rmvdisk(**vdisk1) diff --git a/cinder/tests/unit/test_ibm_xiv_ds8k.py b/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py similarity index 92% rename from cinder/tests/unit/test_ibm_xiv_ds8k.py rename to cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py index bec79840f..bc708f59f 100644 --- a/cinder/tests/unit/test_ibm_xiv_ds8k.py +++ b/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py @@ -29,7 +29,7 @@ from cinder.i18n import _ from cinder.objects import fields from cinder import test from cinder.volume import configuration as conf -from cinder.volume.drivers.ibm import xiv_ds8k +from cinder.volume.drivers.ibm import ibm_storage from cinder.volume import volume_types FAKE = "fake" @@ -81,30 +81,37 @@ CG_SNAPSHOT = {'id': CG_SNAPSHOT_ID, CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", } +FAKE_PROXY = 'cinder.tests.unit.volume.drivers.ibm.test_ibm_storage' \ + '.IBMStorageFakeProxyDriver' -class XIVDS8KFakeProxyDriver(object): - """Fake IBM XIV and DS8K Proxy Driver.""" - def __init__(self, xiv_ds8k_info, logger, expt, +class IBMStorageFakeProxyDriver(object): + """Fake IBM Storage driver + + Fake IBM Storage driver for IBM XIV, Spectrum Accelerate, + FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems. 
+ """ + + def __init__(self, ibm_storage_info, logger, expt, driver=None, active_backend_id=None): """Initialize Proxy.""" - self.xiv_ds8k_info = xiv_ds8k_info + self.ibm_storage_info = ibm_storage_info self.logger = logger self.exception = expt - self.xiv_ds8k_portal = \ - self.xiv_ds8k_iqn = FAKE + self.storage_portal = \ + self.storage_iqn = FAKE self.volumes = {} self.snapshots = {} self.driver = driver def setup(self, context): - if self.xiv_ds8k_info['xiv_ds8k_user'] != self.driver\ + if self.ibm_storage_info['user'] != self.driver\ .configuration.san_login: raise self.exception.NotAuthorized() - if self.xiv_ds8k_info['xiv_ds8k_address'] != self.driver\ + if self.ibm_storage_info['address'] != self.driver\ .configuration.san_ip: raise self.exception.HostNotFound(host='fake') @@ -143,14 +150,14 @@ class XIVDS8KFakeProxyDriver(object): return {'driver_volume_type': 'iscsi', 'data': {'target_discovered': True, - 'target_portal': self.xiv_ds8k_portal, - 'target_iqn': self.xiv_ds8k_iqn, + 'target_portal': self.storage_portal, + 'target_iqn': self.storage_iqn, 'target_lun': lun_id, 'volume_id': volume['id'], 'multipath': True, 'provider_location': "%s,1 %s %s" % ( - self.xiv_ds8k_portal, - self.xiv_ds8k_iqn, + self.storage_portal, + self.storage_iqn, lun_id), }, } @@ -284,19 +291,22 @@ class XIVDS8KFakeProxyDriver(object): return target_id, volume_update_list -class XIVDS8KVolumeDriverTest(test.TestCase): - """Test IBM XIV and DS8K volume driver.""" +class IBMStorageVolumeDriverTest(test.TestCase): + """Test IBM Storage driver + + Test IBM Storage driver for IBM XIV, Spectrum Accelerate, + FlashSystem A9000, FlashSystem A9000R and DS8000 storage Systems. + """ def setUp(self): - """Initialize IBM XIV and DS8K Driver.""" - super(XIVDS8KVolumeDriverTest, self).setUp() + """Initialize IBM Storage Driver.""" + super(IBMStorageVolumeDriverTest, self).setUp() configuration = mock.Mock(conf.Configuration) configuration.san_is_local = False - configuration.xiv_ds8k_proxy = \ - 'cinder.tests.unit.test_ibm_xiv_ds8k.XIVDS8KFakeProxyDriver' - configuration.xiv_ds8k_connection_type = 'iscsi' - configuration.xiv_chap = 'disabled' + configuration.proxy = FAKE_PROXY + configuration.connection_type = 'iscsi' + configuration.chap = 'disabled' configuration.san_ip = FAKE configuration.management_ips = FAKE configuration.san_login = FAKE @@ -304,35 +314,35 @@ class XIVDS8KVolumeDriverTest(test.TestCase): configuration.san_password = FAKE configuration.append_config_values(mock.ANY) - self.driver = xiv_ds8k.XIVDS8KDriver( + self.driver = ibm_storage.IBMStorageDriver( configuration=configuration) - def test_initialized_should_set_xiv_ds8k_info(self): + def test_initialized_should_set_ibm_storage_info(self): """Test that the san flags are passed to the IBM proxy.""" self.assertEqual( - self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'], + self.driver.proxy.ibm_storage_info['user'], self.driver.configuration.san_login) self.assertEqual( - self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_pass'], + self.driver.proxy.ibm_storage_info['password'], self.driver.configuration.san_password) self.assertEqual( - self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'], + self.driver.proxy.ibm_storage_info['address'], self.driver.configuration.san_ip) self.assertEqual( - self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_vol_pool'], + self.driver.proxy.ibm_storage_info['vol_pool'], self.driver.configuration.san_clustername) def test_setup_should_fail_if_credentials_are_invalid(self): - """Test that the 
xiv_ds8k_proxy validates credentials.""" + """Test that the proxy validates credentials.""" - self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'] = 'invalid' + self.driver.proxy.ibm_storage_info['user'] = 'invalid' self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None) def test_setup_should_fail_if_connection_is_invalid(self): - """Test that the xiv_ds8k_proxy validates connection.""" + """Test that the proxy validates connection.""" - self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'] = \ + self.driver.proxy.ibm_storage_info['address'] = \ 'invalid' self.assertRaises(exception.HostNotFound, self.driver.do_setup, None) @@ -341,7 +351,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase): self.driver.do_setup(None) self.driver.create_volume(VOLUME) - has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME) + has_volume = self.driver.proxy.volume_exists(VOLUME) self.assertTrue(has_volume) self.driver.delete_volume(VOLUME) @@ -351,7 +361,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase): self.driver.do_setup(None) self.assertFalse( - self.driver.xiv_ds8k_proxy.volume_exists({'name': FAKE}) + self.driver.proxy.volume_exists({'name': FAKE}) ) def test_delete_volume(self): @@ -360,7 +370,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase): self.driver.do_setup(None) self.driver.create_volume(VOLUME) self.driver.delete_volume(VOLUME) - has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME) + has_volume = self.driver.proxy.volume_exists(VOLUME) self.assertFalse(has_volume) def test_delete_volume_should_fail_for_not_existing_volume(self): @@ -370,7 +380,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase): self.driver.delete_volume(VOLUME) def test_create_volume_should_fail_if_no_pool_space_left(self): - """Verify that the xiv_ds8k_proxy validates volume pool space.""" + """Verify that the proxy validates volume pool space.""" self.driver.do_setup(None) self.assertRaises(exception.VolumeBackendAPIException, @@ -387,7 +397,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase): self.driver.initialize_connection(VOLUME, CONNECTOR) self.assertTrue( - self.driver.xiv_ds8k_proxy.is_volume_attached(VOLUME, CONNECTOR)) + self.driver.proxy.is_volume_attached(VOLUME, CONNECTOR)) self.driver.terminate_connection(VOLUME, CONNECTOR) self.driver.delete_volume(VOLUME) @@ -409,7 +419,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase): self.driver.initialize_connection(VOLUME, CONNECTOR) self.driver.terminate_connection(VOLUME, CONNECTOR) - self.assertFalse(self.driver.xiv_ds8k_proxy.is_volume_attached( + self.assertFalse(self.driver.proxy.is_volume_attached( VOLUME, CONNECTOR)) @@ -518,7 +528,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase): host = { 'host': 'foo', 'capabilities': { - 'location_info': 'xiv_ds8k_fake_1', + 'location_info': 'ibm_storage_fake_1', 'extent_size': '1024' } } @@ -556,7 +566,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase): host = { 'host': 'foo', 'capabilities': { - 'location_info': 'xiv_ds8k_fake_1', + 'location_info': 'ibm_storage_fake_1', 'extent_size': '1024' } } diff --git a/cinder/tests/unit/test_storwize_svc.py b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py similarity index 98% rename from cinder/tests/unit/test_storwize_svc.py rename to cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py index bd4762e40..47c681414 100644 --- a/cinder/tests/unit/test_storwize_svc.py +++ b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py @@ -232,7 +232,7 @@ class StorwizeSVCManagementSimulator(object): # Check if name is valid 
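# The _is_invalid_name hunk just below tightens the Storwize simulator's
# name check by dropping the space from the allowed character class; names
# containing spaces are then only accepted when the command line passes
# them quoted, and the quote-stripping added further down in this file
# handles that case. A quick standalone illustration of the before/after
# patterns:

import re

OLD_NAME_PATTERN = r'^[a-zA-Z_][\w ._-]*$'   # space was allowed
NEW_NAME_PATTERN = r'^[a-zA-Z_][\w._-]*$'    # space now rejected

assert re.match(OLD_NAME_PATTERN, 'volume_ space')          # used to pass
assert re.match(NEW_NAME_PATTERN, 'volume_ space') is None  # fails unquoted
assert re.match(NEW_NAME_PATTERN, 'volume_space')           # plain names pass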
@staticmethod def _is_invalid_name(name): - if re.match(r'^[a-zA-Z_][\w ._-]*$', name): + if re.match(r'^[a-zA-Z_][\w._-]*$', name): return False return True @@ -297,6 +297,12 @@ class StorwizeSVCManagementSimulator(object): if skip: skip = False continue + # Check for a quoted command argument for volumes and strip + # quotes so that the simulator can match it later. Just + # match against test naming conventions for now. + if arg_list[i][0] == '"' and ('volume' in arg_list[i] or + 'snapshot' in arg_list[i]): + arg_list[i] = arg_list[i][1:-1] if arg_list[i][0] == '-': if arg_list[i][1:] in no_param_args: ret[arg_list[i][1:]] = True @@ -648,6 +654,8 @@ port_speed!N/A for row in rows: row.pop(0) self._next_cmd_error['lsfabric'] = '' + if self._next_cmd_error['lsfabric'] == 'remove_rows': + rows = [] return self._print_info_cmd(rows=rows, **kwargs) # Create a vdisk @@ -1002,7 +1010,10 @@ port_speed!N/A rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) found = False - for host in self._hosts_list.values(): + # Sort hosts by name to give predictable order for tests + # that depend on it. + for host_name in sorted(self._hosts_list.keys()): + host = self._hosts_list[host_name] filterstr = 'name=' + host['host_name'] if (('filtervalue' not in kwargs) or (kwargs['filtervalue'] == filterstr)): @@ -1017,6 +1028,8 @@ port_speed!N/A if self._next_cmd_error['lshost'] == 'missing_host': self._next_cmd_error['lshost'] = '' return self._errors['CMMVC5754E'] + elif self._next_cmd_error['lshost'] == 'bigger_troubles': + return self._errors['CMMVC6527E'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5754E'] @@ -2373,17 +2386,46 @@ class StorwizeSVCFcDriverTestCase(test.TestCase): self.assertIsNotNone(host_name) def test_storwize_get_host_from_connector_with_lshost_failure(self): - # Create a FC host - del self._connector['initiator'] + self._connector.pop('initiator') helper = self.fc_driver._helpers - host_name = helper.create_host(self._connector) - + # Create two hosts. The first is not related to the connector and + # we use the simulator for that. The second is for the connector. + # We will force the missing_host error for the first host, but + # then tolerate and find the second host on the slow path normally. + if self.USESIM: + self.sim._cmd_mkhost(name='DifferentHost', hbawwpn='123456') + helper.create_host(self._connector) # tell lshost to fail while calling get_host_from_connector if self.USESIM: + # tell lshost to fail while called from get_host_from_connector self.sim.error_injection('lshost', 'missing_host') + # tell lsfabric to skip rows so that we skip past fast path + self.sim.error_injection('lsfabric', 'remove_rows') + # Run test host_name = helper.get_host_from_connector(self._connector) self.assertIsNotNone(host_name) + # Need to assert that lshost was actually called. The way + # we do that is to check that the next simulator error for lshost + # has been reset. + self.assertEqual(self.sim._next_cmd_error['lshost'], '', + "lshost was not called in the simulator. The " + "queued error still remains.") + + def test_storwize_get_host_from_connector_with_lshost_failure2(self): + self._connector.pop('initiator') + self._connector['wwpns'] = [] # Clearing will skip over fast-path + helper = self.fc_driver._helpers + if self.USESIM: + # Add a host to the simulator. We don't need it to match the + # connector since we will force a bad failure for lshost. 
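# The lshost-failure tests above depend on the slow-path lookup tolerating
# a failed per-host query: one stale or deleted host must not abort the
# scan, while a systemic error (the 'bigger_troubles' injection) must still
# propagate. A minimal sketch of that control flow; the callables and the
# exception split are illustrative, not the helper's real signature.

class HostNotFound(Exception):
    """Per-host miss: safe to skip this entry and keep scanning."""


def find_host_slow_path(list_host_names, show_host, matches_connector):
    for name in list_host_names():
        try:
            info = show_host(name)
        except HostNotFound:
            continue  # tolerate a single missing host
        # Any other exception (e.g. an internal CMMVC6527E-style error)
        # deliberately escapes and fails the whole lookup.
        if matches_connector(info):
            return name
    return None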
+ self.sim._cmd_mkhost(name='DifferentHost', hbawwpn='123456') + # tell lshost to fail badly while called from + # get_host_from_connector + self.sim.error_injection('lshost', 'bigger_troubles') + self.assertRaises(exception.VolumeBackendAPIException, + helper.get_host_from_connector, + self._connector) def test_storwize_initiator_multiple_wwpns_connected(self): @@ -2870,6 +2912,8 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver._helpers.check_fcmapping_interval = 0 + self.mock_gr_sleep = mock.patch.object( + storwize_svc_common.StorwizeSVCCommonDriver, "DEFAULT_GR_SLEEP", 0) def _set_flag(self, flag, value, configuration=None): if not configuration: @@ -3262,13 +3306,56 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): self.driver.delete_volume(vol1) self.driver.delete_snapshot(snap1) - def test_storwize_svc_create_volfromsnap_clone(self): + def test_storwize_svc_create_cloned_volume(self): + vol1 = self._create_volume() + vol2 = testutils.create_volume(self.ctxt) + vol3 = testutils.create_volume(self.ctxt) + + # Try to clone where source size > target size + vol1['size'] = vol2['size'] + 1 + self.assertRaises(exception.InvalidInput, + self.driver.create_cloned_volume, + vol2, vol1) + self._assert_vol_exists(vol2['name'], False) + + # Try to clone where source size = target size + vol1['size'] = vol2['size'] + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_cloned_volume(vol2, vol1) + if self.USESIM: + # validate copyrate was set on the flash copy + for i, fcmap in self.sim._fcmappings_list.items(): + if fcmap['target'] == vol1['name']: + self.assertEqual('49', fcmap['copyrate']) + self._assert_vol_exists(vol2['name'], True) + + # Try to clone where source size < target size + vol3['size'] = vol1['size'] + 1 + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_cloned_volume(vol3, vol1) + if self.USESIM: + # Validate copyrate was set on the flash copy + for i, fcmap in self.sim._fcmappings_list.items(): + if fcmap['target'] == vol1['name']: + self.assertEqual('49', fcmap['copyrate']) + self._assert_vol_exists(vol3['name'], True) + + # Delete in the 'opposite' order to make sure it works + self.driver.delete_volume(vol3) + self._assert_vol_exists(vol3['name'], False) + self.driver.delete_volume(vol2) + self._assert_vol_exists(vol2['name'], False) + self.driver.delete_volume(vol1) + self._assert_vol_exists(vol1['name'], False) + + def test_storwize_svc_create_volume_from_snapshot(self): vol1 = self._create_volume() snap1 = self._generate_vol_info(vol1['name'], vol1['id']) self.driver.create_snapshot(snap1) vol2 = self._generate_vol_info(None, None) - vol3 = testutils.create_volume(self.ctxt) - vol4 = testutils.create_volume(self.ctxt) + vol3 = self._generate_vol_info(None, None) # Try to create a volume from a non-existing snapshot snap_novol = self._generate_vol_info('undefined-vol', '12345') @@ -3287,54 +3374,29 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): vol2, snap1) self._assert_vol_exists(vol2['name'], False) - # Try to create where source size != target size - vol2['size'] += 1 + # Try to create where volume size < snapshot size + snap1['volume_size'] += 1 self.assertRaises(exception.InvalidInput, self.driver.create_volume_from_snapshot, vol2, snap1) self._assert_vol_exists(vol2['name'], False) - vol2['size'] -= 1 + snap1['volume_size'] -= 1 - # Succeed + # Try to create where volume size > snapshot size + 
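# The reworked assertions in this hunk pin down the size invariant shared
# by clones and volumes created from snapshots: a target equal to or larger
# than the source is accepted, and only a smaller target is rejected. A
# compact standalone statement of that rule (the exception type here is
# illustrative; the driver raises exception.InvalidInput):

def check_copy_size(source_gb, target_gb):
    if target_gb < source_gb:
        raise ValueError('target volume smaller than source')
    # equal-size and larger targets both succeed in the tests above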
vol2['size'] += 1 if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) + vol2['size'] -= 1 - # Try to clone where source size > target size - vol2['size'] = vol3['size'] + 1 - self.assertRaises(exception.InvalidInput, - self.driver.create_cloned_volume, - vol3, vol2) - self._assert_vol_exists(vol3['name'], False) - - # Try to clone where source size = target size - vol2['size'] = vol3['size'] + # Try to create where volume size = snapshot size if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_cloned_volume(vol3, vol2) - if self.USESIM: - # validate copyrate was set on the flash copy - for i, fcmap in self.sim._fcmappings_list.items(): - if fcmap['target'] == vol2['name']: - self.assertEqual('49', fcmap['copyrate']) + self.driver.create_volume_from_snapshot(vol3, snap1) self._assert_vol_exists(vol3['name'], True) - # Try to clone where source size < target size - vol4['size'] = vol2['size'] + 1 - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_cloned_volume(vol4, vol2) - if self.USESIM: - # Validate copyrate was set on the flash copy - for i, fcmap in self.sim._fcmappings_list.items(): - if fcmap['target'] == vol2['name']: - self.assertEqual('49', fcmap['copyrate']) - self._assert_vol_exists(vol4['name'], True) - # Delete in the 'opposite' order to make sure it works - self.driver.delete_volume(vol4) - self._assert_vol_exists(vol4['name'], False) self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) @@ -3486,6 +3548,19 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): # Delete the volume self.driver.delete_volume(volume) + def test_storwize_svc_volume_name(self): + # Create a volume with space in name + volume = self._generate_vol_info(None, None) + volume['name'] = 'volume_ space' + self.driver.create_volume(volume) + self.driver.ensure_export(None, volume) + + # Ensure lsvdisk can find the volume by name + attributes = self.driver._helpers.get_vdisk_attributes(volume['name']) + self.assertIn('name', attributes) + self.assertEqual(volume['name'], attributes['name']) + self.driver.delete_volume(volume) + def test_storwize_svc_volume_params(self): # Option test matrix # Option Value Covered by test # @@ -5079,6 +5154,8 @@ class StorwizeHelpersTestCase(test.TestCase): def setUp(self): super(StorwizeHelpersTestCase, self).setUp() self.storwize_svc_common = storwize_svc_common.StorwizeHelpers(None) + self.mock_wait_time = mock.patch.object( + storwize_svc_common.StorwizeHelpers, "WAIT_TIME", 0) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lslicense') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsguicapabilities') diff --git a/cinder/tests/unit/volume/drivers/infortrend/__init__.py b/cinder/tests/unit/volume/drivers/infortrend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/tests/unit/test_infortrend_cli.py b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py similarity index 100% rename from cinder/tests/unit/test_infortrend_cli.py rename to cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py diff --git a/cinder/tests/unit/test_infortrend_common.py b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py similarity index 99% rename from cinder/tests/unit/test_infortrend_common.py rename to cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py 
index e73584a92..58b4e7b0c 100644 --- a/cinder/tests/unit/test_infortrend_common.py +++ b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py @@ -19,8 +19,8 @@ import mock from cinder import exception from cinder import test -from cinder.tests.unit import test_infortrend_cli from cinder.tests.unit import utils +from cinder.tests.unit.volume.drivers.infortrend import test_infortrend_cli from cinder.volume import configuration from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py index 79c2edd7f..2d64e62fd 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py @@ -68,6 +68,10 @@ FAKE_NA_SERVER_API_1_20.set_vfiler('filer') FAKE_NA_SERVER_API_1_20.set_vserver('server') FAKE_NA_SERVER_API_1_20.set_api_version(1, 20) +VOLUME_VSERVER_NAME = 'fake_vserver' +VOLUME_NAMES = ('volume1', 'volume2') +VOLUME_NAME = 'volume1' + FAKE_QUERY = {'volume-attributes': None} @@ -104,6 +108,20 @@ NO_RECORDS_RESPONSE = etree.XML("""
""") +VOLUME_GET_NAME_RESPONSE = etree.XML(""" + + + + + %(volume)s + %(vserver)s + + + + 1 + +""" % {'volume': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME}) + INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES = etree.XML(""" 1 @@ -622,7 +640,7 @@ AGGR_GET_NODE_RESPONSE = etree.XML(""" 'node': NODE_NAME, }) -AGGR_RAID_TYPE = 'raid_dp' +AGGREGATE_RAID_TYPE = 'raid_dp' AGGR_GET_ITER_SSC_RESPONSE = etree.XML(""" @@ -639,19 +657,45 @@ AGGR_GET_ITER_SSC_RESPONSE = etree.XML(""" %(raid)s + true %(aggr)s 1 -""" % {'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGR_RAID_TYPE}) +""" % {'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGREGATE_RAID_TYPE}) AGGR_INFO_SSC = { 'name': VOLUME_AGGREGATE_NAME, - 'raid-type': AGGR_RAID_TYPE, + 'raid-type': AGGREGATE_RAID_TYPE, + 'is-hybrid': True, } +AGGR_SIZE_TOTAL = 107374182400 +AGGR_SIZE_AVAILABLE = 59055800320 +AGGR_USED_PERCENT = 45 +AGGR_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" + + + + + %(used)s + %(total_size)s + %(available_size)s + + %(aggr)s + + + 1 + +""" % { + 'aggr': VOLUME_AGGREGATE_NAME, + 'used': AGGR_USED_PERCENT, + 'available_size': AGGR_SIZE_AVAILABLE, + 'total_size': AGGR_SIZE_TOTAL, +}) + VOLUME_SIZE_TOTAL = 19922944 VOLUME_SIZE_AVAILABLE = 19791872 VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" @@ -671,9 +715,6 @@ VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" 'total_size': VOLUME_SIZE_TOTAL, }) -VOLUME_VSERVER_NAME = 'fake_vserver' -VOLUME_NAMES = ('volume1', 'volume2') - VOLUME_GET_ITER_LIST_RESPONSE = etree.XML(""" @@ -707,6 +748,7 @@ VOLUME_GET_ITER_SSC_RESPONSE = etree.XML(""" /%(volume)s %(volume)s %(vserver)s + rw false @@ -718,7 +760,15 @@ VOLUME_GET_ITER_SSC_RESPONSE = etree.XML(""" true none + 5 + 12345 + + default + + + en_US + 1 @@ -735,6 +785,11 @@ VOLUME_INFO_SSC = { 'junction-path': '/%s' % VOLUME_NAMES[0], 'aggregate': VOLUME_AGGREGATE_NAMES[0], 'space-guarantee-enabled': True, + 'language': 'en_US', + 'percentage-snapshot-reserve': '5', + 'snapshot-policy': 'default', + 'type': 'rw', + 'size': '12345', 'space-guarantee': 'none', 'qos-policy-group': 'fake_qos_policy_group_name', } @@ -756,27 +811,6 @@ VOLUME_DEDUPE_INFO_SSC = { 'dedupe': True, } -SNAPMIRROR_GET_ITER_RESPONSE = etree.XML(""" - - - - %(vserver)s:%(volume2)s - %(volume2)s - %(vserver)s - %(vserver)s:%(volume1)s - %(volume1)s - %(vserver)s - - - 1 - -""" % { - 'volume1': VOLUME_NAMES[0], - 'volume2': VOLUME_NAMES[1], - 'vserver': VOLUME_VSERVER_NAME, -}) - - STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML(""" @@ -887,20 +921,41 @@ STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML(""" """) -AGGR_DISK_TYPE = 'FCAL' +AGGREGATE_DISK_TYPES = ['SATA', 'SSD'] STORAGE_DISK_GET_ITER_RESPONSE = etree.XML(""" cluster3-01:v5.19 - %s + %(type0)s + + + + cluster3-01:v5.20 + + %(type0)s + + + + cluster3-01:v5.20 + + %(type1)s + + + + cluster3-01:v5.20 + + %(type1)s - 1 + 4 -""" % AGGR_DISK_TYPE) +""" % { + 'type0': AGGREGATE_DISK_TYPES[0], + 'type1': AGGREGATE_DISK_TYPES[1], +}) SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE = etree.XML(""" @@ -1076,3 +1131,139 @@ ISCSI_INITIATOR_GET_AUTH_ELEM = etree.XML(""" ISCSI_INITIATOR_AUTH_LIST_INFO_FAILURE = etree.XML(""" """ % INITIATOR_IQN) + +CLUSTER_NAME = 'fake_cluster' +REMOTE_CLUSTER_NAME = 'fake_cluster_2' +CLUSTER_ADDRESS_1 = 'fake_cluster_address' +CLUSTER_ADDRESS_2 = 'fake_cluster_address_2' +VSERVER_NAME = 'fake_vserver' +VSERVER_NAME_2 = 'fake_vserver_2' +SM_SOURCE_VSERVER = 'fake_source_vserver' +SM_SOURCE_VOLUME = 'fake_source_volume' +SM_DEST_VSERVER = 'fake_destination_vserver' +SM_DEST_VOLUME = 'fake_destination_volume' + 
+CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML(""" + + + + + %(addr1)s + %(addr2)s + + available + %(cluster)s + fake_uuid + + %(addr1)s + + %(remote_cluster)s + fake_serial_number + 60 + + + 1 + +""" % { + 'addr1': CLUSTER_ADDRESS_1, + 'addr2': CLUSTER_ADDRESS_2, + 'cluster': CLUSTER_NAME, + 'remote_cluster': REMOTE_CLUSTER_NAME, +}) + +CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML(""" + + + + false + 8 + + + +""") + +VSERVER_PEER_GET_ITER_RESPONSE = etree.XML(""" + + + + + snapmirror + + %(cluster)s + peered + %(vserver2)s + %(vserver1)s + + + 2 + +""" % { + 'cluster': CLUSTER_NAME, + 'vserver1': VSERVER_NAME, + 'vserver2': VSERVER_NAME_2 +}) + +SNAPMIRROR_GET_ITER_RESPONSE = etree.XML(""" + + + + %(vserver)s:%(volume2)s + %(volume2)s + fake_destination_node + %(vserver)s + fake_snapshot + 1442701782 + false + true + 2187 + 109 + 1442701890 + test:manila + 1171456 + initialize + 0 + snapmirrored + fake_snapshot + 1442701782 + DPDefault + v2 + ea8bfcc6-5f1d-11e5-8446-123478563412 + idle + data_protection + daily + %(vserver)s:%(volume1)s + %(volume1)s + %(vserver)s + fake_destination_vserver + + + 1 + +""" % { + 'volume1': VOLUME_NAMES[0], + 'volume2': VOLUME_NAMES[1], + 'vserver': VOLUME_VSERVER_NAME, +}) + +SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML(""" + + + + fake_destination_vserver + fake_destination_volume + true + snapmirrored + daily + fake_source_vserver + fake_source_volume + + + 1 + +""") + +SNAPMIRROR_INITIALIZE_RESULT = etree.XML(""" + + succeeded + +""") diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py index f47388b9c..0323da072 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py @@ -507,7 +507,9 @@ class NetApp7modeClientTestCase(test.TestCase): self.connection.invoke_successfully.side_effect = [ fake_clone_id_response, fake_clone_list_response] - self.client.clone_file(expected_src_path, expected_dest_path) + self.client.clone_file(expected_src_path, + expected_dest_path, + source_snapshot=fake.CG_SNAPSHOT_ID) __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] @@ -519,6 +521,9 @@ class NetApp7modeClientTestCase(test.TestCase): self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) + self.assertEqual( + fake.CG_SNAPSHOT_ID, + actual_request.get_child_by_name('snapshot-name').get_content()) self.assertEqual(actual_request.get_child_by_name( 'destination-exists'), None) self.assertTrue(enable_tunneling) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py index f6db0a731..4fefb8075 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py @@ -19,6 +19,7 @@ import uuid from lxml import etree import mock import six +import time from cinder import exception from cinder import test @@ -537,6 +538,15 @@ class NetAppBaseClientTestCase(test.TestCase): self.client._commit_cg_snapshot.assert_called_once_with( fake.CONSISTENCY_GROUP_ID) + def test_create_cg_snapshot_no_id(self): + self.mock_object(self.client, '_start_cg_snapshot', mock.Mock( + return_value=None)) + + 
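# If _start_cg_snapshot hands back no cg-id, the snapshot can never be
# committed, so create_cg_snapshot is expected to raise
# VolumeBackendAPIException.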
self.assertRaises(exception.VolumeBackendAPIException, + self.client.create_cg_snapshot, + [fake.CG_VOLUME_NAME], + fake.CG_SNAPSHOT_NAME) + def test_start_cg_snapshot(self): snapshot_init = { 'snapshot': fake.CG_SNAPSHOT_NAME, @@ -559,3 +569,25 @@ class NetAppBaseClientTestCase(test.TestCase): self.client.send_request.assert_called_once_with( 'cg-commit', {'cg-id': snapshot_commit['cg-id']}) + + def test_wait_for_busy_snapshot_raise_exception(self): + BUSY_SNAPSHOT = dict(fake.SNAPSHOT) + BUSY_SNAPSHOT['busy'] = True + + # Need to mock sleep as it is called by @utils.retry + self.mock_object(time, 'sleep') + mock_get_snapshot = self.mock_object( + self.client, 'get_snapshot', + mock.Mock(return_value=BUSY_SNAPSHOT) + ) + + self.assertRaises(exception.SnapshotIsBusy, + self.client.wait_for_busy_snapshot, + fake.FLEXVOL, fake.SNAPSHOT_NAME) + + calls = [ + mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), + mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), + mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), + ] + mock_get_snapshot.assert_has_calls(calls) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py index 206be3d5a..e9fe65d10 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py @@ -173,7 +173,7 @@ class NetAppCmodeClientTestCase(test.TestCase): max_page_length=10) num_records = result.get_child_content('num-records') - self.assertEqual('1', num_records) + self.assertEqual('4', num_records) args = copy.deepcopy(storage_disk_get_iter_args) args['max-records'] = 10 @@ -576,6 +576,31 @@ class NetAppCmodeClientTestCase(test.TestCase): self.assertEqual(1, self.connection.invoke_successfully.call_count) + @ddt.data({'supports_is_backup': True, 'is_snapshot': True}, + {'supports_is_backup': True, 'is_snapshot': False}, + {'supports_is_backup': False, 'is_snapshot': True}, + {'supports_is_backup': False, 'is_snapshot': False}) + @ddt.unpack + def test_clone_lun_is_snapshot(self, supports_is_backup, is_snapshot): + + self.client.features.add_feature('BACKUP_CLONE_PARAM', + supported=supports_is_backup) + + self.client.clone_lun( + 'volume', 'fakeLUN', 'newFakeLUN', is_snapshot=is_snapshot) + + clone_create_args = { + 'volume': 'volume', + 'source-path': 'fakeLUN', + 'destination-path': 'newFakeLUN', + 'space-reserve': 'true', + } + if is_snapshot and supports_is_backup: + clone_create_args['is-backup'] = 'true' + self.connection.invoke_successfully.assert_called_once_with( + netapp_api.NaElement.create_node_with_children( + 'clone-create', **clone_create_args), True) + def test_clone_lun_multiple_zapi_calls(self): """Test for when lun clone requires more than one zapi call.""" @@ -974,7 +999,8 @@ class NetAppCmodeClientTestCase(test.TestCase): self.connection.get_api_version.return_value = (1, 20) self.client.clone_file(expected_flex_vol, expected_src_path, - expected_dest_path, self.vserver) + expected_dest_path, self.vserver, + source_snapshot=fake.CG_SNAPSHOT_ID) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] @@ -988,6 +1014,9 @@ class NetAppCmodeClientTestCase(test.TestCase): self.assertEqual(expected_flex_vol, actual_flex_vol) self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) + req_snapshot_child = actual_request.get_child_by_name('snapshot-name') + 
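# The clone-create request must name the source snapshot when cloning from
# one; the snapshot-name child is read back off the captured request here.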
self.assertEqual(fake.CG_SNAPSHOT_ID, req_snapshot_child.get_content()) + self.assertEqual(actual_request.get_child_by_name( 'destination-exists'), None) @@ -1043,6 +1072,32 @@ class NetAppCmodeClientTestCase(test.TestCase): self.assertIsNone(actual_request.get_child_by_name( 'destination-exists')) + @ddt.data({'supports_is_backup': True, 'is_snapshot': True}, + {'supports_is_backup': True, 'is_snapshot': False}, + {'supports_is_backup': False, 'is_snapshot': True}, + {'supports_is_backup': False, 'is_snapshot': False}) + @ddt.unpack + def test_clone_file_is_snapshot(self, supports_is_backup, is_snapshot): + + self.connection.get_api_version.return_value = (1, 20) + self.client.features.add_feature('BACKUP_CLONE_PARAM', + supported=supports_is_backup) + + self.client.clone_file( + 'volume', 'fake_source', 'fake_destination', 'fake_vserver', + is_snapshot=is_snapshot) + + clone_create_args = { + 'volume': 'volume', + 'source-path': 'fake_source', + 'destination-path': 'fake_destination', + } + if is_snapshot and supports_is_backup: + clone_create_args['is-backup'] = 'true' + self.connection.invoke_successfully.assert_called_once_with( + netapp_api.NaElement.create_node_with_children( + 'clone-create', **clone_create_args), True) + def test_get_file_usage(self): expected_bytes = "2048" fake_vserver = 'fake_vserver' @@ -1334,6 +1389,7 @@ class NetAppCmodeClientTestCase(test.TestCase): 'name': None, 'owning-vserver-name': None, 'junction-path': None, + 'type': None, 'containing-aggregate-name': None, }, 'volume-mirror-attributes': { @@ -1343,9 +1399,17 @@ class NetAppCmodeClientTestCase(test.TestCase): 'volume-space-attributes': { 'is-space-guarantee-enabled': None, 'space-guarantee': None, + 'percentage-snapshot-reserve': None, + 'size': None, }, 'volume-qos-attributes': { 'policy-group-name': None, + }, + 'volume-snapshot-attributes': { + 'snapshot-policy': None, + }, + 'volume-language-attributes': { + 'language-code': None, } }, }, @@ -1366,6 +1430,193 @@ class NetAppCmodeClientTestCase(test.TestCase): self.client.get_flexvol, flexvol_name=fake_client.VOLUME_NAMES[0]) + def test_create_flexvol(self): + self.mock_object(self.client, 'send_request') + + self.client.create_flexvol( + fake_client.VOLUME_NAME, fake_client.VOLUME_AGGREGATE_NAME, 100) + + volume_create_args = { + 'containing-aggr-name': fake_client.VOLUME_AGGREGATE_NAME, + 'size': '100g', + 'volume': fake_client.VOLUME_NAME, + 'volume-type': 'rw', + 'junction-path': '/%s' % fake_client.VOLUME_NAME, + } + + self.client.send_request.assert_called_once_with('volume-create', + volume_create_args) + + @ddt.data('dp', 'rw', None) + def test_create_volume_with_extra_specs(self, volume_type): + + self.mock_object(self.client, 'enable_flexvol_dedupe') + self.mock_object(self.client, 'enable_flexvol_compression') + self.mock_object(self.client, 'send_request') + + self.client.create_flexvol( + fake_client.VOLUME_NAME, fake_client.VOLUME_AGGREGATE_NAME, 100, + space_guarantee_type='volume', language='en-US', + snapshot_policy='default', dedupe_enabled=True, + compression_enabled=True, snapshot_reserve=15, + volume_type=volume_type) + + volume_create_args = { + 'containing-aggr-name': fake_client.VOLUME_AGGREGATE_NAME, + 'size': '100g', + 'volume': fake_client.VOLUME_NAME, + 'space-reserve': 'volume', + 'language-code': 'en-US', + 'volume-type': volume_type, + 'percentage-snapshot-reserve': '15', + } + + if volume_type != 'dp': + volume_create_args['snapshot-policy'] = 'default' + volume_create_args['junction-path'] = ('/%s' % + 
fake_client.VOLUME_NAME) + + self.client.send_request.assert_called_with('volume-create', + volume_create_args) + self.client.enable_flexvol_dedupe.assert_called_once_with( + fake_client.VOLUME_NAME) + self.client.enable_flexvol_compression.assert_called_once_with( + fake_client.VOLUME_NAME) + + def test_flexvol_exists(self): + + api_response = netapp_api.NaElement( + fake_client.VOLUME_GET_NAME_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + result = self.client.flexvol_exists(fake_client.VOLUME_NAME) + + volume_get_iter_args = { + 'query': { + 'volume-attributes': { + 'volume-id-attributes': { + 'name': fake_client.VOLUME_NAME + } + } + }, + 'desired-attributes': { + 'volume-attributes': { + 'volume-id-attributes': { + 'name': None + } + } + } + } + + self.client.send_iter_request.assert_has_calls([ + mock.call('volume-get-iter', volume_get_iter_args)]) + self.assertTrue(result) + + def test_flexvol_exists_not_found(self): + + api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + + self.assertFalse(self.client.flexvol_exists(fake_client.VOLUME_NAME)) + + def test_rename_flexvol(self): + + self.mock_object(self.client, 'send_request') + + self.client.rename_flexvol(fake_client.VOLUME_NAME, 'new_name') + + volume_rename_api_args = { + 'volume': fake_client.VOLUME_NAME, + 'new-volume-name': 'new_name', + } + + self.client.send_request.assert_called_once_with( + 'volume-rename', volume_rename_api_args) + + def test_mount_flexvol_default_junction_path(self): + + self.mock_object(self.client, 'send_request') + + self.client.mount_flexvol(fake_client.VOLUME_NAME) + + volume_mount_args = { + 'volume-name': fake_client.VOLUME_NAME, + 'junction-path': '/%s' % fake_client.VOLUME_NAME, + } + + self.client.send_request.assert_has_calls([ + mock.call('volume-mount', volume_mount_args)]) + + def test_mount_flexvol(self): + + self.mock_object(self.client, 'send_request') + fake_path = '/fake_path' + + self.client.mount_flexvol(fake_client.VOLUME_NAME, + junction_path=fake_path) + + volume_mount_args = { + 'volume-name': fake_client.VOLUME_NAME, + 'junction-path': fake_path, + } + + self.client.send_request.assert_has_calls([ + mock.call('volume-mount', volume_mount_args)]) + + def test_enable_flexvol_dedupe(self): + + self.mock_object(self.client, 'send_request') + + self.client.enable_flexvol_dedupe(fake_client.VOLUME_NAME) + + sis_enable_args = {'path': '/vol/%s' % fake_client.VOLUME_NAME} + + self.client.send_request.assert_called_once_with('sis-enable', + sis_enable_args) + + def test_disable_flexvol_dedupe(self): + + self.mock_object(self.client, 'send_request') + + self.client.disable_flexvol_dedupe(fake_client.VOLUME_NAME) + + sis_disable_args = {'path': '/vol/%s' % fake_client.VOLUME_NAME} + + self.client.send_request.assert_called_once_with('sis-disable', + sis_disable_args) + + def test_enable_flexvol_compression(self): + + self.mock_object(self.client, 'send_request') + + self.client.enable_flexvol_compression(fake_client.VOLUME_NAME) + + sis_set_config_args = { + 'path': '/vol/%s' % fake_client.VOLUME_NAME, + 'enable-compression': 'true' + } + + self.client.send_request.assert_called_once_with('sis-set-config', + sis_set_config_args) + + def test_disable_flexvol_compression(self): + + self.mock_object(self.client, 'send_request') + + self.client.disable_flexvol_compression(fake_client.VOLUME_NAME) + + sis_set_config_args = { + 
'path': '/vol/%s' % fake_client.VOLUME_NAME, + 'enable-compression': 'false' + } + + self.client.send_request.assert_called_once_with('sis-set-config', + sis_set_config_args) + def test_get_flexvol_dedupe_info(self): api_response = netapp_api.NaElement( @@ -1630,6 +1881,7 @@ class NetAppCmodeClientTestCase(test.TestCase): 'aggregate-name': None, 'aggr-raid-attributes': { 'raid-type': None, + 'is-hybrid': None, }, }, } @@ -1641,6 +1893,7 @@ class NetAppCmodeClientTestCase(test.TestCase): expected = { 'name': fake_client.VOLUME_AGGREGATE_NAME, 'raid-type': 'raid_dp', + 'is-hybrid': True, } self.assertEqual(expected, result) @@ -1665,19 +1918,64 @@ class NetAppCmodeClientTestCase(test.TestCase): self.assertEqual({}, result) - def test_get_aggregate_disk_type(self): + @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']}, + {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},) + @ddt.unpack + def test_get_aggregate_disk_types(self, types, expected): + + mock_get_aggregate_disk_types = self.mock_object( + self.client, '_get_aggregate_disk_types', + mock.Mock(return_value=types)) + + result = self.client.get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertItemsEqual(expected, result) + mock_get_aggregate_disk_types.assert_called_once_with( + fake_client.VOLUME_AGGREGATE_NAME) + + def test_get_aggregate_disk_types_not_found(self): + + mock_get_aggregate_disk_types = self.mock_object( + self.client, '_get_aggregate_disk_types', + mock.Mock(return_value=set())) + + result = self.client.get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertIsNone(result) + mock_get_aggregate_disk_types.assert_called_once_with( + fake_client.VOLUME_AGGREGATE_NAME) + + def test_get_aggregate_disk_types_shared(self): + + self.client.features.add_feature('ADVANCED_DISK_PARTITIONING') + mock_get_aggregate_disk_types = self.mock_object( + self.client, '_get_aggregate_disk_types', + mock.Mock(side_effect=[set(['SSD']), set(['SATA'])])) + + result = self.client.get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertIsInstance(result, list) + self.assertItemsEqual(['SATA', 'SSD'], result) + mock_get_aggregate_disk_types.assert_has_calls([ + mock.call(fake_client.VOLUME_AGGREGATE_NAME), + mock.call(fake_client.VOLUME_AGGREGATE_NAME, shared=True), + ]) + + def test__get_aggregate_disk_types(self): api_response = netapp_api.NaElement( fake_client.STORAGE_DISK_GET_ITER_RESPONSE) self.mock_object(self.client, - 'send_request', + 'send_iter_request', mock.Mock(return_value=api_response)) - result = self.client.get_aggregate_disk_type( + result = self.client._get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) storage_disk_get_iter_args = { - 'max-records': 1, 'query': { 'storage-disk-info': { 'disk-raid-info': { @@ -1696,34 +1994,184 @@ class NetAppCmodeClientTestCase(test.TestCase): }, }, } - self.client.send_request.assert_called_once_with( + self.client.send_iter_request.assert_called_once_with( 'storage-disk-get-iter', storage_disk_get_iter_args, enable_tunneling=False) - self.assertEqual(fake_client.AGGR_DISK_TYPE, result) - @ddt.data(fake_client.NO_RECORDS_RESPONSE, fake_client.INVALID_RESPONSE) - def test_get_aggregate_disk_type_not_found(self, response): + expected = set(fake_client.AGGREGATE_DISK_TYPES) + self.assertEqual(expected, result) - api_response = netapp_api.NaElement(response) + def test__get_aggregate_disk_types_shared(self): + + api_response = netapp_api.NaElement( + fake_client.STORAGE_DISK_GET_ITER_RESPONSE) + 
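# Editor's note: the shared variant targets disks carved up by Advanced Disk
# Partitioning, so the query below is keyed on disk-shared-info/aggregate-list
# rather than the plain disk-raid-info aggregate query used above.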
self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + result = self.client._get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME, shared=True) + + storage_disk_get_iter_args = { + 'query': { + 'storage-disk-info': { + 'disk-raid-info': { + 'disk-shared-info': { + 'aggregate-list': { + 'shared-aggregate-info': { + 'aggregate-name': + fake_client.VOLUME_AGGREGATE_NAME, + }, + }, + }, + }, + }, + }, + 'desired-attributes': { + 'storage-disk-info': { + 'disk-raid-info': { + 'effective-disk-type': None, + }, + }, + }, + } + self.client.send_iter_request.assert_called_once_with( + 'storage-disk-get-iter', storage_disk_get_iter_args, + enable_tunneling=False) + + expected = set(fake_client.AGGREGATE_DISK_TYPES) + self.assertEqual(expected, result) + + def test__get_aggregate_disk_types_not_found(self): + + api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + result = self.client._get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertEqual(set(), result) + + def test__get_aggregate_disk_types_api_error(self): + + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(side_effect=self._mock_api_error())) + + result = self.client._get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertEqual(set([]), result) + + def test_get_aggregate_capacities(self): + + aggr1_capacities = { + 'percent-used': 50, + 'size-available': 100.0, + 'size-total': 200.0, + } + aggr2_capacities = { + 'percent-used': 75, + 'size-available': 125.0, + 'size-total': 500.0, + } + mock_get_aggregate_capacity = self.mock_object( + self.client, 'get_aggregate_capacity', + mock.Mock(side_effect=[aggr1_capacities, aggr2_capacities])) + + result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) + + expected = { + 'aggr1': aggr1_capacities, + 'aggr2': aggr2_capacities, + } + self.assertEqual(expected, result) + mock_get_aggregate_capacity.assert_has_calls([ + mock.call('aggr1'), + mock.call('aggr2'), + ]) + + def test_get_aggregate_capacities_not_found(self): + + mock_get_aggregate_capacity = self.mock_object( + self.client, 'get_aggregate_capacity', + mock.Mock(side_effect=[{}, {}])) + + result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) + + expected = { + 'aggr1': {}, + 'aggr2': {}, + } + self.assertEqual(expected, result) + mock_get_aggregate_capacity.assert_has_calls([ + mock.call('aggr1'), + mock.call('aggr2'), + ]) + + def test_get_aggregate_capacities_not_list(self): + + result = self.client.get_aggregate_capacities('aggr1') + + self.assertEqual({}, result) + + def test_get_aggregate_capacity(self): + + api_response = netapp_api.NaElement( + fake_client.AGGR_GET_ITER_CAPACITY_RESPONSE).get_child_by_name( + 'attributes-list').get_children() + self.mock_object(self.client, + '_get_aggregates', + mock.Mock(return_value=api_response)) + + result = self.client.get_aggregate_capacity( + fake_client.VOLUME_AGGREGATE_NAME) + + desired_attributes = { + 'aggr-attributes': { + 'aggr-space-attributes': { + 'percent-used-capacity': None, + 'size-available': None, + 'size-total': None, + }, + }, + } + self.client._get_aggregates.assert_has_calls([ + mock.call( + aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], + desired_attributes=desired_attributes)]) + + expected = { + 'percent-used': float(fake_client.AGGR_USED_PERCENT), + 'size-available': float(fake_client.AGGR_SIZE_AVAILABLE), + 
'size-total': float(fake_client.AGGR_SIZE_TOTAL), + } + self.assertEqual(expected, result) + + def test_get_aggregate_capacity_not_found(self): + + api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) - result = self.client.get_aggregate_disk_type( + result = self.client.get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) - self.assertEqual('unknown', result) + self.assertEqual({}, result) - def test_get_aggregate_disk_type_api_error(self): + def test_get_aggregate_capacity_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) - result = self.client.get_aggregate_disk_type( + result = self.client.get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) - self.assertEqual('unknown', result) + self.assertEqual({}, result) def test_get_performance_instance_uuids(self): @@ -1913,3 +2361,804 @@ class NetAppCmodeClientTestCase(test.TestCase): self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot, expected_vol_name, expected_snapshot_name) + + def test_create_cluster_peer(self): + + self.mock_object(self.client, 'send_request') + + self.client.create_cluster_peer(['fake_address_1', 'fake_address_2'], + 'fake_user', 'fake_password', + 'fake_passphrase') + + cluster_peer_create_args = { + 'peer-addresses': [ + {'remote-inet-address': 'fake_address_1'}, + {'remote-inet-address': 'fake_address_2'}, + ], + 'user-name': 'fake_user', + 'password': 'fake_password', + 'passphrase': 'fake_passphrase', + } + self.client.send_request.assert_has_calls([ + mock.call('cluster-peer-create', cluster_peer_create_args)]) + + def test_get_cluster_peers(self): + + api_response = netapp_api.NaElement( + fake_client.CLUSTER_PEER_GET_ITER_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + result = self.client.get_cluster_peers() + + cluster_peer_get_iter_args = {} + self.client.send_iter_request.assert_has_calls([ + mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) + + expected = [{ + 'active-addresses': [ + fake_client.CLUSTER_ADDRESS_1, + fake_client.CLUSTER_ADDRESS_2 + ], + 'availability': 'available', + 'cluster-name': fake_client.CLUSTER_NAME, + 'cluster-uuid': 'fake_uuid', + 'peer-addresses': [fake_client.CLUSTER_ADDRESS_1], + 'remote-cluster-name': fake_client.REMOTE_CLUSTER_NAME, + 'serial-number': 'fake_serial_number', + 'timeout': '60', + }] + + self.assertEqual(expected, result) + + def test_get_cluster_peers_single(self): + + api_response = netapp_api.NaElement( + fake_client.CLUSTER_PEER_GET_ITER_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + self.client.get_cluster_peers( + remote_cluster_name=fake_client.CLUSTER_NAME) + + cluster_peer_get_iter_args = { + 'query': { + 'cluster-peer-info': { + 'remote-cluster-name': fake_client.CLUSTER_NAME, + } + }, + } + self.client.send_iter_request.assert_has_calls([ + mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) + + def test_get_cluster_peers_not_found(self): + + api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + result = self.client.get_cluster_peers( + remote_cluster_name=fake_client.CLUSTER_NAME) + + self.assertEqual([], result) + self.assertTrue(self.client.send_iter_request.called) + + def 
test_delete_cluster_peer(self): + + self.mock_object(self.client, 'send_request') + + self.client.delete_cluster_peer(fake_client.CLUSTER_NAME) + + cluster_peer_delete_args = {'cluster-name': fake_client.CLUSTER_NAME} + self.client.send_request.assert_has_calls([ + mock.call('cluster-peer-delete', cluster_peer_delete_args)]) + + def test_get_cluster_peer_policy(self): + + self.client.features.add_feature('CLUSTER_PEER_POLICY') + + api_response = netapp_api.NaElement( + fake_client.CLUSTER_PEER_POLICY_GET_RESPONSE) + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + + result = self.client.get_cluster_peer_policy() + + expected = { + 'is-unauthenticated-access-permitted': False, + 'passphrase-minimum-length': 8, + } + self.assertEqual(expected, result) + self.assertTrue(self.client.send_request.called) + + def test_get_cluster_peer_policy_not_supported(self): + + result = self.client.get_cluster_peer_policy() + + self.assertEqual({}, result) + + def test_set_cluster_peer_policy_not_supported(self): + + self.mock_object(self.client, 'send_request') + + self.client.set_cluster_peer_policy() + + self.assertFalse(self.client.send_request.called) + + def test_set_cluster_peer_policy_no_arguments(self): + + self.client.features.add_feature('CLUSTER_PEER_POLICY') + self.mock_object(self.client, 'send_request') + + self.client.set_cluster_peer_policy() + + self.assertFalse(self.client.send_request.called) + + def test_set_cluster_peer_policy(self): + + self.client.features.add_feature('CLUSTER_PEER_POLICY') + self.mock_object(self.client, 'send_request') + + self.client.set_cluster_peer_policy( + is_unauthenticated_access_permitted=True, + passphrase_minimum_length=12) + + cluster_peer_policy_modify_args = { + 'is-unauthenticated-access-permitted': 'true', + 'passphrase-minlength': '12', + } + self.client.send_request.assert_has_calls([ + mock.call('cluster-peer-policy-modify', + cluster_peer_policy_modify_args)]) + + def test_create_vserver_peer(self): + + self.mock_object(self.client, 'send_request') + + self.client.create_vserver_peer('fake_vserver', 'fake_vserver_peer') + + vserver_peer_create_args = { + 'vserver': 'fake_vserver', + 'peer-vserver': 'fake_vserver_peer', + 'applications': [ + {'vserver-peer-application': 'snapmirror'}, + ], + } + self.client.send_request.assert_has_calls([ + mock.call('vserver-peer-create', vserver_peer_create_args)]) + + def test_delete_vserver_peer(self): + + self.mock_object(self.client, 'send_request') + + self.client.delete_vserver_peer('fake_vserver', 'fake_vserver_peer') + + vserver_peer_delete_args = { + 'vserver': 'fake_vserver', + 'peer-vserver': 'fake_vserver_peer', + } + self.client.send_request.assert_has_calls([ + mock.call('vserver-peer-delete', vserver_peer_delete_args)]) + + def test_accept_vserver_peer(self): + + self.mock_object(self.client, 'send_request') + + self.client.accept_vserver_peer('fake_vserver', 'fake_vserver_peer') + + vserver_peer_accept_args = { + 'vserver': 'fake_vserver', + 'peer-vserver': 'fake_vserver_peer', + } + self.client.send_request.assert_has_calls([ + mock.call('vserver-peer-accept', vserver_peer_accept_args)]) + + def test_get_vserver_peers(self): + + api_response = netapp_api.NaElement( + fake_client.VSERVER_PEER_GET_ITER_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + result = self.client.get_vserver_peers( + vserver_name=fake_client.VSERVER_NAME, + peer_vserver_name=fake_client.VSERVER_NAME_2) + + 
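# The vserver/peer-vserver filters passed above become the vserver-peer-info
# query of the vserver-peer-get-iter call asserted below.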
vserver_peer_get_iter_args = { + 'query': { + 'vserver-peer-info': { + 'vserver': fake_client.VSERVER_NAME, + 'peer-vserver': fake_client.VSERVER_NAME_2, + } + }, + } + self.client.send_iter_request.assert_has_calls([ + mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args)]) + + expected = [{ + 'vserver': 'fake_vserver', + 'peer-vserver': 'fake_vserver_2', + 'peer-state': 'peered', + 'peer-cluster': 'fake_cluster' + }] + self.assertEqual(expected, result) + + def test_get_vserver_peers_not_found(self): + + api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + result = self.client.get_vserver_peers( + vserver_name=fake_client.VSERVER_NAME, + peer_vserver_name=fake_client.VSERVER_NAME_2) + + self.assertEqual([], result) + self.assertTrue(self.client.send_iter_request.called) + + def test_ensure_snapmirror_v2(self): + + self.assertIsNone(self.client._ensure_snapmirror_v2()) + + def test_ensure_snapmirror_v2_not_supported(self): + + self.client.features.add_feature('SNAPMIRROR_V2', supported=False) + + self.assertRaises(exception.NetAppDriverException, + self.client._ensure_snapmirror_v2) + + @ddt.data({'schedule': 'fake_schedule', 'policy': 'fake_policy'}, + {'schedule': None, 'policy': None}) + @ddt.unpack + def test_create_snapmirror(self, schedule, policy): + self.mock_object(self.client, 'send_request') + + self.client.create_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, + schedule=schedule, policy=policy) + + snapmirror_create_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + 'relationship-type': 'data_protection', + } + if schedule: + snapmirror_create_args['schedule'] = schedule + if policy: + snapmirror_create_args['policy'] = policy + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-create', snapmirror_create_args)]) + + def test_create_snapmirror_already_exists(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( + code=netapp_api.ERELATION_EXISTS)) + self.mock_object(self.client, 'send_request', mock_send_req) + + self.client.create_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_create_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + 'relationship-type': 'data_protection', + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-create', snapmirror_create_args)]) + + def test_create_snapmirror_error(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( + code=0)) + self.mock_object(self.client, 'send_request', mock_send_req) + + self.assertRaises(netapp_api.NaApiError, + self.client.create_snapmirror, + fake_client.SM_SOURCE_VSERVER, + fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, + fake_client.SM_DEST_VOLUME) + self.assertTrue(self.client.send_request.called) + + @ddt.data( + { + 'source_snapshot': 'fake_snapshot', + 'transfer_priority': 'fake_priority' + }, + { + 'source_snapshot': None, + 'transfer_priority': None + } + ) + @ddt.unpack + def 
test_initialize_snapmirror(self, source_snapshot, transfer_priority): + + api_response = netapp_api.NaElement( + fake_client.SNAPMIRROR_INITIALIZE_RESULT) + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + + result = self.client.initialize_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, + source_snapshot=source_snapshot, + transfer_priority=transfer_priority) + + snapmirror_initialize_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + if source_snapshot: + snapmirror_initialize_args['source-snapshot'] = source_snapshot + if transfer_priority: + snapmirror_initialize_args['transfer-priority'] = transfer_priority + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-initialize', snapmirror_initialize_args)]) + + expected = { + 'operation-id': None, + 'status': 'succeeded', + 'jobid': None, + 'error-code': None, + 'error-message': None + } + self.assertEqual(expected, result) + + @ddt.data(True, False) + def test_release_snapmirror(self, relationship_info_only): + + self.mock_object(self.client, 'send_request') + + self.client.release_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, + relationship_info_only=relationship_info_only) + + snapmirror_release_args = { + 'query': { + 'snapmirror-destination-info': { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + 'relationship-info-only': ('true' if relationship_info_only + else 'false'), + } + } + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-release-iter', snapmirror_release_args)]) + + def test_quiesce_snapmirror(self): + + self.mock_object(self.client, 'send_request') + + self.client.quiesce_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_quiesce_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-quiesce', snapmirror_quiesce_args)]) + + @ddt.data(True, False) + def test_abort_snapmirror(self, clear_checkpoint): + + self.mock_object(self.client, 'send_request') + + self.client.abort_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, + clear_checkpoint=clear_checkpoint) + + snapmirror_abort_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + 'clear-checkpoint': 'true' if clear_checkpoint else 'false', + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-abort', snapmirror_abort_args)]) + + def test_abort_snapmirror_no_transfer_in_progress(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( + code=netapp_api.ENOTRANSFER_IN_PROGRESS)) + 
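# ENOTRANSFER_IN_PROGRESS (nothing to abort) must be swallowed by the client;
# the error-code-0 case below checks that any other NaApiError propagates.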
self.mock_object(self.client, 'send_request', mock_send_req) + + self.client.abort_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_abort_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + 'clear-checkpoint': 'false', + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-abort', snapmirror_abort_args)]) + + def test_abort_snapmirror_error(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) + self.mock_object(self.client, 'send_request', mock_send_req) + + self.assertRaises(netapp_api.NaApiError, + self.client.abort_snapmirror, + fake_client.SM_SOURCE_VSERVER, + fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, + fake_client.SM_DEST_VOLUME) + + def test_break_snapmirror(self): + + self.mock_object(self.client, 'send_request') + + self.client.break_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_break_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-break', snapmirror_break_args)]) + + @ddt.data( + { + 'schedule': 'fake_schedule', + 'policy': 'fake_policy', + 'tries': 5, + 'max_transfer_rate': 1024, + }, + { + 'schedule': None, + 'policy': None, + 'tries': None, + 'max_transfer_rate': None, + } + ) + @ddt.unpack + def test_modify_snapmirror(self, schedule, policy, tries, + max_transfer_rate): + + self.mock_object(self.client, 'send_request') + + self.client.modify_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, + schedule=schedule, policy=policy, tries=tries, + max_transfer_rate=max_transfer_rate) + + snapmirror_modify_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + if schedule: + snapmirror_modify_args['schedule'] = schedule + if policy: + snapmirror_modify_args['policy'] = policy + if tries: + snapmirror_modify_args['tries'] = tries + if max_transfer_rate: + snapmirror_modify_args['max-transfer-rate'] = max_transfer_rate + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-modify', snapmirror_modify_args)]) + + def test_delete_snapmirror(self): + + self.mock_object(self.client, 'send_request') + + self.client.delete_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_delete_args = { + 'query': { + 'snapmirror-info': { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + } + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-destroy-iter', snapmirror_delete_args)]) + + def test_update_snapmirror(self): + + self.mock_object(self.client, 'send_request') + + self.client.update_snapmirror( + 
fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_update_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-update', snapmirror_update_args)]) + + def test_update_snapmirror_already_transferring(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( + code=netapp_api.ETRANSFER_IN_PROGRESS)) + self.mock_object(self.client, 'send_request', mock_send_req) + + self.client.update_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_update_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-update', snapmirror_update_args)]) + + def test_update_snapmirror_already_transferring_two(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( + code=netapp_api.EANOTHER_OP_ACTIVE)) + self.mock_object(self.client, 'send_request', mock_send_req) + + self.client.update_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_update_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-update', snapmirror_update_args)]) + + def test_update_snapmirror_error(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) + self.mock_object(self.client, 'send_request', mock_send_req) + + self.assertRaises(netapp_api.NaApiError, + self.client.update_snapmirror, + fake_client.SM_SOURCE_VSERVER, + fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, + fake_client.SM_DEST_VOLUME) + + def test_resume_snapmirror(self): + self.mock_object(self.client, 'send_request') + + self.client.resume_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_resume_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-resume', snapmirror_resume_args)]) + + def test_resume_snapmirror_not_quiesed(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( + code=netapp_api.ERELATION_NOT_QUIESCED)) + self.mock_object(self.client, 'send_request', mock_send_req) + + self.client.resume_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_resume_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + 
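# ERELATION_NOT_QUIESCED is tolerated: resuming a relationship that was never
# quiesced is a no-op, but the snapmirror-resume call must still go out.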
self.client.send_request.assert_has_calls([ + mock.call('snapmirror-resume', snapmirror_resume_args)]) + + def test_resume_snapmirror_error(self): + mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) + self.mock_object(self.client, 'send_request', mock_send_req) + + self.assertRaises(netapp_api.NaApiError, + self.client.resume_snapmirror, + fake_client.SM_SOURCE_VSERVER, + fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, + fake_client.SM_DEST_VOLUME) + + def test_resync_snapmirror(self): + self.mock_object(self.client, 'send_request') + + self.client.resync_snapmirror( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) + + snapmirror_resync_args = { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + } + self.client.send_request.assert_has_calls([ + mock.call('snapmirror-resync', snapmirror_resync_args)]) + + def test__get_snapmirrors(self): + + api_response = netapp_api.NaElement( + fake_client.SNAPMIRROR_GET_ITER_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + desired_attributes = { + 'snapmirror-info': { + 'source-vserver': None, + 'source-volume': None, + 'destination-vserver': None, + 'destination-volume': None, + 'is-healthy': None, + } + } + + result = self.client._get_snapmirrors( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, + desired_attributes=desired_attributes) + + snapmirror_get_iter_args = { + 'query': { + 'snapmirror-info': { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + }, + }, + 'desired-attributes': { + 'snapmirror-info': { + 'source-vserver': None, + 'source-volume': None, + 'destination-vserver': None, + 'destination-volume': None, + 'is-healthy': None, + }, + }, + } + self.client.send_iter_request.assert_has_calls([ + mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) + self.assertEqual(1, len(result)) + + def test__get_snapmirrors_not_found(self): + + api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + result = self.client._get_snapmirrors() + + self.client.send_iter_request.assert_has_calls([ + mock.call('snapmirror-get-iter', {})]) + + self.assertEqual([], result) + + def test_get_snapmirrors(self): + + api_response = netapp_api.NaElement( + fake_client.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + + desired_attributes = ['source-vserver', 'source-volume', + 'destination-vserver', 'destination-volume', + 'is-healthy', 'mirror-state', 'schedule'] + + result = self.client.get_snapmirrors( + fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, + fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, + desired_attributes=desired_attributes) + + snapmirror_get_iter_args = { + 'query': { + 'snapmirror-info': { + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': 
fake_client.SM_DEST_VOLUME, + }, + }, + 'desired-attributes': { + 'snapmirror-info': { + 'source-vserver': None, + 'source-volume': None, + 'destination-vserver': None, + 'destination-volume': None, + 'is-healthy': None, + 'mirror-state': None, + 'schedule': None, + }, + }, + } + + expected = [{ + 'source-vserver': fake_client.SM_SOURCE_VSERVER, + 'source-volume': fake_client.SM_SOURCE_VOLUME, + 'destination-vserver': fake_client.SM_DEST_VSERVER, + 'destination-volume': fake_client.SM_DEST_VOLUME, + 'is-healthy': 'true', + 'mirror-state': 'snapmirrored', + 'schedule': 'daily', + }] + + self.client.send_iter_request.assert_has_calls([ + mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) + self.assertEqual(expected, result) + + def test_get_provisioning_options_from_flexvol(self): + + self.mock_object(self.client, 'get_flexvol', + mock.Mock(return_value=fake_client.VOLUME_INFO_SSC)) + self.mock_object(self.client, 'get_flexvol_dedupe_info', mock.Mock( + return_value=fake_client.VOLUME_DEDUPE_INFO_SSC)) + + expected_prov_opts = { + 'aggregate': 'fake_aggr1', + 'compression_enabled': False, + 'dedupe_enabled': True, + 'language': 'en_US', + 'size': 1, + 'snapshot_policy': 'default', + 'snapshot_reserve': '5', + 'space_guarantee_type': 'none', + 'volume_type': 'rw' + } + + actual_prov_opts = self.client.get_provisioning_options_from_flexvol( + fake_client.VOLUME_NAME) + + self.assertEqual(expected_prov_opts, actual_prov_opts) + + def test_wait_for_busy_snapshot(self): + mock_get_snapshot = self.mock_object( + self.client, 'get_snapshot', + mock.Mock(return_value=fake.SNAPSHOT) + ) + + self.client.wait_for_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME) + + mock_get_snapshot.assert_called_once_with(fake.FLEXVOL, + fake.SNAPSHOT_NAME) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py index 9b3e8bca7..c66a5e21b 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py @@ -34,10 +34,17 @@ EXPORT_PATH = '/fake/export/path' NFS_SHARE = '%s:%s' % (SHARE_IP, EXPORT_PATH) HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, POOL_NAME) NFS_HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, NFS_SHARE) +AGGREGATE = 'aggr1' FLEXVOL = 'openstack-flexvol' NFS_FILE_PATH = 'nfsvol' PATH = '/vol/%s/%s' % (POOL_NAME, LUN_NAME) IMAGE_FILE_ID = 'img-cache-imgid' +PROVIDER_LOCATION = 'fake_provider_location' +NFS_HOST = 'nfs-host1' +NFS_SHARE_PATH = '/export' +NFS_EXPORT_1 = '%s:%s' % (NFS_HOST, NFS_SHARE_PATH) +NFS_EXPORT_2 = 'nfs-host2:/export' +MOUNT_POINT = '/mnt/nfs' LUN_METADATA = { 'OsType': None, 'SpaceReserved': 'true', @@ -56,10 +63,74 @@ NFS_VOLUME = { 'size': SIZE, 'id': VOLUME_ID, 'host': NFS_HOST_STRING, + 'provider_location': PROVIDER_LOCATION, } +FAKE_MANAGE_VOLUME = { + 'name': 'volume-new-managed-123', + 'id': 'volume-new-managed-123', +} + +FAKE_IMAGE_LOCATION = ( + None, + [ + # valid metadata + { + 'metadata': { + 'share_location': 'nfs://host/path', + 'mountpoint': '/opt/stack/data/glance', + 'id': 'abc-123', + 'type': 'nfs' + }, + 'url': 'file:///opt/stack/data/glance/image-id-0' + }, + # missing metadata + { + 'metadata': {}, + 'url': 'file:///opt/stack/data/glance/image-id-1' + }, + # missing location_type + { + 'metadata': {'location_type': None}, + 'url': 'file:///opt/stack/data/glance/image-id-2' + }, + # non-nfs location_type + { + 'metadata': {'location_type': 'not-NFS'}, + 'url': 
'file:///opt/stack/data/glance/image-id-3' + }, + # missing share_location + { + 'metadata': {'location_type': 'nfs', 'share_location': None}, + 'url': 'file:///opt/stack/data/glance/image-id-4'}, + # missing mountpoint + { + 'metadata': { + 'location_type': 'nfs', + 'share_location': 'nfs://host/path', + # Pre-kilo we documented "mount_point" + 'mount_point': '/opt/stack/data/glance' + }, + 'url': 'file:///opt/stack/data/glance/image-id-5' + }, + # Valid metadata + { + 'metadata': + { + 'share_location': 'nfs://host/path', + 'mountpoint': '/opt/stack/data/glance', + 'id': 'abc-123', + 'type': 'nfs', + }, + 'url': 'file:///opt/stack/data/glance/image-id-6' + } + ] +) + NETAPP_VOLUME = 'fake_netapp_volume' +VFILER = 'fake_netapp_vfiler' + UUID1 = '12345678-1234-5678-1234-567812345678' LUN_PATH = '/vol/vol0/%s' % LUN_NAME @@ -211,13 +282,18 @@ CLONE_DESTINATION = { 'id': CLONE_DESTINATION_ID, } +VOLUME_NAME = 'volume-fake_volume_id' +MOUNT_PATH = '168.10.16.11:/' + VOLUME_ID SNAPSHOT_NAME = 'fake_snapshot_name' SNAPSHOT_LUN_HANDLE = 'fake_snapshot_lun_handle' +SNAPSHOT_MOUNT = '/fake/mount/path' SNAPSHOT = { 'name': SNAPSHOT_NAME, + 'volume_name': 'volume-fake_volume_id', 'volume_size': SIZE, - 'volume_id': 'fake_volume_id', + 'volume_id': VOLUME_ID, + 'volume_name': VOLUME_NAME, 'busy': False, } @@ -300,6 +376,7 @@ FAKE_7MODE_POOLS = [ 'total_capacity_gb': 0.0, 'free_capacity_gb': 0.0, 'max_over_subscription_ratio': 20.0, + 'multiattach': True, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'provisioned_capacity_gb': 0.0, @@ -311,6 +388,7 @@ FAKE_7MODE_POOLS = [ CG_VOLUME_NAME = 'fake_cg_volume' CG_GROUP_NAME = 'fake_consistency_group' +CG_POOL_NAME = 'cdot' SOURCE_CG_VOLUME_NAME = 'fake_source_cg_volume' CG_VOLUME_ID = 'fake_cg_volume_id' CG_VOLUME_SIZE = 100 @@ -343,7 +421,7 @@ CG_VOLUME = { 'name': CG_VOLUME_NAME, 'size': 100, 'id': CG_VOLUME_ID, - 'host': 'hostname@backend#cdot', + 'host': 'hostname@backend#' + CG_POOL_NAME, 'consistencygroup_id': CONSISTENCY_GROUP_ID, 'status': 'fake_status', } @@ -359,6 +437,8 @@ CONSISTENCY_GROUP = { 'name': CG_GROUP_NAME, } +CG_CONTEXT = {} + CG_SNAPSHOT = { 'id': CG_SNAPSHOT_ID, 'name': CG_SNAPSHOT_NAME, @@ -406,9 +486,30 @@ class test_snapshot(object): def __getitem__(self, key): return getattr(self, key) -PROVIDER_LOCATION = 'fake_provider_location' test_snapshot = test_snapshot() test_snapshot.id = 'fake_snap_id' test_snapshot.name = 'snapshot-%s' % test_snapshot.id test_snapshot.volume_id = 'fake_volume_id' test_snapshot.provider_location = PROVIDER_LOCATION + + +def get_fake_net_interface_get_iter_response(): + return etree.XML(""" + 1 + + +
<address>FAKE_IP</address> +
</attributes-list> +
</results> +
""") + + +def get_fake_ifs(): + list_of_ifs = [ + etree.XML("""<net-interface-info> +
<address>FAKE_IP</address></net-interface-info> +
"""), + etree.XML("""<net-interface-info> +
<address>FAKE_IP2</address></net-interface-info> +
"""), + etree.XML("""<net-interface-info> +
<address>FAKE_IP3</address></net-interface-info> +
"""), + ] + return [netapp_api.NaElement(el) for el in list_of_ifs] diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py index 7a80579a8..caf2bee9e 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py @@ -45,13 +45,13 @@ class PerformanceCmodeLibraryTestCase(test.TestCase): self.fake_volumes = { 'pool1': { - 'aggregate': 'aggr1', + 'netapp_aggregate': 'aggr1', }, 'pool2': { - 'aggregate': 'aggr2', + 'netapp_aggregate': 'aggr2', }, 'pool3': { - 'aggregate': 'aggr2', + 'netapp_aggregate': 'aggr2', }, } @@ -331,6 +331,16 @@ class PerformanceCmodeLibraryTestCase(test.TestCase): self.assertAlmostEqual(expected, result) + def test__update_for_failover(self): + self.mock_object(self.perf_library, 'update_performance_cache') + mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT') + + self.perf_library._update_for_failover(mock_client, self.fake_volumes) + + self.assertEqual(mock_client, self.perf_library.zapi_client) + self.perf_library.update_performance_cache.assert_called_once_with( + self.fake_volumes) + def test_get_aggregates_for_pools(self): result = self.perf_library._get_aggregates_for_pools(self.fake_volumes) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py index 68b87c988..d8c9eee93 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py @@ -24,6 +24,8 @@ import ddt from lxml import etree import mock +from oslo_utils import timeutils + from cinder import exception from cinder import test import cinder.tests.unit.volume.drivers.netapp.dataontap.client.fakes \ @@ -45,7 +47,10 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase): def setUp(self): super(NetAppBlockStorage7modeLibraryTestCase, self).setUp() - kwargs = {'configuration': self.get_config_7mode()} + kwargs = { + 'configuration': self.get_config_7mode(), + 'host': 'openstack@7modeblock', + } self.library = block_7mode.NetAppBlockStorage7modeLibrary( 'driver', 'protocol', **kwargs) @@ -127,11 +132,25 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase): self.assertRaises(exception.NetAppDriverException, self.library.check_for_setup_error) - def test_check_for_setup_error_too_old(self): - self.zapi_client.get_ontapi_version.return_value = (1, 8) + @ddt.data(None, (1, 8)) + def test_check_for_setup_error_unsupported_or_no_version(self, version): + self.zapi_client.get_ontapi_version.return_value = version self.assertRaises(exception.VolumeBackendAPIException, self.library.check_for_setup_error) + @ddt.data(None, fake.VFILER) + def test__get_owner(self, vfiler): + self.library.configuration.netapp_server_hostname = 'openstack' + self.library.vfiler = vfiler + expected_owner = 'openstack' + + retval = self.library._get_owner() + + if vfiler: + expected_owner += ':' + vfiler + + self.assertEqual(expected_owner, retval) + def test_find_mapped_lun_igroup(self): response = netapp_api.NaElement(etree.XML(""" @@ -291,7 +310,8 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase): self.assertEqual(0, self.library.partner_zapi_client. 
has_luns_mapped_to_initiators.call_count) - def test_clone_lun_zero_block_count(self): + @ddt.data(True, False) + def test_clone_lun_zero_block_count(self, is_snapshot): """Test for when clone lun is not passed a block count.""" self.library._get_lun_attr = mock.Mock(return_value={ 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'}) @@ -299,7 +319,8 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase): self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN] self.library._add_lun_to_table = mock.Mock() - self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false') + self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false', + is_snapshot=is_snapshot) self.library.zapi_client.clone_lun.assert_called_once_with( '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN', @@ -539,6 +560,7 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase): 'total_capacity_gb': 1342.21, 'reserved_percentage': 5, 'max_over_subscription_ratio': 10.0, + 'multiattach': True, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', @@ -633,6 +655,78 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase): self.assertListEqual([], pools) + @ddt.data((None, False, False), + (30, True, False), + (30, False, True)) + @ddt.unpack + def test__refresh_volume_info_already_running(self, + vol_refresh_time, + vol_refresh_voluntary, + is_newer): + mock_warning_log = self.mock_object(block_7mode.LOG, 'warning') + self.library.vol_refresh_time = vol_refresh_time + self.library.vol_refresh_voluntary = vol_refresh_voluntary + self.library.vol_refresh_interval = 30 + self.mock_object(timeutils, 'is_newer_than', mock.Mock( + return_value=is_newer)) + self.mock_object(na_utils, 'set_safe_attr', mock.Mock( + return_value=False)) + + retval = self.library._refresh_volume_info() + + self.assertIsNone(retval) + # Assert no values are unset by the method + self.assertEqual(vol_refresh_voluntary, + self.library.vol_refresh_voluntary) + self.assertEqual(vol_refresh_time, self.library.vol_refresh_time) + if timeutils.is_newer_than.called: + timeutils.is_newer_than.assert_called_once_with( + vol_refresh_time, self.library.vol_refresh_interval) + na_utils.set_safe_attr.assert_has_calls([ + mock.call(self.library, 'vol_refresh_running', True), + mock.call(self.library, 'vol_refresh_running', False)]) + self.assertEqual(1, mock_warning_log.call_count) + + def test__refresh_volume_info(self): + mock_warning_log = self.mock_object(block_7mode.LOG, 'warning') + self.library.vol_refresh_time = None + self.library.vol_refresh_voluntary = True + self.mock_object(timeutils, 'is_newer_than') + self.mock_object(self.library.zapi_client, 'get_filer_volumes') + self.mock_object(self.library, '_get_filtered_pools', mock.Mock( + return_value=['vol1', 'vol2'])) + self.mock_object(na_utils, 'set_safe_attr', mock.Mock( + return_value=True)) + + retval = self.library._refresh_volume_info() + + self.assertIsNone(retval) + self.assertEqual(False, self.library.vol_refresh_voluntary) + self.assertEqual(['vol1', 'vol2'], self.library.volume_list) + self.assertIsNotNone(self.library.vol_refresh_time) + na_utils.set_safe_attr.assert_has_calls([ + mock.call(self.library, 'vol_refresh_running', True), + mock.call(self.library, 'vol_refresh_running', False)]) + self.assertFalse(mock_warning_log.called) + + def test__refresh_volume_info_exception(self): + mock_warning_log = self.mock_object(block_7mode.LOG, 'warning') + self.library.vol_refresh_time = None + self.library.vol_refresh_voluntary = True + 
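# A NetAppDriverException from get_filer_volumes must not escape the refresh;
# the assertions below check for a single warning and that pool filtering
# never ran.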
self.mock_object(timeutils, 'is_newer_than') + self.mock_object(na_utils, 'set_safe_attr', mock.Mock( + return_value=True)) + self.mock_object( + self.library.zapi_client, 'get_filer_volumes', + mock.Mock(side_effect=exception.NetAppDriverException)) + self.mock_object(self.library, '_get_filtered_pools') + + retval = self.library._refresh_volume_info() + + self.assertIsNone(retval) + self.assertFalse(self.library._get_filtered_pools.called) + self.assertEqual(1, mock_warning_log.call_count) + def test_delete_volume(self): self.library.vol_refresh_voluntary = False mock_super_delete_volume = self.mock_object( diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py index 5be5131bf..5ac3f94bb 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py @@ -28,11 +28,12 @@ import uuid import ddt import mock from oslo_log import versionutils +from oslo_service import loopingcall from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _LW from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes @@ -48,7 +49,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppBlockStorageLibraryTestCase, self).setUp() - kwargs = {'configuration': self.get_config_base()} + kwargs = { + 'configuration': self.get_config_base(), + 'host': 'openstack@netappblock', + } self.library = block_base.NetAppBlockStorageLibrary( 'driver', 'protocol', **kwargs) self.library.zapi_client = mock.Mock() @@ -740,11 +744,11 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): def test_setup_error_invalid_lun_os(self): self.library.configuration.netapp_lun_ostype = 'unknown' self.library.do_setup(mock.Mock()) + self.assertRaises(exception.NetAppDriverException, self.library.check_for_setup_error) - msg = _("Invalid value for NetApp configuration" - " option netapp_lun_ostype.") - block_base.LOG.error.assert_called_once_with(msg) + + block_base.LOG.error.assert_called_once_with(mock.ANY) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) @mock.patch.object(block_base, 'LOG', mock.Mock()) @@ -756,9 +760,7 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): self.assertRaises(exception.NetAppDriverException, self.library.check_for_setup_error) - msg = _("Invalid value for NetApp configuration" - " option netapp_host_type.") - block_base.LOG.error.assert_called_once_with(msg) + block_base.LOG.error.assert_called_once_with(mock.ANY) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) def test_check_for_setup_error_both_config(self): @@ -767,9 +769,13 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): self.library.do_setup(mock.Mock()) self.zapi_client.get_lun_list.return_value = ['lun1'] self.library._extract_and_populate_luns = mock.Mock() + mock_start_periodic_tasks = self.mock_object( + self.library, '_start_periodic_tasks') self.library.check_for_setup_error() + self.library._extract_and_populate_luns.assert_called_once_with( ['lun1']) + mock_start_periodic_tasks.assert_called_once_with() @mock.patch.object(na_utils, 'check_flags', mock.Mock()) def test_check_for_setup_error_no_os_host(self): @@ -778,9 +784,29 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): self.library.do_setup(mock.Mock()) 
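The _refresh_volume_info cases just above pin down a small concurrency pattern in the 7-mode library: na_utils.set_safe_attr doubles as a test-and-set lock around the refresh job, and timeutils.is_newer_than gates how often an involuntary refresh may run. The sketch below reconstructs that guard from what the tests assert; the simplified set_safe_attr stand-in and the RefreshGuardExample class are illustrative assumptions, not the driver's verbatim code.

import logging
import threading

from oslo_utils import timeutils

LOG = logging.getLogger(__name__)
_attr_lock = threading.Lock()


def set_safe_attr(instance, attr, val):
    # Simplified stand-in for na_utils.set_safe_attr: change the
    # attribute only if it differs, and report whether this call
    # actually made the change.
    with _attr_lock:
        if getattr(instance, attr, None) == val:
            return False
        setattr(instance, attr, val)
        return True


class RefreshGuardExample(object):
    vol_refresh_interval = 30

    def __init__(self, list_pools):
        self.list_pools = list_pools  # stand-in for the ZAPI-backed lookup
        self.vol_refresh_time = None
        self.vol_refresh_voluntary = False
        self.vol_refresh_running = False
        self.volume_list = []

    def _refresh_volume_info(self):
        # Refresh when the data was never loaded, when a refresh was
        # explicitly requested (voluntary), or when the timer check fires.
        if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
                timeutils.is_newer_than(self.vol_refresh_time,
                                        self.vol_refresh_interval)):
            try:
                # Test-and-set: if the flag was already True, another
                # thread owns the refresh, so warn and bail out. The
                # finally clause still resets the flag, which is why the
                # tests assert set_safe_attr calls with True then False.
                if not set_safe_attr(self, 'vol_refresh_running', True):
                    LOG.warning('Volume refresh job already running.')
                    return
                self.vol_refresh_voluntary = False
                self.volume_list = self.list_pools()
                self.vol_refresh_time = timeutils.utcnow()
            except Exception:
                LOG.warning('Error refreshing volume info.')
            finally:
                set_safe_attr(self, 'vol_refresh_running', False)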
self.zapi_client.get_lun_list.return_value = ['lun1'] self.library._extract_and_populate_luns = mock.Mock() + mock_start_periodic_tasks = self.mock_object( + self.library, '_start_periodic_tasks') + self.library.check_for_setup_error() self.library._extract_and_populate_luns.assert_called_once_with( ['lun1']) + mock_start_periodic_tasks.assert_called_once_with() + + def test_start_periodic_tasks(self): + + mock_handle_housekeeping_tasks = self.mock_object( + self.library, '_handle_housekeeping_tasks') + + housekeeping_periodic_task = mock.Mock() + mock_loopingcall = self.mock_object( + loopingcall, 'FixedIntervalLoopingCall', + mock.Mock(return_value=housekeeping_periodic_task)) + + self.library._start_periodic_tasks() + + mock_loopingcall.assert_called_once_with( + mock_handle_housekeeping_tasks) + self.assertTrue(housekeeping_periodic_task.start.called) def test_delete_volume(self): mock_delete_lun = self.mock_object(self.library, '_delete_lun') @@ -892,6 +918,21 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): self.assertRaises(NotImplementedError, self.library._clone_lun, fake.VOLUME_ID, 'new-' + fake.VOLUME_ID) + def test_create_snapshot(self): + + fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, + fake.LUN_SIZE, fake.LUN_METADATA) + mock_clone_lun = self.mock_object(self.library, '_clone_lun') + self.mock_object( + self.library, '_get_lun_from_table', + mock.Mock(return_value=fake_lun)) + + self.library.create_snapshot(fake.SNAPSHOT) + + mock_clone_lun.assert_called_once_with( + fake_lun.name, fake.SNAPSHOT_NAME, space_reserved='false', + is_snapshot=True) + def test_create_volume_from_snapshot(self): mock_do_clone = self.mock_object(self.library, '_clone_source_to_destination') @@ -1329,7 +1370,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): mock.Mock(return_value=fake.POOL_NAME)) mock_clone_lun = self.mock_object(self.library, '_clone_lun') - mock_busy = self.mock_object(self.library, '_handle_busy_snapshot') + mock_busy = self.mock_object( + self.zapi_client, 'wait_for_busy_snapshot') self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot]) @@ -1458,16 +1500,3 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): } mock_clone_source_to_destination.assert_called_once_with( clone_source_to_destination_args, fake.VOLUME) - - def test_handle_busy_snapshot(self): - self.mock_object(block_base, 'LOG') - mock_get_snapshot = self.mock_object( - self.zapi_client, 'get_snapshot', - mock.Mock(return_value=fake.SNAPSHOT) - ) - - self.library._handle_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME) - - self.assertEqual(1, block_base.LOG.info.call_count) - mock_get_snapshot.assert_called_once_with(fake.FLEXVOL, - fake.SNAPSHOT_NAME) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py index cf9f6f011..5ee7cee76 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py @@ -25,12 +25,16 @@ from oslo_service import loopingcall from cinder import exception from cinder import test import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake +from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as\ + fake_utils import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap import block_cmode from 
cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode +from cinder.volume.drivers.netapp.dataontap.utils import data_motion +from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils from cinder.volume.drivers.netapp import utils as na_utils @@ -41,7 +45,10 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): def setUp(self): super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp() - kwargs = {'configuration': self.get_config_cmode()} + kwargs = { + 'configuration': self.get_config_cmode(), + 'host': 'openstack@cdotblock', + } self.library = block_cmode.NetAppBlockStorageCmodeLibrary( 'driver', 'protocol', **kwargs) @@ -82,6 +89,9 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup') def test_do_setup(self, super_do_setup, mock_check_flags): self.mock_object(client_base.Client, '_init_ssh_client') + self.mock_object( + config_utils, 'get_backend_configuration', + mock.Mock(return_value=self.get_config_cmode())) context = mock.Mock() self.library.do_setup(context) @@ -94,8 +104,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): block_base.NetAppBlockStorageLibrary, 'check_for_setup_error') mock_check_api_permissions = self.mock_object( self.library.ssc_library, 'check_api_permissions') - mock_start_periodic_tasks = self.mock_object( - self.library, '_start_periodic_tasks') mock_get_pool_map = self.mock_object( self.library, '_get_flexvol_to_pool_map', mock.Mock(return_value={'fake_map': None})) @@ -104,7 +112,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.assertEqual(1, super_check_for_setup_error.call_count) mock_check_api_permissions.assert_called_once_with() - self.assertEqual(1, mock_start_periodic_tasks.call_count) mock_get_pool_map.assert_called_once_with() def test_check_for_setup_error_no_filtered_pools(self): @@ -112,7 +119,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): 'check_for_setup_error') mock_check_api_permissions = self.mock_object( self.library.ssc_library, 'check_api_permissions') - self.mock_object(self.library, '_start_periodic_tasks') self.mock_object( self.library, '_get_flexvol_to_pool_map', mock.Mock(return_value={})) @@ -122,6 +128,51 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): mock_check_api_permissions.assert_called_once_with() + def test_start_periodic_tasks(self): + + mock_update_ssc = self.mock_object( + self.library, '_update_ssc') + super_start_periodic_tasks = self.mock_object( + block_base.NetAppBlockStorageLibrary, '_start_periodic_tasks') + + update_ssc_periodic_task = mock.Mock() + mock_loopingcall = self.mock_object( + loopingcall, 'FixedIntervalLoopingCall', + mock.Mock(return_value=update_ssc_periodic_task)) + + self.library._start_periodic_tasks() + + mock_loopingcall.assert_called_once_with(mock_update_ssc) + self.assertTrue(update_ssc_periodic_task.start.called) + mock_update_ssc.assert_called_once_with() + super_start_periodic_tasks.assert_called_once_with() + + @ddt.data({'replication_enabled': True, 'failed_over': False}, + {'replication_enabled': True, 'failed_over': True}, + {'replication_enabled': False, 'failed_over': False}) + @ddt.unpack + def test_handle_housekeeping_tasks(self, replication_enabled, failed_over): + ensure_mirrors = self.mock_object(data_motion.DataMotionMixin, + 
'ensure_snapmirrors') + self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', + mock.Mock(return_value=fake_utils.SSC.keys())) + self.library.replication_enabled = replication_enabled + self.library.failed_over = failed_over + super_handle_housekeeping_tasks = self.mock_object( + block_base.NetAppBlockStorageLibrary, '_handle_housekeeping_tasks') + + self.library._handle_housekeeping_tasks() + + super_handle_housekeeping_tasks.assert_called_once_with() + (self.zapi_client.remove_unused_qos_policy_groups. + assert_called_once_with()) + if replication_enabled and not failed_over: + ensure_mirrors.assert_called_once_with( + self.library.configuration, self.library.backend_name, + fake_utils.SSC.keys()) + else: + self.assertFalse(ensure_mirrors.called) + def test_find_mapped_lun_igroup(self): igroups = [fake.IGROUP1] self.zapi_client.get_igroup_by_initiators.return_value = igroups @@ -203,7 +254,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, - source_snapshot=None) + source_snapshot=None, is_snapshot=False) def test_clone_lun_blocks(self): """Test for when clone lun is passed block information.""" @@ -229,7 +280,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=block_count, dest_block=dest_block, src_block=src_block, qos_policy_group_name=None, - source_snapshot=None) + source_snapshot=None, is_snapshot=False) def test_clone_lun_no_space_reservation(self): """Test for when space_reservation is not passed.""" @@ -245,12 +296,12 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.library._add_lun_to_table = mock.Mock() self.library._update_stale_vols = mock.Mock() - self.library._clone_lun('fakeLUN', 'newFakeLUN') + self.library._clone_lun('fakeLUN', 'newFakeLUN', is_snapshot=True) self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, - source_snapshot=None) + source_snapshot=None, is_snapshot=True) def test_get_fc_target_wwpns(self): ports = [fake.FC_FORMATTED_TARGET_WWPNS[0], @@ -302,7 +353,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', - 'aggregate': 'aggr1', + 'netapp_aggregate': 'aggr1', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', }, @@ -310,6 +361,9 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): mock_get_ssc = self.mock_object(self.library.ssc_library, 'get_ssc', mock.Mock(return_value=ssc)) + mock_get_aggrs = self.mock_object(self.library.ssc_library, + 'get_ssc_aggregates', + mock.Mock(return_value=['aggr1'])) self.library.reserved_percentage = 5 self.library.max_over_subscription_ratio = 10 @@ -323,6 +377,17 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.zapi_client, 'get_flexvol_capacity', mock.Mock(return_value=mock_capacities)) + aggr_capacities = { + 'aggr1': { + 'percent-used': 45, + 'size-available': 59055800320.0, + 'size-total': 107374182400.0, + }, + } + mock_get_aggr_capacities = self.mock_object( + self.zapi_client, 'get_aggregate_capacities', + mock.Mock(return_value=aggr_capacities)) + result = self.library._get_pool_stats(filter_function='filter', goodness_function='goodness') @@ -332,9 +397,11 @@ class 
NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): 'consistencygroup_support': True, 'reserved_percentage': 5, 'max_over_subscription_ratio': 10.0, + 'multiattach': True, 'total_capacity_gb': 10.0, 'free_capacity_gb': 2.0, 'provisioned_capacity_gb': 8.0, + 'netapp_aggregate_used_percent': 45, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', @@ -344,13 +411,15 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', - 'aggregate': 'aggr1', + 'netapp_aggregate': 'aggr1', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', }] self.assertEqual(expected, result) mock_get_ssc.assert_called_once_with() + mock_get_aggrs.assert_called_once_with() + mock_get_aggr_capacities.assert_called_once_with(['aggr1']) @ddt.data({}, None) def test_get_pool_stats_no_ssc_vols(self, ssc): @@ -573,25 +642,67 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.zapi_client.move_lun.assert_called_once_with( '/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume') - def test_start_periodic_tasks(self): + @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']}, + {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']}, + {'secondary_id': 'dev1', 'configured_targets': []}, + {'secondary_id': None, 'configured_targets': []}) + @ddt.unpack + def test_failover_host_invalid_replication_target(self, secondary_id, + configured_targets): + """This test executes a method in the DataMotionMixin.""" + self.library.backend_name = 'dev0' + self.mock_object(data_motion.DataMotionMixin, + 'get_replication_backend_names', + mock.Mock(return_value=configured_targets)) + complete_failover_call = self.mock_object( + data_motion.DataMotionMixin, '_complete_failover') - mock_update_ssc = self.mock_object( - self.library, '_update_ssc') - mock_remove_unused_qos_policy_groups = self.mock_object( - self.zapi_client, 'remove_unused_qos_policy_groups') + self.assertRaises(exception.InvalidReplicationTarget, + self.library.failover_host, 'fake_context', [], + secondary_id=secondary_id) + self.assertFalse(complete_failover_call.called) - update_ssc_periodic_task = mock.Mock() - harvest_qos_periodic_task = mock.Mock() - side_effect = [update_ssc_periodic_task, harvest_qos_periodic_task] - mock_loopingcall = self.mock_object( - loopingcall, 'FixedIntervalLoopingCall', - mock.Mock(side_effect=side_effect)) + def test_failover_host_unable_to_failover(self): + """This test executes a method in the DataMotionMixin.""" + self.library.backend_name = 'dev0' + self.mock_object( + data_motion.DataMotionMixin, '_complete_failover', + mock.Mock(side_effect=exception.NetAppDriverException)) + self.mock_object(data_motion.DataMotionMixin, + 'get_replication_backend_names', + mock.Mock(return_value=['dev1', 'dev2'])) + self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', + mock.Mock(return_value=fake_utils.SSC.keys())) + self.mock_object(self.library, '_update_zapi_client') - self.library._start_periodic_tasks() + self.assertRaises(exception.UnableToFailOver, + self.library.failover_host, 'fake_context', [], + secondary_id='dev1') + data_motion.DataMotionMixin._complete_failover.assert_called_once_with( + 'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [], + failover_target='dev1') + self.assertFalse(self.library._update_zapi_client.called) - mock_loopingcall.assert_has_calls([ - mock.call(mock_update_ssc), - mock.call(mock_remove_unused_qos_policy_groups)]) - 
self.assertTrue(update_ssc_periodic_task.start.called) - self.assertTrue(harvest_qos_periodic_task.start.called) - mock_update_ssc.assert_called_once_with() + def test_failover_host(self): + """This test executes a method in the DataMotionMixin.""" + self.library.backend_name = 'dev0' + self.mock_object(data_motion.DataMotionMixin, '_complete_failover', + mock.Mock(return_value=('dev1', []))) + self.mock_object(data_motion.DataMotionMixin, + 'get_replication_backend_names', + mock.Mock(return_value=['dev1', 'dev2'])) + self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', + mock.Mock(return_value=fake_utils.SSC.keys())) + self.mock_object(self.library, '_update_zapi_client') + + actual_active, vol_updates = self.library.failover_host( + 'fake_context', [], secondary_id='dev1') + + data_motion.DataMotionMixin._complete_failover.assert_called_once_with( + 'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [], + failover_target='dev1') + self.library._update_zapi_client.assert_called_once_with('dev1') + self.assertTrue(self.library.failed_over) + self.assertEqual('dev1', self.library.failed_over_backend_name) + self.assertEqual('dev1', actual_active) + self.assertEqual([], vol_updates) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py index c2e92d9cc..a4ed4437a 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py @@ -33,7 +33,10 @@ class NetApp7modeNfsDriverTestCase(test.TestCase): def setUp(self): super(NetApp7modeNfsDriverTestCase, self).setUp() - kwargs = {'configuration': self.get_config_7mode()} + kwargs = { + 'configuration': self.get_config_7mode(), + 'host': 'openstack@7modenfs', + } with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): @@ -55,6 +58,32 @@ class NetApp7modeNfsDriverTestCase(test.TestCase): config.netapp_server_port = '80' return config + @ddt.data({'share': None, 'is_snapshot': False}, + {'share': None, 'is_snapshot': True}, + {'share': 'fake_share', 'is_snapshot': False}, + {'share': 'fake_share', 'is_snapshot': True}) + @ddt.unpack + def test_clone_backing_file_for_volume(self, share, is_snapshot): + + mock_get_export_ip_path = self.mock_object( + self.driver, '_get_export_ip_path', + mock.Mock(return_value=(fake.SHARE_IP, fake.EXPORT_PATH))) + mock_get_actual_path_for_export = self.mock_object( + self.driver.zapi_client, 'get_actual_path_for_export', + mock.Mock(return_value='fake_path')) + + self.driver._clone_backing_file_for_volume( + fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share, + is_snapshot=is_snapshot) + + mock_get_export_ip_path.assert_called_once_with( + fake.VOLUME_ID, share) + mock_get_actual_path_for_export.assert_called_once_with( + fake.EXPORT_PATH) + self.driver.zapi_client.clone_file.assert_called_once_with( + 'fake_path/' + fake.FLEXVOL, 'fake_path/fake_clone', + None) + @ddt.data({'nfs_sparsed_volumes': True}, {'nfs_sparsed_volumes': False}) @ddt.unpack @@ -87,12 +116,14 @@ class NetApp7modeNfsDriverTestCase(test.TestCase): expected = [{'pool_name': '192.168.99.24:/fake/export/path', 'QoS_support': False, + 'consistencygroup_support': True, 'thick_provisioning_support': thick, 'thin_provisioning_support': not thick, 'free_capacity_gb': 12.0, 'total_capacity_gb': 4468.0, 'reserved_percentage': 7, 'max_over_subscription_ratio': 19.0, + 'multiattach': True, 'provisioned_capacity_gb': 4456.0, 'utilization': 
30.0, 'filter_function': 'filter', @@ -143,3 +174,31 @@ class NetApp7modeNfsDriverTestCase(test.TestCase): fake.NFS_SHARE) self.assertEqual(expected, result) + + def test_delete_cgsnapshot(self): + mock_delete_file = self.mock_object(self.driver, '_delete_file') + + model_update, snapshots_model_update = ( + self.driver.delete_cgsnapshot( + fake.CG_CONTEXT, fake.CG_SNAPSHOT, [fake.SNAPSHOT])) + + mock_delete_file.assert_called_once_with( + fake.SNAPSHOT['volume_id'], fake.SNAPSHOT['name']) + self.assertIsNone(model_update) + self.assertIsNone(snapshots_model_update) + + def test_get_snapshot_backing_flexvol_names(self): + snapshots = [ + {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, + {'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}}, + {'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}}, + {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, + ] + + hosts = [snap['volume']['host'] for snap in snapshots] + flexvols = self.driver._get_backing_flexvol_names(hosts) + + self.assertEqual(3, len(flexvols)) + self.assertIn('volume1', flexvols) + self.assertIn('volume2', flexvols) + self.assertIn('volume3', flexvols) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py index 1ee0ca32c..22b8e9a00 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py @@ -16,23 +16,31 @@ """ Unit tests for the NetApp NFS storage driver """ - -import os - import copy +import os +import threading +import time + import ddt import mock from os_brick.remotefs import remotefs as remotefs_brick +from oslo_concurrency import processutils +from oslo_service import loopingcall from oslo_utils import units +import shutil +from cinder import context from cinder import exception from cinder import test +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume.drivers import nfs +from cinder.volume.drivers import remotefs @ddt.ddt @@ -44,8 +52,15 @@ class NetAppNfsDriverTestCase(test.TestCase): configuration.nfs_mount_point_base = '/mnt/test' configuration.reserved_percentage = 0 configuration.max_over_subscription_ratio = 1.1 + self.fake_nfs_export_1 = fake.NFS_EXPORT_1 + self.fake_nfs_export_2 = fake.NFS_EXPORT_2 + self.fake_mount_point = fake.MOUNT_POINT + self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) - kwargs = {'configuration': configuration} + kwargs = { + 'configuration': configuration, + 'host': 'openstack@netappnfs', + } with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): @@ -54,6 +69,9 @@ class NetAppNfsDriverTestCase(test.TestCase): self.driver = nfs_base.NetAppNfsDriver(**kwargs) self.driver.db = mock.Mock() + self.driver.zapi_client = mock.Mock() + self.zapi_client = self.driver.zapi_client + @mock.patch.object(nfs.NfsDriver, 'do_setup') @mock.patch.object(na_utils, 'check_flags') def test_do_setup(self, mock_check_flags, mock_super_do_setup): @@ -81,9 +99,35 @@ class NetAppNfsDriverTestCase(test.TestCase): self.assertEqual(expected_reserved_percentage, round(result['reserved_percentage'])) + def 
test_check_for_setup_error(self): + super_check_for_setup_error = self.mock_object( + nfs.NfsDriver, 'check_for_setup_error') + mock_start_periodic_tasks = self.mock_object( + self.driver, '_start_periodic_tasks') + + self.driver.check_for_setup_error() + + super_check_for_setup_error.assert_called_once_with() + mock_start_periodic_tasks.assert_called_once_with() + + def test_start_periodic_tasks(self): + + mock_handle_housekeeping_tasks = self.mock_object( + self.driver, '_handle_housekeeping_tasks') + + housekeeping_periodic_task = mock.Mock() + mock_loopingcall = self.mock_object( + loopingcall, 'FixedIntervalLoopingCall', + mock.Mock(return_value=housekeeping_periodic_task)) + + self.driver._start_periodic_tasks() + + mock_loopingcall.assert_called_once_with( + mock_handle_housekeeping_tasks) + self.assertTrue(housekeeping_periodic_task.start.called) + def test_get_capacity_info_ipv4_share(self): expected = fake.CAPACITY_VALUES - self.driver.zapi_client = mock.Mock() get_capacity = self.driver.zapi_client.get_flexvol_capacity get_capacity.return_value = fake.CAPACITIES @@ -95,7 +139,6 @@ class NetAppNfsDriverTestCase(test.TestCase): def test_get_capacity_info_ipv6_share(self): expected = fake.CAPACITY_VALUES - self.driver.zapi_client = mock.Mock() get_capacity = self.driver.zapi_client.get_flexvol_capacity get_capacity.return_value = fake.CAPACITIES @@ -105,6 +148,11 @@ class NetAppNfsDriverTestCase(test.TestCase): get_capacity.assert_has_calls([ mock.call(flexvol_path=fake.EXPORT_PATH)]) + def test_get_pool(self): + pool = self.driver.get_pool({'provider_location': 'fake-share'}) + + self.assertEqual('fake-share', pool) + def test_create_volume(self): self.mock_object(self.driver, '_ensure_shares_mounted') self.mock_object(na_utils, 'get_volume_extra_specs') @@ -135,17 +183,6 @@ class NetAppNfsDriverTestCase(test.TestCase): self.driver.create_volume, fake.NFS_VOLUME) - def test_create_volume_from_snapshot(self): - provider_location = fake.POOL_NAME - snapshot = fake.CLONE_SOURCE - self.mock_object(self.driver, '_clone_source_to_destination_volume', - mock.Mock(return_value=provider_location)) - - result = self.driver.create_cloned_volume(fake.NFS_VOLUME, - snapshot) - - self.assertEqual(provider_location, result) - def test_clone_source_to_destination_volume(self): self.mock_object(self.driver, '_get_volume_location', mock.Mock( return_value=fake.POOL_NAME)) @@ -243,6 +280,22 @@ class NetAppNfsDriverTestCase(test.TestCase): fake.CLONE_SOURCE, fake.NFS_VOLUME) + def test_create_volume_from_snapshot(self): + volume = fake.VOLUME + expected_source = { + 'name': fake.SNAPSHOT_NAME, + 'size': fake.SIZE, + 'id': fake.VOLUME_ID, + } + mock_clone_call = self.mock_object( + self.driver, '_clone_source_to_destination_volume', + mock.Mock(return_value='fake')) + + retval = self.driver.create_volume_from_snapshot(volume, fake.SNAPSHOT) + + self.assertEqual('fake', retval) + mock_clone_call.assert_called_once_with(expected_source, volume) + def test_create_cloned_volume(self): provider_location = fake.POOL_NAME src_vref = fake.CLONE_SOURCE @@ -259,6 +312,186 @@ class NetAppNfsDriverTestCase(test.TestCase): fake.NFS_VOLUME, fake.EXTRA_SPECS) + def test_create_snapshot(self): + + mock_clone_backing_file_for_volume = self.mock_object( + self.driver, '_clone_backing_file_for_volume') + + self.driver.create_snapshot(fake.SNAPSHOT) + + mock_clone_backing_file_for_volume.assert_called_once_with( + fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['name'], + fake.SNAPSHOT['volume_id'], is_snapshot=True) + + def 
test_delete_snapshot(self): + updates = { + 'name': fake.SNAPSHOT_NAME, + 'volume_size': fake.SIZE, + 'volume_id': fake.VOLUME_ID, + 'volume_name': fake.VOLUME_NAME, + 'busy': False, + } + snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt, **updates) + self.mock_object(self.driver, '_delete_file') + + self.driver.delete_snapshot(snapshot) + + self.driver._delete_file.assert_called_once_with(snapshot.volume_id, + snapshot.name) + + def test__get_volume_location(self): + volume_id = fake.VOLUME_ID + self.mock_object(self.driver, '_get_host_ip', + mock.Mock(return_value='168.124.10.12')) + self.mock_object(self.driver, '_get_export_path', + mock.Mock(return_value='/fake_mount_path')) + + retval = self.driver._get_volume_location(volume_id) + + self.assertEqual('168.124.10.12:/fake_mount_path', retval) + self.driver._get_host_ip.assert_called_once_with(volume_id) + self.driver._get_export_path.assert_called_once_with(volume_id) + + def test__clone_backing_file_for_volume(self): + self.assertRaises(NotImplementedError, + self.driver._clone_backing_file_for_volume, + fake.VOLUME_NAME, fake.CLONE_SOURCE_NAME, + fake.VOLUME_ID, share=None) + + def test__get_provider_location(self): + updates = {'provider_location': fake.PROVIDER_LOCATION} + volume = fake_volume.fake_volume_obj(self.ctxt, **updates) + self.mock_object(self.driver.db, 'volume_get', mock.Mock( + return_value=volume)) + + retval = self.driver._get_provider_location(fake.VOLUME_ID) + + self.assertEqual(fake.PROVIDER_LOCATION, retval) + + @ddt.data(None, processutils.ProcessExecutionError) + def test__volume_not_present(self, side_effect): + self.mock_object(self.driver, '_get_volume_path') + self.mock_object(self.driver, '_try_execute', + mock.Mock(side_effect=side_effect)) + + retval = self.driver._volume_not_present( + fake.MOUNT_PATH, fake.VOLUME_NAME) + + self.assertEqual(side_effect is not None, retval) + + @mock.patch.object(time, 'sleep') + def test__try_execute_exception(self, patched_sleep): + self.mock_object(self.driver, '_execute', mock.Mock( + side_effect=processutils.ProcessExecutionError)) + mock_exception_log = self.mock_object(nfs_base.LOG, 'exception') + self.driver.configuration.num_shell_tries = 3 + + self.assertRaises(processutils.ProcessExecutionError, + self.driver._try_execute, + 'fake-command', attr1='val1', attr2='val2') + self.assertEqual(2, mock_exception_log.call_count) + self.driver._execute.assert_has_calls([ + mock.call('fake-command', attr1='val1', attr2='val2'), + mock.call('fake-command', attr1='val1', attr2='val2'), + mock.call('fake-command', attr1='val1', attr2='val2')]) + self.assertEqual(2, time.sleep.call_count) + patched_sleep.assert_has_calls([mock.call(1), mock.call(4)]) + + def test__update_volume_stats(self): + self.assertRaises(NotImplementedError, + self.driver._update_volume_stats) + + def test_copy_image_to_volume_base_exception(self): + updates = { + 'name': fake.VOLUME_NAME, + 'id': fake.VOLUME_ID, + 'provider_location': fake.PROVIDER_LOCATION, + } + mock_info_log = self.mock_object(nfs_base.LOG, 'info') + fake_vol = fake_volume.fake_volume_obj(self.ctxt, **updates) + self.mock_object(remotefs.RemoteFSDriver, 'copy_image_to_volume', + mock.Mock(side_effect=exception.NfsException)) + + self.assertRaises(exception.NfsException, + self.driver.copy_image_to_volume, + 'fake_context', fake_vol, + 'fake_img_service', fake.IMAGE_FILE_ID) + mock_info_log.assert_not_called() + + @ddt.data(None, Exception) + def test_copy_image_to_volume(self, exc): + mock_log = self.mock_object(nfs_base, 
'LOG') + self.mock_object(remotefs.RemoteFSDriver, 'copy_image_to_volume') + self.mock_object(self.driver, '_do_clone_rel_img_cache', + mock.Mock(side_effect=exc)) + + retval = self.driver.copy_image_to_volume( + 'fake_context', fake.NFS_VOLUME, 'fake_img_service', + fake.IMAGE_FILE_ID) + + self.assertIsNone(retval) + self.assertEqual(exc is not None, mock_log.warning.called) + self.assertEqual(2, mock_log.info.call_count) + + @ddt.data(True, False) + def test_do_clone_rel_img_cache(self, path_exists): + self.mock_object(nfs_base.LOG, 'info') + self.mock_object(utils, 'synchronized', + mock.Mock(return_value=lambda f: f)) + self.mock_object(self.driver, '_get_mount_point_for_share', + mock.Mock(return_value='dir')) + self.mock_object(os.path, 'exists', + mock.Mock(return_value=path_exists)) + self.mock_object(self.driver, '_clone_backing_file_for_volume') + + retval = self.driver._do_clone_rel_img_cache( + fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, + fake.NFS_SHARE, 'fake_cache_file') + + self.assertIsNone(retval) + self.assertTrue(self.driver._get_mount_point_for_share.called) + if not path_exists: + self.driver._clone_backing_file_for_volume.assert_called_once_with( + fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, + share=fake.NFS_SHARE, volume_id=None) + else: + self.driver._clone_backing_file_for_volume.assert_not_called() + os.path.exists.assert_called_once_with( + 'dir/' + fake.CLONE_DESTINATION_NAME) + + def test__spawn_clean_cache_job_clean_job_setup(self): + self.driver.cleaning = True + mock_debug_log = self.mock_object(nfs_base.LOG, 'debug') + self.mock_object(utils, 'synchronized', + mock.Mock(return_value=lambda f: f)) + + retval = self.driver._spawn_clean_cache_job() + + self.assertIsNone(retval) + self.assertEqual(1, mock_debug_log.call_count) + + def test__spawn_clean_cache_job_new_clean_job(self): + + class FakeTimer(object): + def start(self): + pass + + fake_timer = FakeTimer() + self.mock_object(utils, 'synchronized', + mock.Mock(return_value=lambda f: f)) + self.mock_object(fake_timer, 'start') + self.mock_object(nfs_base.LOG, 'debug') + self.mock_object(self.driver, '_clean_image_cache') + self.mock_object(threading, 'Timer', + mock.Mock(return_value=fake_timer)) + + retval = self.driver._spawn_clean_cache_job() + + self.assertIsNone(retval) + threading.Timer.assert_called_once_with( + 0, self.driver._clean_image_cache) + fake_timer.start.assert_called_once_with() + def test_cleanup_volume_on_failure(self): path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) mock_local_path = self.mock_object(self.driver, 'local_path') @@ -341,6 +574,22 @@ class NetAppNfsDriverTestCase(test.TestCase): self.assertEqual(expected, result) + def test_construct_image_url_loc(self): + img_loc = fake.FAKE_IMAGE_LOCATION + + locations = self.driver._construct_image_nfs_url(img_loc) + + self.assertIn("nfs://host/path/image-id-0", locations) + self.assertIn("nfs://host/path/image-id-6", locations) + self.assertEqual(2, len(locations)) + + def test_construct_image_url_direct(self): + img_loc = ("nfs://host/path/image-id", None) + + locations = self.driver._construct_image_nfs_url(img_loc) + + self.assertIn("nfs://host/path/image-id", locations) + def test_extend_volume(self): new_size = 100 @@ -488,3 +737,338 @@ class NetAppNfsDriverTestCase(test.TestCase): size, thin=thin) self.assertEqual(expected, result) + + def test_get_share_mount_and_vol_from_vol_ref(self): + self.mock_object(na_utils, 'resolve_hostname', + mock.Mock(return_value='10.12.142.11')) + 
self.mock_object(os.path, 'isfile', mock.Mock(return_value=True)) + self.driver._mounted_shares = [self.fake_nfs_export_1] + vol_path = "%s/%s" % (self.fake_nfs_export_1, 'test_file_name') + vol_ref = {'source-name': vol_path} + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + + (share, mount, file_path) = ( + self.driver._get_share_mount_and_vol_from_vol_ref(vol_ref)) + + self.assertEqual(self.fake_nfs_export_1, share) + self.assertEqual(self.fake_mount_point, mount) + self.assertEqual('test_file_name', file_path) + + def test_get_share_mount_and_vol_from_vol_ref_with_bad_ref(self): + self.mock_object(na_utils, 'resolve_hostname', + mock.Mock(return_value='10.12.142.11')) + self.driver._mounted_shares = [self.fake_nfs_export_1] + vol_ref = {'source-id': '1234546'} + + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver._get_share_mount_and_vol_from_vol_ref, + vol_ref) + + def test_get_share_mount_and_vol_from_vol_ref_where_not_found(self): + self.mock_object(na_utils, 'resolve_hostname', + mock.Mock(return_value='10.12.142.11')) + self.driver._mounted_shares = [self.fake_nfs_export_1] + vol_path = "%s/%s" % (self.fake_nfs_export_2, 'test_file_name') + vol_ref = {'source-name': vol_path} + + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver._get_share_mount_and_vol_from_vol_ref, + vol_ref) + + def test_get_share_mount_and_vol_from_vol_ref_where_is_dir(self): + self.mock_object(na_utils, 'resolve_hostname', + mock.Mock(return_value='10.12.142.11')) + self.driver._mounted_shares = [self.fake_nfs_export_1] + vol_ref = {'source-name': self.fake_nfs_export_2} + + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver._get_share_mount_and_vol_from_vol_ref, + vol_ref) + + def test_manage_existing(self): + self.mock_object(utils, 'get_file_size', + mock.Mock(return_value=1074253824)) + self.driver._mounted_shares = [self.fake_nfs_export_1] + test_file = 'test_file_name' + volume = fake.FAKE_MANAGE_VOLUME + vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) + vol_ref = {'source-name': vol_path} + self.driver._check_volume_type = mock.Mock() + shutil.move = mock.Mock() + self.mock_object(self.driver, '_execute') + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( + return_value=(self.fake_nfs_export_1, self.fake_mount_point, + test_file)) + mock_get_specs = self.mock_object(na_utils, 'get_volume_extra_specs') + mock_get_specs.return_value = {} + self.mock_object(self.driver, '_do_qos_for_volume') + + location = self.driver.manage_existing(volume, vol_ref) + + self.assertEqual(self.fake_nfs_export_1, location['provider_location']) + self.driver._check_volume_type.assert_called_once_with( + volume, self.fake_nfs_export_1, test_file, {}) + + def test_manage_existing_move_fails(self): + self.mock_object(utils, 'get_file_size', + 
mock.Mock(return_value=1074253824)) + self.driver._mounted_shares = [self.fake_nfs_export_1] + test_file = 'test_file_name' + volume = fake.FAKE_MANAGE_VOLUME + vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) + vol_ref = {'source-name': vol_path} + mock_check_volume_type = self.driver._check_volume_type = mock.Mock() + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( + return_value=(self.fake_nfs_export_1, self.fake_mount_point, + test_file)) + self.driver._execute = mock.Mock(side_effect=OSError) + mock_get_specs = self.mock_object(na_utils, 'get_volume_extra_specs') + mock_get_specs.return_value = {} + self.mock_object(self.driver, '_do_qos_for_volume') + + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.manage_existing, volume, vol_ref) + + mock_check_volume_type.assert_called_once_with( + volume, self.fake_nfs_export_1, test_file, {}) + + def test_unmanage(self): + mock_log = self.mock_object(nfs_base, 'LOG') + volume = {'id': '123', 'provider_location': '/share'} + + retval = self.driver.unmanage(volume) + + self.assertIsNone(retval) + self.assertEqual(1, mock_log.info.call_count) + + def test_manage_existing_get_size(self): + test_file = 'test_file_name' + self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( + return_value=(self.fake_nfs_export_1, self.fake_mount_point, + test_file)) + self.mock_object(utils, 'get_file_size', + mock.Mock(return_value=1073741824)) + self.driver._mounted_shares = [self.fake_nfs_export_1] + volume = fake.FAKE_MANAGE_VOLUME + vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) + vol_ref = {'source-name': vol_path} + + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + + vol_size = self.driver.manage_existing_get_size(volume, vol_ref) + + self.assertEqual(1, vol_size) + + def test_manage_existing_get_size_round_up(self): + test_file = 'test_file_name' + self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( + return_value=(self.fake_nfs_export_1, self.fake_mount_point, + test_file)) + self.mock_object(utils, 'get_file_size', + mock.Mock(return_value=1073760270)) + self.driver._mounted_shares = [self.fake_nfs_export_1] + volume = fake.FAKE_MANAGE_VOLUME + vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) + vol_ref = {'source-name': vol_path} + + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + + vol_size = self.driver.manage_existing_get_size(volume, vol_ref) + + self.assertEqual(2, vol_size) + + def test_manage_existing_get_size_error(self): + test_file = 'test_file_name' + self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( + return_value=(self.fake_nfs_export_1, self.fake_mount_point, + test_file)) + self.driver._mounted_shares = [self.fake_nfs_export_1] + volume = fake.FAKE_MANAGE_VOLUME + vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) + vol_ref = {'source-name': vol_path} + + self.driver._ensure_shares_mounted = mock.Mock() + self.driver._get_mount_point_for_share = mock.Mock( + return_value=self.fake_mount_point) + + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.manage_existing_get_size, + volume, + vol_ref) + + def test_create_consistency_group(self): + model_update = self.driver.create_consistencygroup( + 
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP) + self.assertEqual('available', model_update['status']) + + @ddt.data(True, False) + def test_delete_file(self, volume_not_present): + mock_get_provider_location = self.mock_object( + self.driver, '_get_provider_location') + mock_get_provider_location.return_value = fake.NFS_SHARE + mock_volume_not_present = self.mock_object( + self.driver, '_volume_not_present') + mock_volume_not_present.return_value = volume_not_present + mock_get_volume_path = self.mock_object( + self.driver, '_get_volume_path') + mock_get_volume_path.return_value = fake.PATH + mock_delete = self.mock_object(self.driver, '_delete') + + self.driver._delete_file(fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME) + + mock_get_provider_location.assert_called_once_with(fake.CG_VOLUME_ID) + mock_volume_not_present.assert_called_once_with( + fake.NFS_SHARE, fake.CG_VOLUME_NAME) + if not volume_not_present: + mock_get_volume_path.assert_called_once_with( + fake.NFS_SHARE, fake.CG_VOLUME_NAME) + mock_delete.assert_called_once_with(fake.PATH) + + def test_delete_file_volume_not_present(self): + mock_get_provider_location = self.mock_object( + self.driver, '_get_provider_location') + mock_get_provider_location.return_value = fake.NFS_SHARE + mock_volume_not_present = self.mock_object( + self.driver, '_volume_not_present') + mock_volume_not_present.return_value = True + mock_get_volume_path = self.mock_object( + self.driver, '_get_volume_path') + mock_delete = self.mock_object(self.driver, '_delete') + + self.driver._delete_file(fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME) + + mock_get_provider_location.assert_called_once_with(fake.CG_VOLUME_ID) + mock_volume_not_present.assert_called_once_with( + fake.NFS_SHARE, fake.CG_VOLUME_NAME) + mock_get_volume_path.assert_not_called() + mock_delete.assert_not_called() + + def test_update_consistencygroup(self): + model_update, add_volumes_update, remove_volumes_update = ( + self.driver.update_consistencygroup(fake.CG_CONTEXT, "foo")) + self.assertIsNone(add_volumes_update) + self.assertIsNone(remove_volumes_update) + + def test_create_consistencygroup_from_src(self): + mock_create_volume_from_snapshot = self.mock_object( + self.driver, 'create_volume_from_snapshot') + + model_update, volumes_model_update = ( + self.driver.create_consistencygroup_from_src( + fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME], + cgsnapshot=fake.CG_SNAPSHOT, snapshots=[fake.SNAPSHOT])) + + mock_create_volume_from_snapshot.assert_called_once_with( + fake.VOLUME, fake.SNAPSHOT) + self.assertIsNone(model_update) + self.assertIsNone(volumes_model_update) + + def test_create_consistencygroup_from_src_source_vols(self): + mock_get_snapshot_flexvols = self.mock_object( + self.driver, '_get_backing_flexvol_names') + mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME])) + mock_clone_backing_file = self.mock_object( + self.driver, '_clone_backing_file_for_volume') + fake_snapshot_name = 'snapshot-temp-' + fake.CONSISTENCY_GROUP['id'] + mock_busy = self.mock_object( + self.driver.zapi_client, 'wait_for_busy_snapshot') + + model_update, volumes_model_update = ( + self.driver.create_consistencygroup_from_src( + fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME], + source_cg=fake.CONSISTENCY_GROUP, + source_vols=[fake.CG_VOLUME])) + + mock_get_snapshot_flexvols.assert_called_once_with( + [fake.CG_VOLUME['host']]) + self.driver.zapi_client.create_cg_snapshot.assert_called_once_with( + set([fake.CG_POOL_NAME]), fake_snapshot_name) + 
mock_clone_backing_file.assert_called_once_with( + fake.CG_VOLUME['name'], fake.VOLUME['name'], fake.CG_VOLUME['id'], + source_snapshot=fake_snapshot_name) + mock_busy.assert_called_once_with( + fake.CG_POOL_NAME, fake_snapshot_name) + self.driver.zapi_client.delete_snapshot.assert_called_once_with( + fake.CG_POOL_NAME, fake_snapshot_name) + self.assertIsNone(model_update) + self.assertIsNone(volumes_model_update) + + def test_create_consistencygroup_from_src_invalid_parms(self): + + model_update, volumes_model_update = ( + self.driver.create_consistencygroup_from_src( + fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME])) + + self.assertIn('error', model_update['status']) + + def test_create_cgsnapshot(self): + snapshot = fake.CG_SNAPSHOT + snapshot['volume'] = fake.CG_VOLUME + mock_get_snapshot_flexvols = self.mock_object( + self.driver, '_get_backing_flexvol_names') + mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME])) + mock_clone_backing_file = self.mock_object( + self.driver, '_clone_backing_file_for_volume') + mock_busy = self.mock_object( + self.driver.zapi_client, 'wait_for_busy_snapshot') + + self.driver.create_cgsnapshot( + fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot]) + + mock_get_snapshot_flexvols.assert_called_once_with( + [snapshot['volume']['host']]) + self.driver.zapi_client.create_cg_snapshot.assert_called_once_with( + set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID) + mock_clone_backing_file.assert_called_once_with( + snapshot['volume']['name'], snapshot['name'], + snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID) + mock_busy.assert_called_once_with( + fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID) + self.driver.zapi_client.delete_snapshot.assert_called_once_with( + fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID) + + def test_delete_consistencygroup_volume_delete_failure(self): + self.mock_object(self.driver, '_delete_file', + mock.Mock(side_effect=Exception)) + + model_update, volumes = self.driver.delete_consistencygroup( + fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME]) + + self.assertEqual('deleted', model_update['status']) + self.assertEqual('error_deleting', volumes[0]['status']) + + def test_delete_consistencygroup(self): + mock_delete_file = self.mock_object( + self.driver, '_delete_file') + + model_update, volumes = self.driver.delete_consistencygroup( + fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME]) + + self.assertEqual('deleted', model_update['status']) + self.assertEqual('deleted', volumes[0]['status']) + mock_delete_file.assert_called_once_with( + fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py index 4c0e1ad0f..7989cf9b5 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py @@ -23,6 +23,7 @@ from oslo_service import loopingcall from oslo_utils import units from cinder import exception +from cinder.image import image_utils from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as \ @@ -34,6 +35,8 @@ from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap import nfs_cmode from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode 
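Both CG-snapshot paths tested here (test_create_cgsnapshot and test_create_consistencygroup_from_src_source_vols) assert the same three-phase workflow: take a single backend consistency-group snapshot across every affected flexvol, file-clone each volume's backing file out of that frozen snapshot, then wait for the snapshot to shed its busy state before deleting it. The following is a minimal sketch of that sequence under the names the tests use; the injected callables stand in for driver methods and are assumptions for illustration.

def create_cgsnapshot_sketch(zapi_client, get_backing_flexvol_names,
                             clone_backing_file_for_volume,
                             cgsnapshot, snapshots):
    """Crash-consistent snapshots for a group of NFS-backed volumes."""
    hosts = [snapshot['volume']['host'] for snapshot in snapshots]
    flexvols = get_backing_flexvol_names(hosts)

    # One backend CG snapshot freezes all affected flexvols at the same
    # instant, which is what makes the snapshot set crash-consistent.
    zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id'])

    # File-clone each volume's backing file out of the frozen snapshot.
    for snapshot in snapshots:
        clone_backing_file_for_volume(
            snapshot['volume']['name'], snapshot['name'],
            snapshot['volume']['id'], source_snapshot=cgsnapshot['id'])

    # The clones keep the snapshot busy until they split off; wait per
    # flexvol, then drop the now-unneeded backend snapshot.
    for flexvol in flexvols:
        zapi_client.wait_for_busy_snapshot(flexvol, cgsnapshot['id'])
        zapi_client.delete_snapshot(flexvol, cgsnapshot['id'])

    return None, None  # model_update, snapshots_model_update

Note the busy-wait now lives on the ZAPI client (wait_for_busy_snapshot) rather than in a driver-local _handle_busy_snapshot helper, which is why both the block and NFS test suites in this patch mock it on zapi_client.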
+from cinder.volume.drivers.netapp.dataontap.utils import data_motion +from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume.drivers import nfs from cinder.volume import utils as volume_utils @@ -44,7 +47,10 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): def setUp(self): super(NetAppCmodeNfsDriverTestCase, self).setUp() - kwargs = {'configuration': self.get_config_cmode()} + kwargs = { + 'configuration': self.get_config_cmode(), + 'host': 'openstack@nfscmode', + } with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): @@ -71,18 +77,75 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): config.netapp_copyoffload_tool_path = 'copyoffload_tool_path' return config + @ddt.data({'active_backend_id': None, 'targets': ['dev1', 'dev2']}, + {'active_backend_id': None, 'targets': []}, + {'active_backend_id': 'dev1', 'targets': []}, + {'active_backend_id': 'dev1', 'targets': ['dev1', 'dev2']}) + @ddt.unpack + def test_init_driver_for_replication(self, active_backend_id, + targets): + kwargs = { + 'configuration': self.get_config_cmode(), + 'host': 'openstack@nfscmode', + 'active_backend_id': active_backend_id, + } + self.mock_object(data_motion.DataMotionMixin, + 'get_replication_backend_names', + mock.Mock(return_value=targets)) + with mock.patch.object(utils, 'get_root_helper', + return_value=mock.Mock()): + with mock.patch.object(remotefs_brick, 'RemoteFsClient', + return_value=mock.Mock()): + nfs_driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs) + + self.assertEqual(active_backend_id, + nfs_driver.failed_over_backend_name) + self.assertEqual(active_backend_id is not None, + nfs_driver.failed_over) + self.assertEqual(len(targets) > 0, + nfs_driver.replication_enabled) + @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) @mock.patch.object(client_cmode, 'Client', mock.Mock()) @mock.patch.object(nfs.NfsDriver, 'do_setup') @mock.patch.object(na_utils, 'check_flags') def test_do_setup(self, mock_check_flags, mock_super_do_setup): + self.mock_object( + config_utils, 'get_backend_configuration', + mock.Mock(return_value=self.get_config_cmode())) self.driver.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertTrue(mock_super_do_setup.called) + def test__update_volume_stats(self): + mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') + self.mock_object(self.driver, 'get_filter_function') + self.mock_object(self.driver, 'get_goodness_function') + self.mock_object(self.driver, '_spawn_clean_cache_job') + self.driver.zapi_client = mock.Mock() + self.mock_object( + self.driver, '_get_pool_stats', mock.Mock(return_value={})) + expected_stats = { + 'driver_version': self.driver.VERSION, + 'pools': {}, + 'sparse_copy_volume': True, + 'replication_enabled': False, + 'storage_protocol': 'nfs', + 'vendor_name': 'NetApp', + 'volume_backend_name': 'NetApp_NFS_Cluster_direct', + } + + retval = self.driver._update_volume_stats() + + self.assertIsNone(retval) + self.assertTrue(self.driver._spawn_clean_cache_job.called) + self.assertEqual(1, mock_debug_log.call_count) + self.assertEqual(expected_stats, self.driver._stats) + def test_get_pool_stats(self): + self.driver.zapi_client = mock.Mock() ssc = { 'vola': { 'pool_name': '10.10.10.10:/vola', @@ -92,14 +155,18 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', - 'aggregate': 'aggr1', + 'netapp_aggregate': 
'aggr1', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', + 'consistencygroup_support': True, }, } mock_get_ssc = self.mock_object(self.driver.ssc_library, 'get_ssc', mock.Mock(return_value=ssc)) + mock_get_aggrs = self.mock_object(self.driver.ssc_library, + 'get_ssc_aggregates', + mock.Mock(return_value=['aggr1'])) total_capacity_gb = na_utils.round_down( fake.TOTAL_BYTES // units.Gi, '0.01') @@ -117,6 +184,17 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): '_get_share_capacity_info', mock.Mock(return_value=capacity)) + aggr_capacities = { + 'aggr1': { + 'percent-used': 45, + 'size-available': 59055800320.0, + 'size-total': 107374182400.0, + }, + } + mock_get_aggr_capacities = self.mock_object( + self.driver.zapi_client, 'get_aggregate_capacities', + mock.Mock(return_value=aggr_capacities)) + self.driver.perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=30.0)) @@ -128,9 +206,11 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): 'QoS_support': True, 'reserved_percentage': fake.RESERVED_PERCENTAGE, 'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO, + 'multiattach': True, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, + 'netapp_aggregate_used_percent': 45, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', @@ -140,13 +220,16 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', - 'aggregate': 'aggr1', + 'netapp_aggregate': 'aggr1', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', + 'consistencygroup_support': True, }] self.assertEqual(expected, result) mock_get_ssc.assert_called_once_with() + mock_get_aggrs.assert_called_once_with() + mock_get_aggr_capacities.assert_called_once_with(['aggr1']) @ddt.data({}, None) def test_get_pool_stats_no_ssc_vols(self, ssc): @@ -234,11 +317,71 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): self.assertEqual({}, result) + @ddt.data(['/mnt/img-id1', '/mnt/img-id2'], []) + def test__shortlist_del_eligible_files(self, old_files): + self.driver.zapi_client = mock.Mock() + self.driver.zapi_client.get_file_usage = mock.Mock(return_value='1000') + mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') + self.mock_object(self.driver, '_get_vserver_and_exp_vol', + mock.Mock(return_value=('openstack', 'fake_share'))) + expected_list = [(o, '1000') for o in old_files] + + observed_list = self.driver._shortlist_del_eligible_files( + 'fake_ip:fake_share', old_files) + + self.assertEqual(expected_list, observed_list) + self.assertEqual(1, mock_debug_log.call_count) + + @ddt.data({'ip': None, 'shares': None}, + {'ip': 'fake_ip', 'shares': ['fip:/fsh1']}) + @ddt.unpack + def test__share_match_for_ip_no_match(self, ip, shares): + def side_effect(arg): + if arg == 'fake_ip': + return 'openstack' + return None + + self.mock_object(self.driver, '_get_vserver_for_ip', + mock.Mock(side_effect=side_effect)) + mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') + + retval = self.driver._share_match_for_ip(ip, shares) + + self.assertIsNone(retval) + self.assertEqual(1, mock_debug_log.call_count) + + def test__share_match_for_ip(self): + shares = ['fip:/fsh1'] + self.mock_object(self.driver, '_get_vserver_for_ip', + mock.Mock(return_value='openstack')) + mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') + + retval = self.driver._share_match_for_ip('fip', shares) + + self.assertEqual('fip:/fsh1', retval) + 
self.assertEqual(1, mock_debug_log.call_count) + + def test__get_vserver_for_ip_ignores_zapi_exception(self): + self.driver.zapi_client = mock.Mock() + self.driver.zapi_client.get_if_info_by_ip = mock.Mock( + side_effect=exception.NotFound) + + vserver = self.driver._get_vserver_for_ip('FAKE_IP') + + self.assertIsNone(vserver) + + def test__get_vserver_for_ip(self): + self.driver.zapi_client = mock.Mock() + self.driver.zapi_client.get_if_info_by_ip = mock.Mock( + return_value=fake.get_fake_ifs()) + + vserver = self.driver._get_vserver_for_ip('FAKE_IP') + + self.assertIsNone(vserver) + def test_check_for_setup_error(self): super_check_for_setup_error = self.mock_object( nfs_base.NetAppNfsDriver, 'check_for_setup_error') - mock_start_periodic_tasks = self.mock_object( - self.driver, '_start_periodic_tasks') mock_check_api_permissions = self.mock_object( self.driver.ssc_library, 'check_api_permissions') @@ -246,7 +389,51 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): self.assertEqual(1, super_check_for_setup_error.call_count) mock_check_api_permissions.assert_called_once_with() - self.assertEqual(1, mock_start_periodic_tasks.call_count) + + def test_start_periodic_tasks(self): + + mock_update_ssc = self.mock_object( + self.driver, '_update_ssc') + super_start_periodic_tasks = self.mock_object( + nfs_base.NetAppNfsDriver, '_start_periodic_tasks') + + update_ssc_periodic_task = mock.Mock() + mock_loopingcall = self.mock_object( + loopingcall, 'FixedIntervalLoopingCall', + mock.Mock(return_value=update_ssc_periodic_task)) + + self.driver._start_periodic_tasks() + + mock_loopingcall.assert_called_once_with(mock_update_ssc) + self.assertTrue(update_ssc_periodic_task.start.called) + mock_update_ssc.assert_called_once_with() + super_start_periodic_tasks.assert_called_once_with() + + @ddt.data({'replication_enabled': True, 'failed_over': False}, + {'replication_enabled': True, 'failed_over': True}, + {'replication_enabled': False, 'failed_over': False}) + @ddt.unpack + def test_handle_housekeeping_tasks(self, replication_enabled, failed_over): + ensure_mirrors = self.mock_object(data_motion.DataMotionMixin, + 'ensure_snapmirrors') + self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', + mock.Mock(return_value=fake_ssc.SSC.keys())) + self.driver.replication_enabled = replication_enabled + self.driver.failed_over = failed_over + super_handle_housekeeping_tasks = self.mock_object( + nfs_base.NetAppNfsDriver, '_handle_housekeeping_tasks') + + self.driver._handle_housekeeping_tasks() + + super_handle_housekeeping_tasks.assert_called_once_with() + (self.driver.zapi_client.remove_unused_qos_policy_groups. 
+ assert_called_once_with()) + if replication_enabled and not failed_over: + ensure_mirrors.assert_called_once_with( + self.driver.configuration, self.driver.backend_name, + fake_ssc.SSC.keys()) + else: + self.assertFalse(ensure_mirrors.called) def test_delete_volume(self): fake_provider_location = 'fake_provider_location' @@ -272,7 +459,10 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', mock.Mock(return_value='fake_qos_policy_group_info')) - self.driver.zapi_client = mock.Mock(side_effect=Exception) + self.mock_object( + self.driver.zapi_client, + 'mark_qos_policy_group_for_deletion', + mock.Mock(side_effect=exception.NetAppDriverException)) self.driver.delete_volume(fake_volume) @@ -284,37 +474,33 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): assert_called_once_with('fake_qos_policy_group_info')) def test_delete_backing_file_for_volume(self): - mock_filer_delete = self.mock_object(self.driver, - '_delete_volume_on_filer') + mock_filer_delete = self.mock_object(self.driver, '_delete_file') mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume') self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME) - mock_filer_delete.assert_called_once_with(fake.NFS_VOLUME) + mock_filer_delete.assert_called_once_with( + fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name']) self.assertEqual(0, mock_super_delete.call_count) - def test_delete_backing_file_for_volume_exception_path(self): - mock_filer_delete = self.mock_object(self.driver, - '_delete_volume_on_filer') + @ddt.data(True, False) + def test_delete_backing_file_for_volume_exception_path(self, super_exc): + mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception') + exception_call_count = 2 if super_exc else 1 + mock_filer_delete = self.mock_object(self.driver, '_delete_file') mock_filer_delete.side_effect = [Exception] mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume') + if super_exc: + mock_super_delete.side_effect = [Exception] self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME) - mock_filer_delete.assert_called_once_with(fake.NFS_VOLUME) + mock_filer_delete.assert_called_once_with( + fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name']) mock_super_delete.assert_called_once_with(fake.NFS_VOLUME) - - def test_delete_volume_on_filer(self): - mock_get_vs_ip = self.mock_object(self.driver, '_get_export_ip_path') - mock_get_vs_ip.return_value = (fake.VSERVER_NAME, '/%s' % fake.FLEXVOL) - mock_zapi_delete = self.driver.zapi_client.delete_file - - self.driver._delete_volume_on_filer(fake.NFS_VOLUME) - - mock_zapi_delete.assert_called_once_with( - '/vol/%s/%s' % (fake.FLEXVOL, fake.NFS_VOLUME['name'])) + self.assertEqual(exception_call_count, mock_exception_log.call_count) def test_delete_snapshot(self): mock_get_location = self.mock_object(self.driver, @@ -328,34 +514,41 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): mock_delete_backing.assert_called_once_with(fake.test_snapshot) def test_delete_backing_file_for_snapshot(self): - mock_filer_delete = self.mock_object( - self.driver, '_delete_snapshot_on_filer') + mock_filer_delete = self.mock_object(self.driver, '_delete_file') mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_snapshot') self.driver._delete_backing_file_for_snapshot(fake.test_snapshot) - mock_filer_delete.assert_called_once_with(fake.test_snapshot) + mock_filer_delete.assert_called_once_with( + fake.test_snapshot['volume_id'], fake.test_snapshot['name']) 
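+        # The backing file was removed on the filer, so the generic delete
+        # path in the NFS base driver must not run as a fallback.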
self.assertEqual(0, mock_super_delete.call_count) - def test_delete_backing_file_for_snapshot_exception_path(self): - mock_filer_delete = self.mock_object( - self.driver, '_delete_snapshot_on_filer') + @ddt.data(True, False) + def test_delete_backing_file_for_snapshot_exception_path(self, super_exc): + mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception') + exception_call_count = 2 if super_exc else 1 + mock_filer_delete = self.mock_object(self.driver, '_delete_file') mock_filer_delete.side_effect = [Exception] mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_snapshot') + if super_exc: + mock_super_delete.side_effect = [Exception] self.driver._delete_backing_file_for_snapshot(fake.test_snapshot) - mock_filer_delete.assert_called_once_with(fake.test_snapshot) + mock_filer_delete.assert_called_once_with( + fake.test_snapshot['volume_id'], fake.test_snapshot['name']) mock_super_delete.assert_called_once_with(fake.test_snapshot) + self.assertEqual(exception_call_count, mock_exception_log.call_count) - def test_delete_snapshot_on_filer(self): + def test_delete_file(self): mock_get_vs_ip = self.mock_object(self.driver, '_get_export_ip_path') mock_get_vs_ip.return_value = (fake.VSERVER_NAME, '/%s' % fake.FLEXVOL) mock_zapi_delete = self.driver.zapi_client.delete_file - self.driver._delete_snapshot_on_filer(fake.test_snapshot) + self.driver._delete_file( + fake.test_snapshot['volume_id'], fake.test_snapshot['name']) mock_zapi_delete.assert_called_once_with( '/vol/%s/%s' % (fake.FLEXVOL, fake.test_snapshot['name'])) @@ -508,6 +701,185 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): self.assertEqual(0, mock_get_flex_vol_name.call_count) self.assertEqual(0, mock_file_assign_qos.call_count) + @ddt.data({'share': None, 'is_snapshot': False}, + {'share': None, 'is_snapshot': True}, + {'share': 'fake_share', 'is_snapshot': False}, + {'share': 'fake_share', 'is_snapshot': True}) + @ddt.unpack + def test_clone_backing_file_for_volume(self, share, is_snapshot): + + mock_get_vserver_and_exp_vol = self.mock_object( + self.driver, '_get_vserver_and_exp_vol', + mock.Mock(return_value=(fake.VSERVER_NAME, fake.FLEXVOL))) + + self.driver._clone_backing_file_for_volume( + fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share, + is_snapshot=is_snapshot) + + mock_get_vserver_and_exp_vol.assert_called_once_with( + fake.VOLUME_ID, share) + self.driver.zapi_client.clone_file.assert_called_once_with( + fake.FLEXVOL, fake.FLEXVOL, 'fake_clone', fake.VSERVER_NAME, + is_snapshot=is_snapshot) + + def test__clone_backing_file_for_volume(self): + body = fake.get_fake_net_interface_get_iter_response() + self.driver.zapi_client.get_if_info_by_ip = mock.Mock( + return_value=[netapp_api.NaElement(body)]) + self.driver.zapi_client.get_vol_by_junc_vserver = mock.Mock( + return_value='nfsvol') + self.mock_object(self.driver, '_get_export_ip_path', + mock.Mock(return_value=('127.0.0.1', 'fakepath'))) + + retval = self.driver._clone_backing_file_for_volume( + 'vol', 'clone', 'vol_id', share='share', is_snapshot=True) + + self.assertIsNone(retval) + self.driver.zapi_client.clone_file.assert_called_once_with( + 'nfsvol', 'vol', 'clone', None, is_snapshot=True) + + def test__copy_from_img_service_copyoffload_nonexistent_binary_path(self): + self.mock_object(nfs_cmode.LOG, 'debug') + drv = self.driver + context = object() + volume = {'id': 'vol_id', 'name': 'name'} + image_service = mock.Mock() + image_service.get_location.return_value = (mock.Mock(), mock.Mock()) + image_service.show.return_value 
= {'size': 0} + image_id = 'image_id' + drv._client = mock.Mock() + drv._client.get_api_version = mock.Mock(return_value=(1, 20)) + drv._find_image_in_cache = mock.Mock(return_value=[]) + drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"]) + drv._check_get_nfs_path_segs = mock.Mock( + return_value=("test:test", "dr")) + drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.128.1.1") + drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') + drv._get_host_ip = mock.Mock() + drv._get_provider_location = mock.Mock() + drv._get_export_path = mock.Mock(return_value="dr") + drv._check_share_can_hold_size = mock.Mock() + # Raise error as if the copyoffload file can not be found + drv._clone_file_dst_exists = mock.Mock(side_effect=OSError()) + drv._discover_file_till_timeout = mock.Mock() + + # Verify the original error is propagated + self.assertRaises(OSError, drv._copy_from_img_service, + context, volume, image_service, image_id) + + drv._discover_file_till_timeout.assert_not_called() + + @mock.patch.object(image_utils, 'qemu_img_info') + def test__copy_from_img_service_raw_copyoffload_workflow_success( + self, mock_qemu_img_info): + drv = self.driver + volume = {'id': 'vol_id', 'name': 'name', 'size': 1} + image_id = 'image_id' + context = object() + image_service = mock.Mock() + image_service.get_location.return_value = ('nfs://ip1/openstack/img', + None) + image_service.show.return_value = {'size': 1, 'disk_format': 'raw'} + + drv._check_get_nfs_path_segs =\ + mock.Mock(return_value=('ip1', '/openstack')) + drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') + drv._get_host_ip = mock.Mock(return_value='ip2') + drv._get_export_path = mock.Mock(return_value='/exp_path') + drv._get_provider_location = mock.Mock(return_value='share') + drv._execute = mock.Mock() + drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') + drv._discover_file_till_timeout = mock.Mock(return_value=True) + img_inf = mock.Mock() + img_inf.file_format = 'raw' + mock_qemu_img_info.return_value = img_inf + drv._check_share_can_hold_size = mock.Mock() + drv._move_nfs_file = mock.Mock(return_value=True) + drv._delete_file_at_path = mock.Mock() + drv._clone_file_dst_exists = mock.Mock() + drv._post_clone_image = mock.Mock() + + retval = drv._copy_from_img_service( + context, volume, image_service, image_id) + + self.assertIsNone(retval) + drv._get_ip_verify_on_cluster.assert_any_call('ip1') + drv._get_export_path.assert_called_with('vol_id') + drv._check_share_can_hold_size.assert_called_with('share', 1) + drv._post_clone_image.assert_called_with(volume) + self.assertEqual(1, drv._execute.call_count) + + @mock.patch.object(image_utils, 'convert_image') + @mock.patch.object(image_utils, 'qemu_img_info') + @mock.patch('os.path.exists') + def test__copy_from_img_service_qcow2_copyoffload_workflow_success( + self, mock_exists, mock_qemu_img_info, mock_cvrt_image): + drv = self.driver + volume = {'id': 'vol_id', 'name': 'name', 'size': 1} + image_id = 'image_id' + context = object() + image_service = mock.Mock() + image_service.get_location.return_value = ('nfs://ip1/openstack/img', + None) + image_service.show.return_value = {'size': 1, + 'disk_format': 'qcow2'} + drv._check_get_nfs_path_segs =\ + mock.Mock(return_value=('ip1', '/openstack')) + + drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') + drv._get_host_ip = mock.Mock(return_value='ip2') + drv._get_export_path = mock.Mock(return_value='/exp_path') + drv._get_provider_location = 
mock.Mock(return_value='share') + drv._execute = mock.Mock() + drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') + img_inf = mock.Mock() + img_inf.file_format = 'raw' + mock_qemu_img_info.return_value = img_inf + drv._check_share_can_hold_size = mock.Mock() + + drv._move_nfs_file = mock.Mock(return_value=True) + drv._delete_file_at_path = mock.Mock() + drv._clone_file_dst_exists = mock.Mock() + drv._post_clone_image = mock.Mock() + + retval = drv._copy_from_img_service( + context, volume, image_service, image_id) + + self.assertIsNone(retval) + drv._get_ip_verify_on_cluster.assert_any_call('ip1') + drv._get_export_path.assert_called_with('vol_id') + drv._check_share_can_hold_size.assert_called_with('share', 1) + drv._post_clone_image.assert_called_with(volume) + self.assertEqual(1, mock_cvrt_image.call_count) + self.assertEqual(1, drv._execute.call_count) + self.assertEqual(2, drv._delete_file_at_path.call_count) + self.assertEqual(1, drv._clone_file_dst_exists.call_count) + + def test__copy_from_cache_copyoffload_success(self): + drv = self.driver + volume = {'id': 'vol_id', 'name': 'name', 'size': 1} + image_id = 'image_id' + cache_result = [('ip1:/openstack', 'img-cache-imgid')] + drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') + drv._get_host_ip = mock.Mock(return_value='ip2') + drv._get_export_path = mock.Mock(return_value='/exp_path') + drv._execute = mock.Mock() + drv._register_image_in_cache = mock.Mock() + drv._get_provider_location = mock.Mock(return_value='/share') + drv._post_clone_image = mock.Mock() + + copied = drv._copy_from_cache(volume, image_id, cache_result) + + self.assertTrue(copied) + drv._get_ip_verify_on_cluster.assert_any_call('ip1') + drv._get_export_path.assert_called_with('vol_id') + drv._execute.assert_called_once_with( + 'copyoffload_tool_path', 'ip1', 'ip1', + '/openstack/img-cache-imgid', '/exp_path/name', + run_as_root=False, check_exit_code=0) + drv._post_clone_image.assert_called_with(volume) + drv._get_provider_location.assert_called_with('vol_id') + def test_unmanage(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') @@ -537,29 +909,6 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)]) super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)]) - def test_start_periodic_tasks(self): - - mock_update_ssc = self.mock_object(self.driver, '_update_ssc') - mock_remove_unused_qos_policy_groups = self.mock_object( - self.driver.zapi_client, - 'remove_unused_qos_policy_groups') - - update_ssc_periodic_task = mock.Mock() - harvest_qos_periodic_task = mock.Mock() - side_effect = [update_ssc_periodic_task, harvest_qos_periodic_task] - mock_loopingcall = self.mock_object( - loopingcall, 'FixedIntervalLoopingCall', - mock.Mock(side_effect=side_effect)) - - self.driver._start_periodic_tasks() - - mock_loopingcall.assert_has_calls([ - mock.call(mock_update_ssc), - mock.call(mock_remove_unused_qos_policy_groups)]) - self.assertTrue(update_ssc_periodic_task.start.called) - self.assertTrue(harvest_qos_periodic_task.start.called) - mock_update_ssc.assert_called_once_with() - @ddt.data({'has_space': True, 'type_match': True, 'expected': True}, {'has_space': True, 'type_match': False, 'expected': False}, {'has_space': False, 'type_match': True, 'expected': False}, @@ -743,6 +1092,95 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): self.driver._get_host_ip.assert_called_once_with(fake.VOLUME_ID) 
self.driver._get_export_path.assert_called_once_with(fake.VOLUME_ID) + def test_copy_image_to_volume_copyoffload_non_cached_ssc_update(self): + mock_log = self.mock_object(nfs_cmode, 'LOG') + drv = self.driver + context = object() + volume = {'id': 'vol_id', 'name': 'name'} + image_service = object() + image_id = 'image_id' + drv.zapi_client = mock.Mock() + drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) + drv._copy_from_img_service = mock.Mock() + drv._get_provider_location = mock.Mock(return_value='share') + drv._get_vol_for_share = mock.Mock(return_value='vol') + + retval = drv.copy_image_to_volume( + context, volume, image_service, image_id) + + self.assertIsNone(retval) + drv._copy_from_img_service.assert_called_once_with( + context, volume, image_service, image_id) + self.assertEqual(1, mock_log.debug.call_count) + self.assertEqual(1, mock_log.info.call_count) + + def test_copy_image_to_volume_copyoffload_from_cache_success(self): + mock_info_log = self.mock_object(nfs_cmode.LOG, 'info') + drv = self.driver + context = object() + volume = {'id': 'vol_id', 'name': 'name'} + image_service = object() + image_id = 'image_id' + drv.zapi_client = mock.Mock() + drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) + nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() + drv._get_provider_location = mock.Mock(return_value='share') + drv._get_vol_for_share = mock.Mock(return_value='vol') + drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')]) + drv._copy_from_cache = mock.Mock(return_value=True) + + drv.copy_image_to_volume(context, volume, image_service, image_id) + + drv._copy_from_cache.assert_called_once_with( + volume, image_id, [('share', 'img')]) + self.assertEqual(1, mock_info_log.call_count) + + def test_copy_image_to_volume_copyoffload_from_img_service(self): + drv = self.driver + context = object() + volume = {'id': 'vol_id', 'name': 'name'} + image_service = object() + image_id = 'image_id' + drv.zapi_client = mock.Mock() + drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) + nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() + drv._get_provider_location = mock.Mock(return_value='share') + drv._get_vol_for_share = mock.Mock(return_value='vol') + drv._find_image_in_cache = mock.Mock(return_value=False) + drv._copy_from_img_service = mock.Mock() + + retval = drv.copy_image_to_volume( + context, volume, image_service, image_id) + + self.assertIsNone(retval) + drv._copy_from_img_service.assert_called_once_with( + context, volume, image_service, image_id) + + def test_copy_image_to_volume_copyoffload_failure(self): + mock_log = self.mock_object(nfs_cmode, 'LOG') + drv = self.driver + context = object() + volume = {'id': 'vol_id', 'name': 'name'} + image_service = object() + image_id = 'image_id' + drv.zapi_client = mock.Mock() + drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) + drv._copy_from_img_service = mock.Mock(side_effect=Exception()) + nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() + drv._get_provider_location = mock.Mock(return_value='share') + drv._get_vol_for_share = mock.Mock(return_value='vol') + + retval = drv.copy_image_to_volume( + context, volume, image_service, image_id) + + self.assertIsNone(retval) + drv._copy_from_img_service.assert_called_once_with( + context, volume, image_service, image_id) + nfs_base.NetAppNfsDriver.copy_image_to_volume. 
\
+            assert_called_once_with(context, volume, image_service, image_id)
+        mock_log.info.assert_not_called()
+        self.assertEqual(1, mock_log.exception.call_count)
+
     def test_copy_from_remote_cache(self):
         source_ip = '192.0.1.1'
         source_path = '/openstack/img-cache-imgid'
@@ -832,3 +1270,107 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
         self.driver._copy_from_remote_cache.assert_called_once_with(
             fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0])
         self.assertFalse(self.driver._post_clone_image.called)
+
+    @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']},
+              {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']},
+              {'secondary_id': 'dev1', 'configured_targets': []},
+              {'secondary_id': None, 'configured_targets': []})
+    @ddt.unpack
+    def test_failover_host_invalid_replication_target(self, secondary_id,
+                                                      configured_targets):
+        """This test executes a method in the DataMotionMixin."""
+        self.driver.backend_name = 'dev0'
+        self.mock_object(data_motion.DataMotionMixin,
+                         'get_replication_backend_names',
+                         mock.Mock(return_value=configured_targets))
+        complete_failover_call = self.mock_object(
+            data_motion.DataMotionMixin, '_complete_failover')
+
+        self.assertRaises(exception.InvalidReplicationTarget,
+                          self.driver.failover_host, 'fake_context', [],
+                          secondary_id=secondary_id)
+        self.assertFalse(complete_failover_call.called)
+
+    def test_failover_host_unable_to_failover(self):
+        """This test executes a method in the DataMotionMixin."""
+        self.driver.backend_name = 'dev0'
+        self.mock_object(
+            data_motion.DataMotionMixin, '_complete_failover',
+            mock.Mock(side_effect=exception.NetAppDriverException))
+        self.mock_object(data_motion.DataMotionMixin,
+                         'get_replication_backend_names',
+                         mock.Mock(return_value=['dev1', 'dev2']))
+        self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names',
+                         mock.Mock(return_value=fake_ssc.SSC.keys()))
+        self.mock_object(self.driver, '_update_zapi_client')
+
+        self.assertRaises(exception.UnableToFailOver,
+                          self.driver.failover_host, 'fake_context', [],
+                          secondary_id='dev1')
+        data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
+            'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [],
+            failover_target='dev1')
+        self.assertFalse(self.driver._update_zapi_client.called)
+
+    def test_failover_host(self):
+        """This test executes a method in the DataMotionMixin."""
+        self.driver.backend_name = 'dev0'
+        self.mock_object(data_motion.DataMotionMixin, '_complete_failover',
+                         mock.Mock(return_value=('dev1', [])))
+        self.mock_object(data_motion.DataMotionMixin,
+                         'get_replication_backend_names',
+                         mock.Mock(return_value=['dev1', 'dev2']))
+        self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names',
+                         mock.Mock(return_value=fake_ssc.SSC.keys()))
+        self.mock_object(self.driver, '_update_zapi_client')
+
+        actual_active, vol_updates = self.driver.failover_host(
+            'fake_context', [], secondary_id='dev1')
+
+        data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
+            'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [],
+            failover_target='dev1')
+        self.driver._update_zapi_client.assert_called_once_with('dev1')
+        self.assertTrue(self.driver.failed_over)
+        self.assertEqual('dev1', self.driver.failed_over_backend_name)
+        self.assertEqual('dev1', actual_active)
+        self.assertEqual([], vol_updates)
+
+    def test_delete_cgsnapshot(self):
+        mock_delete_backing_file = self.mock_object(
+            self.driver, '_delete_backing_file_for_snapshot')
+        snapshots = [fake.CG_SNAPSHOT]
+
+        model_update, snapshots_model_update = (
+            
self.driver.delete_cgsnapshot( + fake.CG_CONTEXT, fake.CG_SNAPSHOT, snapshots)) + + mock_delete_backing_file.assert_called_once_with(fake.CG_SNAPSHOT) + self.assertIsNone(model_update) + self.assertIsNone(snapshots_model_update) + + def test_get_snapshot_backing_flexvol_names(self): + snapshots = [ + {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, + {'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}}, + {'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}}, + {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, + ] + + ssc = { + 'volume1': {'pool_name': '/fake/volume1', }, + 'volume2': {'pool_name': '/fake/volume2', }, + 'volume3': {'pool_name': '/fake/volume3', }, + } + + mock_get_ssc = self.mock_object(self.driver.ssc_library, 'get_ssc') + mock_get_ssc.return_value = ssc + + hosts = [snap['volume']['host'] for snap in snapshots] + flexvols = self.driver._get_backing_flexvol_names(hosts) + + mock_get_ssc.assert_called_once_with() + self.assertEqual(3, len(flexvols)) + self.assertIn('volume1', flexvols) + self.assertIn('volume2', flexvols) + self.assertIn('volume3', flexvols) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py index fad7bf7bf..e104f9098 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py @@ -13,6 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. +from cinder.volume import configuration +from cinder.volume import driver +from cinder.volume.drivers.netapp import options as na_opts + SSC_VSERVER = 'fake_vserver' SSC_VOLUMES = ('volume1', 'volume2') SSC_VOLUME_MAP = { @@ -30,24 +34,26 @@ SSC = { 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', - 'aggregate': 'aggr1', + 'netapp_aggregate': 'aggr1', 'netapp_compression': 'false', 'netapp_dedup': 'true', 'netapp_mirrored': 'false', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', + 'netapp_hybrid_aggregate': False, 'pool_name': 'volume1', }, 'volume2': { 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'netapp_thin_provisioned': 'true', - 'aggregate': 'aggr2', + 'netapp_aggregate': 'aggr2', 'netapp_compression': 'true', 'netapp_dedup': 'true', 'netapp_mirrored': 'true', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'FCAL', + 'netapp_hybrid_aggregate': True, 'pool_name': 'volume2', }, } @@ -57,13 +63,13 @@ SSC_FLEXVOL_INFO = { 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', - 'aggregate': 'aggr1', + 'netapp_aggregate': 'aggr1', }, 'volume2': { 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'netapp_thin_provisioned': 'true', - 'aggregate': 'aggr2', + 'netapp_aggregate': 'aggr2', }, } @@ -91,9 +97,39 @@ SSC_AGGREGATE_INFO = { 'volume1': { 'netapp_disk_type': 'SSD', 'netapp_raid_type': 'raid_dp', + 'netapp_hybrid_aggregate': False, }, 'volume2': { 'netapp_disk_type': 'FCAL', 'netapp_raid_type': 'raid_dp', + 'netapp_hybrid_aggregate': True, }, } + +PROVISIONING_OPTS = { + 'aggregate': 'fake_aggregate', + 'thin_provisioned': True, + 'snapshot_policy': None, + 'language': 'en_US', + 'dedupe_enabled': False, + 'compression_enabled': False, + 'snapshot_reserve': '12', + 'volume_type': 'rw', + 'size': 20, +} + + +def get_fake_cmode_config(backend_name): + + config = 
configuration.Configuration(driver.volume_opts, + config_group=backend_name) + config.append_config_values(na_opts.netapp_proxy_opts) + config.append_config_values(na_opts.netapp_connection_opts) + config.append_config_values(na_opts.netapp_transport_opts) + config.append_config_values(na_opts.netapp_basicauth_opts) + config.append_config_values(na_opts.netapp_provisioning_opts) + config.append_config_values(na_opts.netapp_cluster_opts) + config.append_config_values(na_opts.netapp_san_opts) + config.append_config_values(na_opts.netapp_replication_opts) + + return config diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py index fe41185c5..d8df5833a 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py @@ -104,6 +104,12 @@ class CapabilitiesLibraryTestCase(test.TestCase): self.assertEqual({}, result) + def test_get_ssc_aggregates(self): + + result = self.ssc_library.get_ssc_aggregates() + + self.assertEqual(list(fake.SSC_AGGREGATES), result) + def test_update_ssc(self): mock_get_ssc_flexvol_info = self.mock_object( @@ -147,6 +153,16 @@ class CapabilitiesLibraryTestCase(test.TestCase): mock_get_ssc_aggregate_info.assert_has_calls([ mock.call('aggr1'), mock.call('aggr2')]) + def test__update_for_failover(self): + self.mock_object(self.ssc_library, 'update_ssc') + flexvol_map = {'volume1': fake.SSC_VOLUME_MAP['volume1']} + mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT') + + self.ssc_library._update_for_failover(mock_client, flexvol_map) + + self.assertEqual(mock_client, self.ssc_library.zapi_client) + self.ssc_library.update_ssc.assert_called_once_with(flexvol_map) + @ddt.data({'lun_space_guarantee': True}, {'lun_space_guarantee': False}) @ddt.unpack @@ -165,7 +181,7 @@ class CapabilitiesLibraryTestCase(test.TestCase): 'netapp_thin_provisioned': 'true', 'thick_provisioning_support': False, 'thin_provisioning_support': True, - 'aggregate': 'fake_aggr1', + 'netapp_aggregate': 'fake_aggr1', } self.assertEqual(expected, result) self.zapi_client.get_flexvol.assert_called_once_with( @@ -192,7 +208,7 @@ class CapabilitiesLibraryTestCase(test.TestCase): 'netapp_thin_provisioned': 'false', 'thick_provisioning_support': lun_space_guarantee, 'thin_provisioning_support': not lun_space_guarantee, - 'aggregate': 'fake_aggr1', + 'netapp_aggregate': 'fake_aggr1', } self.assertEqual(expected, result) self.zapi_client.get_flexvol.assert_called_once_with( @@ -217,7 +233,7 @@ class CapabilitiesLibraryTestCase(test.TestCase): 'netapp_thin_provisioned': 'true', 'thick_provisioning_support': False, 'thin_provisioning_support': True, - 'aggregate': 'fake_aggr1', + 'netapp_aggregate': 'fake_aggr1', } self.assertEqual(expected, result) self.zapi_client.get_flexvol.assert_called_once_with( @@ -245,7 +261,7 @@ class CapabilitiesLibraryTestCase(test.TestCase): 'netapp_thin_provisioned': 'false', 'thick_provisioning_support': not nfs_sparsed_volumes, 'thin_provisioning_support': nfs_sparsed_volumes, - 'aggregate': 'fake_aggr1', + 'netapp_aggregate': 'fake_aggr1', } self.assertEqual(expected, result) self.zapi_client.get_flexvol.assert_called_once_with( @@ -285,25 +301,45 @@ class CapabilitiesLibraryTestCase(test.TestCase): def test_get_ssc_aggregate_info(self): - self.mock_object( - self.ssc_library.zapi_client, 'get_aggregate_disk_type', - mock.Mock(return_value=fake_client.AGGR_DISK_TYPE)) 
self.mock_object( self.ssc_library.zapi_client, 'get_aggregate', mock.Mock(return_value=fake_client.AGGR_INFO_SSC)) + self.mock_object( + self.ssc_library.zapi_client, 'get_aggregate_disk_types', + mock.Mock(return_value=fake_client.AGGREGATE_DISK_TYPES)) result = self.ssc_library._get_ssc_aggregate_info( fake_client.VOLUME_AGGREGATE_NAME) expected = { - 'netapp_disk_type': fake_client.AGGR_DISK_TYPE, - 'netapp_raid_type': fake_client.AGGR_RAID_TYPE, + 'netapp_disk_type': fake_client.AGGREGATE_DISK_TYPES, + 'netapp_raid_type': fake_client.AGGREGATE_RAID_TYPE, + 'netapp_hybrid_aggregate': 'true', } self.assertEqual(expected, result) - self.zapi_client.get_aggregate_disk_type.assert_called_once_with( - fake_client.VOLUME_AGGREGATE_NAME) self.zapi_client.get_aggregate.assert_called_once_with( fake_client.VOLUME_AGGREGATE_NAME) + self.zapi_client.get_aggregate_disk_types.assert_called_once_with( + fake_client.VOLUME_AGGREGATE_NAME) + + def test_get_ssc_aggregate_info_not_found(self): + + self.mock_object( + self.ssc_library.zapi_client, 'get_aggregate', + mock.Mock(return_value={})) + self.mock_object( + self.ssc_library.zapi_client, 'get_aggregate_disk_types', + mock.Mock(return_value=None)) + + result = self.ssc_library._get_ssc_aggregate_info( + fake_client.VOLUME_AGGREGATE_NAME) + + expected = { + 'netapp_disk_type': None, + 'netapp_raid_type': None, + 'netapp_hybrid_aggregate': None, + } + self.assertEqual(expected, result) def test_get_matching_flexvols_for_extra_specs(self): diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py new file mode 100644 index 000000000..6906d9f33 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py @@ -0,0 +1,749 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
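+
+"""Unit tests for NetApp Data ONTAP data motion (SnapMirror replication)."""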
+
+import copy
+import time
+
+import ddt
+import mock
+from oslo_config import cfg
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes
+from cinder.volume import configuration
+from cinder.volume import driver
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap.utils import data_motion
+from cinder.volume.drivers.netapp.dataontap.utils import utils
+from cinder.volume.drivers.netapp import options as na_opts
+
+
+CONF = cfg.CONF
+
+
+@ddt.ddt
+class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
+
+    def setUp(self):
+        super(NetAppCDOTDataMotionMixinTestCase, self).setUp()
+        self.dm_mixin = data_motion.DataMotionMixin()
+        self.src_backend = 'backend1'
+        self.dest_backend = 'backend2'
+        self.src_vserver = 'source_vserver'
+        self.dest_vserver = 'dest_vserver'
+        self._setup_mock_config()
+        self.mock_cmode_client = self.mock_object(client_cmode, 'Client')
+        self.src_flexvol_name = 'volume_c02d497a_236c_4852_812a_0d39373e312a'
+        self.dest_flexvol_name = self.src_flexvol_name
+        self.mock_src_client = mock.Mock()
+        self.mock_dest_client = mock.Mock()
+        self.config = fakes.get_fake_cmode_config(self.src_backend)
+        self.mock_object(utils, 'get_backend_configuration',
+                         mock.Mock(side_effect=[self.mock_dest_config,
+                                                self.mock_src_config]))
+        self.mock_object(utils, 'get_client_for_backend',
+                         mock.Mock(side_effect=[self.mock_dest_client,
+                                                self.mock_src_client]))
+
+    def _setup_mock_config(self):
+        self.mock_src_config = configuration.Configuration(
+            driver.volume_opts, config_group=self.src_backend)
+        self.mock_dest_config = configuration.Configuration(
+            driver.volume_opts, config_group=self.dest_backend)
+
+        for config in (self.mock_src_config, self.mock_dest_config):
+            config.append_config_values(na_opts.netapp_proxy_opts)
+            config.append_config_values(na_opts.netapp_connection_opts)
+            config.append_config_values(na_opts.netapp_transport_opts)
+            config.append_config_values(na_opts.netapp_basicauth_opts)
+            config.append_config_values(na_opts.netapp_provisioning_opts)
+            config.append_config_values(na_opts.netapp_cluster_opts)
+            config.append_config_values(na_opts.netapp_san_opts)
+            config.append_config_values(na_opts.netapp_replication_opts)
+            config.netapp_snapmirror_quiesce_timeout = 10
+
+        CONF.set_override('netapp_vserver', self.src_vserver,
+                          group=self.src_backend, enforce_type=True)
+        CONF.set_override('netapp_vserver', self.dest_vserver,
+                          group=self.dest_backend, enforce_type=True)
+
+    @ddt.data(None, [], [{'some_key': 'some_value'}])
+    def test_get_replication_backend_names_none(self, replication_device):
+        CONF.set_override('replication_device', replication_device,
+                          group=self.src_backend, enforce_type=True)
+
+        devices = self.dm_mixin.get_replication_backend_names(self.config)
+
+        self.assertEqual(0, len(devices))
+
+    @ddt.data([{'backend_id': 'xyzzy'}, {'backend_id': 'spoon!'}],
+              [{'backend_id': 'foobar'}])
+    def test_get_replication_backend_names_valid(self, replication_device):
+        CONF.set_override('replication_device', replication_device,
+                          group=self.src_backend, enforce_type=True)
+
+        devices = self.dm_mixin.get_replication_backend_names(self.config)
+
+        self.assertEqual(len(replication_device), len(devices))
+
+    def test_get_snapmirrors(self):
+        self.mock_object(self.mock_dest_client, 'get_snapmirrors')
+
+        self.dm_mixin.get_snapmirrors(self.src_backend,
+ self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + self.mock_dest_client.get_snapmirrors.assert_called_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name, + desired_attributes=['relationship-status', + 'mirror-state', + 'source-vserver', + 'source-volume', + 'destination-vserver', + 'destination-volume', + 'last-transfer-end-timestamp', + 'lag-time']) + self.assertEqual(1, self.mock_dest_client.get_snapmirrors.call_count) + + @ddt.data([], ['backend1'], ['backend1', 'backend2']) + def test_get_replication_backend_stats(self, replication_backend_names): + self.mock_object(self.dm_mixin, 'get_replication_backend_names', + mock.Mock(return_value=replication_backend_names)) + enabled_stats = { + 'replication_count': len(replication_backend_names), + 'replication_targets': replication_backend_names, + 'replication_type': 'async', + } + expected_stats = { + 'replication_enabled': len(replication_backend_names) > 0, + } + if len(replication_backend_names) > 0: + expected_stats.update(enabled_stats) + + actual_stats = self.dm_mixin.get_replication_backend_stats(self.config) + + self.assertDictMatch(expected_stats, actual_stats) + + @ddt.data(None, [], + [{'backend_id': 'replication_backend_2', 'aggr2': 'aggr20'}]) + def test_get_replication_aggregate_map_none(self, replication_aggr_map): + + self.mock_object(utils, 'get_backend_configuration', + mock.Mock(return_value=self.config)) + CONF.set_override('netapp_replication_aggregate_map', + replication_aggr_map, + group=self.src_backend, enforce_type=True) + + aggr_map = self.dm_mixin._get_replication_aggregate_map( + self.src_backend, 'replication_backend_1') + + self.assertEqual(0, len(aggr_map)) + + @ddt.data([{'backend_id': 'replication_backend_1', 'aggr1': 'aggr10'}], + [{'backend_id': 'replication_backend_1', 'aggr1': 'aggr10'}, + {'backend_id': 'replication_backend_2', 'aggr2': 'aggr20'}]) + def test_get_replication_aggregate_map_valid(self, replication_aggr_map): + self.mock_object(utils, 'get_backend_configuration', + mock.Mock(return_value=self.config)) + CONF.set_override('netapp_replication_aggregate_map', + replication_aggr_map, group=self.src_backend, + enforce_type=True) + + aggr_map = self.dm_mixin._get_replication_aggregate_map( + self.src_backend, 'replication_backend_1') + + self.assertDictMatch({'aggr1': 'aggr10'}, aggr_map) + + @ddt.data(True, False) + def test_create_snapmirror_dest_flexvol_exists(self, dest_exists): + mock_dest_client = mock.Mock() + self.mock_object(mock_dest_client, 'flexvol_exists', + mock.Mock(return_value=dest_exists)) + self.mock_object(mock_dest_client, 'get_snapmirrors', + mock.Mock(return_value=None)) + create_destination_flexvol = self.mock_object( + self.dm_mixin, 'create_destination_flexvol') + self.mock_object(utils, 'get_client_for_backend', + mock.Mock(return_value=mock_dest_client)) + + self.dm_mixin.create_snapmirror(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + if not dest_exists: + create_destination_flexvol.assert_called_once_with( + self.src_backend, self.dest_backend, self.src_flexvol_name, + self.dest_flexvol_name) + else: + self.assertFalse(create_destination_flexvol.called) + mock_dest_client.create_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, self.dest_vserver, + self.dest_flexvol_name, schedule='hourly') + mock_dest_client.initialize_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, self.dest_vserver, + 
self.dest_flexvol_name)
+
+    @ddt.data('uninitialized', 'broken-off', 'snapmirrored')
+    def test_create_snapmirror_snapmirror_exists_state(self, mirror_state):
+        mock_dest_client = mock.Mock()
+        existing_snapmirrors = [{'mirror-state': mirror_state}]
+        self.mock_object(self.dm_mixin, 'create_destination_flexvol')
+        self.mock_object(utils, 'get_client_for_backend',
+                         mock.Mock(return_value=mock_dest_client))
+        self.mock_object(mock_dest_client, 'flexvol_exists',
+                         mock.Mock(return_value=True))
+        self.mock_object(mock_dest_client, 'get_snapmirrors',
+                         mock.Mock(return_value=existing_snapmirrors))
+
+        self.dm_mixin.create_snapmirror(self.src_backend,
+                                        self.dest_backend,
+                                        self.src_flexvol_name,
+                                        self.dest_flexvol_name)
+
+        self.assertFalse(mock_dest_client.create_snapmirror.called)
+        self.assertFalse(mock_dest_client.initialize_snapmirror.called)
+        self.assertFalse(self.dm_mixin.create_destination_flexvol.called)
+        if mirror_state == 'snapmirrored':
+            self.assertFalse(mock_dest_client.resume_snapmirror.called)
+            self.assertFalse(mock_dest_client.resync_snapmirror.called)
+        else:
+            mock_dest_client.resume_snapmirror.assert_called_once_with(
+                self.src_vserver, self.src_flexvol_name,
+                self.dest_vserver, self.dest_flexvol_name)
+            mock_dest_client.resync_snapmirror.assert_called_once_with(
+                self.src_vserver, self.src_flexvol_name,
+                self.dest_vserver, self.dest_flexvol_name)
+
+    @ddt.data('resume_snapmirror', 'resync_snapmirror')
+    def test_create_snapmirror_snapmirror_exists_repair_exception(self,
+                                                                  failed_call):
+        mock_dest_client = mock.Mock()
+        mock_exception_log = self.mock_object(data_motion.LOG, 'exception')
+        existing_snapmirrors = [{'mirror-state': 'broken-off'}]
+        self.mock_object(self.dm_mixin, 'create_destination_flexvol')
+        self.mock_object(utils, 'get_client_for_backend',
+                         mock.Mock(return_value=mock_dest_client))
+        self.mock_object(mock_dest_client, 'flexvol_exists',
+                         mock.Mock(return_value=True))
+        self.mock_object(mock_dest_client, 'get_snapmirrors',
+                         mock.Mock(return_value=existing_snapmirrors))
+        self.mock_object(mock_dest_client, failed_call,
+                         mock.Mock(side_effect=netapp_api.NaApiError))
+
+        self.dm_mixin.create_snapmirror(self.src_backend,
+                                        self.dest_backend,
+                                        self.src_flexvol_name,
+                                        self.dest_flexvol_name)
+
+        self.assertFalse(mock_dest_client.create_snapmirror.called)
+        self.assertFalse(mock_dest_client.initialize_snapmirror.called)
+        self.assertFalse(self.dm_mixin.create_destination_flexvol.called)
+        mock_dest_client.resume_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name,
+            self.dest_vserver, self.dest_flexvol_name)
+        self.assertEqual(1, mock_exception_log.call_count)
+
+    def test_delete_snapmirror(self):
+        mock_src_client = mock.Mock()
+        mock_dest_client = mock.Mock()
+        self.mock_object(utils, 'get_client_for_backend',
+                         mock.Mock(side_effect=[mock_dest_client,
+                                                mock_src_client]))
+
+        self.dm_mixin.delete_snapmirror(self.src_backend,
+                                        self.dest_backend,
+                                        self.src_flexvol_name,
+                                        self.dest_flexvol_name)
+
+        mock_dest_client.abort_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name, clear_checkpoint=False)
+        mock_dest_client.delete_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name)
+        mock_src_client.release_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name)
+
+    def test_delete_snapmirror_does_not_exist(self):
+        """Ensure delete succeeds when the snapmirror does not exist."""
+        mock_src_client = mock.Mock()
+        mock_dest_client = mock.Mock()
+        mock_dest_client.abort_snapmirror.side_effect = netapp_api.NaApiError(
+            code=netapp_api.EAPIERROR)
+        self.mock_object(utils, 'get_client_for_backend',
+                         mock.Mock(side_effect=[mock_dest_client,
+                                                mock_src_client]))
+
+        self.dm_mixin.delete_snapmirror(self.src_backend,
+                                        self.dest_backend,
+                                        self.src_flexvol_name,
+                                        self.dest_flexvol_name)
+
+        mock_dest_client.abort_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name, clear_checkpoint=False)
+        mock_dest_client.delete_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name)
+        mock_src_client.release_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name)
+
+    def test_delete_snapmirror_error_deleting(self):
+        """Ensure delete succeeds when deleting the snapmirror errors."""
+        mock_src_client = mock.Mock()
+        mock_dest_client = mock.Mock()
+        mock_dest_client.delete_snapmirror.side_effect = netapp_api.NaApiError(
+            code=netapp_api.ESOURCE_IS_DIFFERENT
+        )
+        self.mock_object(utils, 'get_client_for_backend',
+                         mock.Mock(side_effect=[mock_dest_client,
+                                                mock_src_client]))
+
+        self.dm_mixin.delete_snapmirror(self.src_backend,
+                                        self.dest_backend,
+                                        self.src_flexvol_name,
+                                        self.dest_flexvol_name)
+
+        mock_dest_client.abort_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name, clear_checkpoint=False)
+        mock_dest_client.delete_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name)
+        mock_src_client.release_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name)
+
+    def test_delete_snapmirror_error_releasing(self):
+        """Ensure delete succeeds when releasing the snapmirror errors."""
+        mock_src_client = mock.Mock()
+        mock_dest_client = mock.Mock()
+        mock_src_client.release_snapmirror.side_effect = (
+            netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND))
+        self.mock_object(utils, 'get_client_for_backend',
+                         mock.Mock(side_effect=[mock_dest_client,
+                                                mock_src_client]))
+
+        self.dm_mixin.delete_snapmirror(self.src_backend,
+                                        self.dest_backend,
+                                        self.src_flexvol_name,
+                                        self.dest_flexvol_name)
+
+        mock_dest_client.abort_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name, clear_checkpoint=False)
+        mock_dest_client.delete_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name)
+        mock_src_client.release_snapmirror.assert_called_once_with(
+            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
+            self.dest_flexvol_name)
+
+    def test_delete_snapmirror_without_release(self):
+        mock_src_client = mock.Mock()
+        mock_dest_client = mock.Mock()
+        self.mock_object(utils, 'get_client_for_backend',
+                         mock.Mock(side_effect=[mock_dest_client,
+                                                mock_src_client]))
+
+        self.dm_mixin.delete_snapmirror(self.src_backend,
+                                        self.dest_backend,
+                                        self.src_flexvol_name,
+                                        self.dest_flexvol_name,
+                                        release=False)
+
+        
mock_dest_client.abort_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, self.dest_vserver, + self.dest_flexvol_name, clear_checkpoint=False) + mock_dest_client.delete_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, self.dest_vserver, + self.dest_flexvol_name) + self.assertFalse(mock_src_client.release_snapmirror.called) + + def test_delete_snapmirror_source_unreachable(self): + mock_src_client = mock.Mock() + mock_dest_client = mock.Mock() + self.mock_object(utils, 'get_client_for_backend', + mock.Mock(side_effect=[mock_dest_client, + Exception])) + + self.dm_mixin.delete_snapmirror(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + mock_dest_client.abort_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, self.dest_vserver, + self.dest_flexvol_name, clear_checkpoint=False) + mock_dest_client.delete_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, self.dest_vserver, + self.dest_flexvol_name) + + self.assertFalse(mock_src_client.release_snapmirror.called) + + def test_quiesce_then_abort_timeout(self): + self.mock_object(time, 'sleep') + mock_get_snapmirrors = mock.Mock( + return_value=[{'relationship-status': 'transferring'}]) + self.mock_object(self.mock_dest_client, 'get_snapmirrors', + mock_get_snapmirrors) + + self.dm_mixin.quiesce_then_abort(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + self.mock_dest_client.get_snapmirrors.assert_called_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name, + desired_attributes=['relationship-status', 'mirror-state']) + self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) + self.mock_dest_client.quiesce_snapmirror.assert_called_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name) + self.mock_dest_client.abort_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, self.dest_vserver, + self.dest_flexvol_name, clear_checkpoint=False) + + def test_update_snapmirror(self): + self.mock_object(self.mock_dest_client, 'get_snapmirrors') + + self.dm_mixin.update_snapmirror(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + self.mock_dest_client.update_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name) + + def test_quiesce_then_abort_wait_for_quiesced(self): + self.mock_object(time, 'sleep') + self.mock_object(self.mock_dest_client, 'get_snapmirrors', + mock.Mock(side_effect=[ + [{'relationship-status': 'transferring'}], + [{'relationship-status': 'quiesced'}]])) + + self.dm_mixin.quiesce_then_abort(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + self.mock_dest_client.get_snapmirrors.assert_called_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name, + desired_attributes=['relationship-status', 'mirror-state']) + self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) + self.mock_dest_client.quiesce_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name) + + def test_break_snapmirror(self): + self.mock_object(self.dm_mixin, 'quiesce_then_abort') + + self.dm_mixin.break_snapmirror(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + 
self.dest_flexvol_name) + + self.dm_mixin.quiesce_then_abort.assert_called_once_with( + self.src_backend, self.dest_backend, + self.src_flexvol_name, self.dest_flexvol_name) + self.mock_dest_client.break_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name) + self.mock_dest_client.mount_flexvol.assert_called_once_with( + self.dest_flexvol_name) + + def test_break_snapmirror_wait_for_quiesced(self): + self.mock_object(self.dm_mixin, 'quiesce_then_abort') + + self.dm_mixin.break_snapmirror(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + self.dm_mixin.quiesce_then_abort.assert_called_once_with( + self.src_backend, self.dest_backend, + self.src_flexvol_name, self.dest_flexvol_name,) + self.mock_dest_client.break_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name) + self.mock_dest_client.mount_flexvol.assert_called_once_with( + self.dest_flexvol_name) + + def test_resync_snapmirror(self): + self.dm_mixin.resync_snapmirror(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + self.mock_dest_client.resync_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, + self.dest_vserver, self.dest_flexvol_name) + + def test_resume_snapmirror(self): + self.dm_mixin.resume_snapmirror(self.src_backend, + self.dest_backend, + self.src_flexvol_name, + self.dest_flexvol_name) + + self.mock_dest_client.resume_snapmirror.assert_called_once_with( + self.src_vserver, self.src_flexvol_name, self.dest_vserver, + self.dest_flexvol_name) + + @ddt.data({'size': 1, 'aggr_map': {}}, + {'size': 1, 'aggr_map': {'aggr02': 'aggr20'}}, + {'size': None, 'aggr_map': {'aggr01': 'aggr10'}}) + @ddt.unpack + def test_create_destination_flexvol_exception(self, size, aggr_map): + self.mock_object( + self.mock_src_client, 'get_provisioning_options_from_flexvol', + mock.Mock(return_value={'size': size, 'aggregate': 'aggr01'})) + self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', + mock.Mock(return_value=aggr_map)) + mock_client_call = self.mock_object( + self.mock_dest_client, 'create_flexvol') + + self.assertRaises(exception.NetAppDriverException, + self.dm_mixin.create_destination_flexvol, + self.src_backend, self.dest_backend, + self.src_flexvol_name, self.dest_flexvol_name) + if size: + self.dm_mixin._get_replication_aggregate_map.\ + assert_called_once_with(self.src_backend, self.dest_backend) + else: + self.assertFalse( + self.dm_mixin._get_replication_aggregate_map.called) + self.assertFalse(mock_client_call.called) + + def test_create_destination_flexvol(self): + aggr_map = { + fakes.PROVISIONING_OPTS['aggregate']: 'aggr01', + 'aggr20': 'aggr02', + } + provisioning_opts = copy.deepcopy(fakes.PROVISIONING_OPTS) + expected_prov_opts = copy.deepcopy(fakes.PROVISIONING_OPTS) + expected_prov_opts.pop('volume_type', None) + expected_prov_opts.pop('size', None) + expected_prov_opts.pop('aggregate', None) + mock_get_provisioning_opts_call = self.mock_object( + self.mock_src_client, 'get_provisioning_options_from_flexvol', + mock.Mock(return_value=provisioning_opts)) + self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', + mock.Mock(return_value=aggr_map)) + mock_client_call = self.mock_object( + self.mock_dest_client, 'create_flexvol') + + retval = self.dm_mixin.create_destination_flexvol( + self.src_backend, self.dest_backend, + self.src_flexvol_name, 
self.dest_flexvol_name) + + self.assertIsNone(retval) + mock_get_provisioning_opts_call.assert_called_once_with( + self.src_flexvol_name) + self.dm_mixin._get_replication_aggregate_map.assert_called_once_with( + self.src_backend, self.dest_backend) + mock_client_call.assert_called_once_with( + self.dest_flexvol_name, 'aggr01', fakes.PROVISIONING_OPTS['size'], + volume_type='dp', **expected_prov_opts) + + def test_ensure_snapmirrors(self): + flexvols = ['nvol1', 'nvol2'] + replication_backends = ['fallback1', 'fallback2'] + self.mock_object(self.dm_mixin, 'get_replication_backend_names', + mock.Mock(return_value=replication_backends)) + self.mock_object(self.dm_mixin, 'create_snapmirror') + expected_calls = [ + mock.call(self.src_backend, replication_backends[0], + flexvols[0], flexvols[0]), + mock.call(self.src_backend, replication_backends[0], + flexvols[1], flexvols[1]), + mock.call(self.src_backend, replication_backends[1], + flexvols[0], flexvols[0]), + mock.call(self.src_backend, replication_backends[1], + flexvols[1], flexvols[1]), + ] + + retval = self.dm_mixin.ensure_snapmirrors(self.mock_src_config, + self.src_backend, + flexvols) + + self.assertIsNone(retval) + self.dm_mixin.get_replication_backend_names.assert_called_once_with( + self.mock_src_config) + self.dm_mixin.create_snapmirror.assert_has_calls(expected_calls) + + def test_break_snapmirrors(self): + flexvols = ['nvol1', 'nvol2'] + replication_backends = ['fallback1', 'fallback2'] + side_effects = [None, netapp_api.NaApiError, None, None] + self.mock_object(self.dm_mixin, 'get_replication_backend_names', + mock.Mock(return_value=replication_backends)) + self.mock_object(self.dm_mixin, 'break_snapmirror', + mock.Mock(side_effect=side_effects)) + mock_exc_log = self.mock_object(data_motion.LOG, 'exception') + expected_calls = [ + mock.call(self.src_backend, replication_backends[0], + flexvols[0], flexvols[0]), + mock.call(self.src_backend, replication_backends[0], + flexvols[1], flexvols[1]), + mock.call(self.src_backend, replication_backends[1], + flexvols[0], flexvols[0]), + mock.call(self.src_backend, replication_backends[1], + flexvols[1], flexvols[1]), + ] + + failed_to_break = self.dm_mixin.break_snapmirrors( + self.mock_src_config, self.src_backend, flexvols, 'fallback1') + + self.assertEqual(1, len(failed_to_break)) + self.assertEqual(1, mock_exc_log.call_count) + self.dm_mixin.get_replication_backend_names.assert_called_once_with( + self.mock_src_config) + self.dm_mixin.break_snapmirror.assert_has_calls(expected_calls) + + def test_update_snapmirrors(self): + flexvols = ['nvol1', 'nvol2'] + replication_backends = ['fallback1', 'fallback2'] + self.mock_object(self.dm_mixin, 'get_replication_backend_names', + mock.Mock(return_value=replication_backends)) + side_effects = [None, netapp_api.NaApiError, None, None] + self.mock_object(self.dm_mixin, 'update_snapmirror', + mock.Mock(side_effect=side_effects)) + expected_calls = [ + mock.call(self.src_backend, replication_backends[0], + flexvols[0], flexvols[0]), + mock.call(self.src_backend, replication_backends[0], + flexvols[1], flexvols[1]), + mock.call(self.src_backend, replication_backends[1], + flexvols[0], flexvols[0]), + mock.call(self.src_backend, replication_backends[1], + flexvols[1], flexvols[1]), + ] + + retval = self.dm_mixin.update_snapmirrors(self.mock_src_config, + self.src_backend, + flexvols) + + self.assertIsNone(retval) + self.dm_mixin.get_replication_backend_names.assert_called_once_with( + self.mock_src_config) + 
self.dm_mixin.update_snapmirror.assert_has_calls(expected_calls) + + @ddt.data([{'destination-volume': 'nvol3', 'lag-time': '3223'}, + {'destination-volume': 'nvol5', 'lag-time': '32'}], + []) + def test__choose_failover_target_no_failover_targets(self, snapmirrors): + flexvols = ['nvol1', 'nvol2'] + replication_backends = ['fallback1', 'fallback2'] + mock_debug_log = self.mock_object(data_motion.LOG, 'debug') + self.mock_object(self.dm_mixin, 'get_snapmirrors', + mock.Mock(return_value=snapmirrors)) + + target = self.dm_mixin._choose_failover_target( + self.src_backend, flexvols, replication_backends) + + self.assertIsNone(target) + self.assertEqual(2, mock_debug_log.call_count) + + def test__choose_failover_target(self): + flexvols = ['nvol1', 'nvol2'] + replication_backends = ['fallback1', 'fallback2'] + target_1_snapmirrors = [ + {'destination-volume': 'nvol3', 'lag-time': '12'}, + {'destination-volume': 'nvol1', 'lag-time': '1541'}, + {'destination-volume': 'nvol2', 'lag-time': '16'}, + ] + target_2_snapmirrors = [ + {'destination-volume': 'nvol2', 'lag-time': '717'}, + {'destination-volume': 'nvol1', 'lag-time': '323'}, + {'destination-volume': 'nvol3', 'lag-time': '720'}, + ] + mock_debug_log = self.mock_object(data_motion.LOG, 'debug') + self.mock_object(self.dm_mixin, 'get_snapmirrors', + mock.Mock(side_effect=[target_1_snapmirrors, + target_2_snapmirrors])) + + target = self.dm_mixin._choose_failover_target( + self.src_backend, flexvols, replication_backends) + + self.assertEqual('fallback2', target) + self.assertFalse(mock_debug_log.called) + + def test__failover_host_no_suitable_target(self): + flexvols = ['nvol1', 'nvol2'] + replication_backends = ['fallback1', 'fallback2'] + self.mock_object(self.dm_mixin, '_choose_failover_target', + mock.Mock(return_value=None)) + self.mock_object(utils, 'get_backend_configuration') + self.mock_object(self.dm_mixin, 'update_snapmirrors') + self.mock_object(self.dm_mixin, 'break_snapmirrors') + + self.assertRaises(exception.NetAppDriverException, + self.dm_mixin._complete_failover, + self.src_backend, replication_backends, flexvols, + [], failover_target=None) + self.assertFalse(utils.get_backend_configuration.called) + self.assertFalse(self.dm_mixin.update_snapmirrors.called) + self.assertFalse(self.dm_mixin.break_snapmirrors.called) + + @ddt.data('fallback1', None) + def test__failover_host(self, failover_target): + flexvols = ['nvol1', 'nvol2', 'nvol3'] + replication_backends = ['fallback1', 'fallback2'] + volumes = [ + {'id': 'xyzzy', 'host': 'openstack@backend1#nvol1'}, + {'id': 'foobar', 'host': 'openstack@backend1#nvol2'}, + {'id': 'waldofred', 'host': 'openstack@backend1#nvol3'}, + ] + expected_volume_updates = [ + { + 'volume_id': 'xyzzy', + 'updates': {'replication_status': 'failed-over'}, + }, + { + 'volume_id': 'foobar', + 'updates': {'replication_status': 'failed-over'}, + }, + { + 'volume_id': 'waldofred', + 'updates': {'replication_status': 'error'}, + }, + ] + expected_active_backend_name = failover_target or 'fallback2' + self.mock_object(self.dm_mixin, '_choose_failover_target', + mock.Mock(return_value='fallback2')) + self.mock_object(utils, 'get_backend_configuration') + self.mock_object(self.dm_mixin, 'update_snapmirrors') + self.mock_object(self.dm_mixin, 'break_snapmirrors', + mock.Mock(return_value=['nvol3'])) + + actual_active_backend_name, actual_volume_updates = ( + self.dm_mixin._complete_failover( + self.src_backend, replication_backends, flexvols, + volumes, failover_target=failover_target) + ) + + 
self.assertEqual(expected_active_backend_name, + actual_active_backend_name) + self.assertEqual(expected_volume_updates, actual_volume_updates) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py new file mode 100644 index 000000000..d903859f8 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py @@ -0,0 +1,103 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import ddt +import mock +from oslo_config import cfg + +from cinder import exception +from cinder import test +from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes +from cinder.volume.drivers.netapp.dataontap.client import client_cmode +from cinder.volume.drivers.netapp.dataontap.utils import utils + +CONF = cfg.CONF + + +@ddt.ddt +class NetAppCDOTDataMotionTestCase(test.TestCase): + + def setUp(self): + super(NetAppCDOTDataMotionTestCase, self).setUp() + self.backend = 'backend1' + self.mock_cmode_client = self.mock_object(client_cmode, 'Client') + self.config = fakes.get_fake_cmode_config(self.backend) + CONF.set_override('volume_backend_name', self.backend, + group=self.backend, enforce_type=True) + CONF.set_override('netapp_transport_type', 'https', + group=self.backend, enforce_type=True) + CONF.set_override('netapp_login', 'fake_user', + group=self.backend, enforce_type=True) + CONF.set_override('netapp_password', 'fake_password', + group=self.backend, enforce_type=True) + CONF.set_override('netapp_server_hostname', 'fake_hostname', + group=self.backend, enforce_type=True) + CONF.set_override('netapp_server_port', 8866, + group=self.backend, enforce_type=True) + + def test_get_backend_configuration(self): + self.mock_object(utils, 'CONF') + CONF.set_override('netapp_vserver', 'fake_vserver', + group=self.backend, enforce_type=True) + utils.CONF.list_all_sections.return_value = [self.backend] + + config = utils.get_backend_configuration(self.backend) + + self.assertEqual('fake_vserver', config.netapp_vserver) + + def test_get_backend_configuration_different_backend_name(self): + self.mock_object(utils, 'CONF') + CONF.set_override('netapp_vserver', 'fake_vserver', + group=self.backend, enforce_type=True) + CONF.set_override('volume_backend_name', 'fake_backend_name', + group=self.backend, enforce_type=True) + utils.CONF.list_all_sections.return_value = [self.backend] + + config = utils.get_backend_configuration(self.backend) + + self.assertEqual('fake_vserver', config.netapp_vserver) + self.assertEqual('fake_backend_name', config.volume_backend_name) + + @ddt.data([], ['fake_backend1', 'fake_backend2']) + def test_get_backend_configuration_not_configured(self, conf_sections): + self.mock_object(utils, 'CONF') + utils.CONF.list_all_sections.return_value = conf_sections + + self.assertRaises(exception.ConfigNotFound, + utils.get_backend_configuration, + self.backend) + + def test_get_client_for_backend(self): + self.mock_object(utils, 'get_backend_configuration', + 
mock.Mock(return_value=self.config)) + + utils.get_client_for_backend(self.backend) + + self.mock_cmode_client.assert_called_once_with( + hostname='fake_hostname', password='fake_password', + username='fake_user', transport_type='https', port=8866, + trace=mock.ANY, vserver=None) + + def test_get_client_for_backend_with_vserver(self): + self.mock_object(utils, 'get_backend_configuration', + mock.Mock(return_value=self.config)) + + CONF.set_override('netapp_vserver', 'fake_vserver', + group=self.backend, enforce_type=True) + + utils.get_client_for_backend(self.backend) + + self.mock_cmode_client.assert_called_once_with( + hostname='fake_hostname', password='fake_password', + username='fake_user', transport_type='https', port=8866, + trace=mock.ANY, vserver='fake_vserver') diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py index 871c14dd0..4e940a0a7 100644 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py +++ b/cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py @@ -165,7 +165,7 @@ class NetAppEseriesClientDriverTestCase(test.TestCase): groups = copy.deepcopy(eseries_fake.HOST_GROUPS) group = groups[0] self.mock_object(self.my_client, 'list_host_groups', - new_attr=mock.Mock(return_value=groups)) + return_value=groups) result = self.my_client.get_host_group_by_name(group['label']) diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py index 02299be3f..6530621fc 100644 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py +++ b/cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py @@ -71,14 +71,12 @@ class NetAppEseriesLibraryTestCase(test.TestCase): self.library = library.NetAppESeriesLibrary('FAKE', **kwargs) # We don't want the looping calls to run - self.mock_object(self.library, '_start_periodic_tasks', - new_attr=mock.Mock()) + self.mock_object(self.library, '_start_periodic_tasks') # Deprecated Option self.library.configuration.netapp_storage_pools = None self.library._client = eseries_fake.FakeEseriesClient() - self.mock_object(self.library, '_start_periodic_tasks', - new_attr=mock.Mock()) + self.mock_object(self.library, '_start_periodic_tasks') self.mock_object(library.cinder_utils, 'synchronized', mock.Mock(return_value=lambda f: f)) @@ -193,7 +191,7 @@ class NetAppEseriesLibraryTestCase(test.TestCase): def test_check_storage_system(self): system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM) self.mock_object(self.library._client, 'list_storage_system', - new_attr=mock.Mock(return_value=system)) + return_value=system) update_password = self.mock_object(self.library._client, 'update_stored_system_password') info_log = self.mock_object(library.LOG, 'info', mock.Mock()) @@ -209,10 +207,9 @@ class NetAppEseriesLibraryTestCase(test.TestCase): cinder_utils.ZeroIntervalLoopingCall) def test_check_storage_system_bad_status(self, system): self.mock_object(self.library._client, 'list_storage_system', - new_attr=mock.Mock(return_value=system)) + return_value=system) self.mock_object(self.library._client, 'update_stored_system_password') - self.mock_object(time, 'time', new_attr = mock.Mock( - side_effect=range(0, 60, 5))) + self.mock_object(time, 'time', side_effect=range(0, 60, 5)) self.assertRaisesRegexp(exception.NetAppDriverException, 'bad.*?status', @@ -234,10 +231,9 @@ class NetAppEseriesLibraryTestCase(test.TestCase): yield system 
self.mock_object(self.library._client, 'list_storage_system', - new_attr=mock.Mock(side_effect=get_system_iter())) + side_effect=get_system_iter()) update_password = self.mock_object(self.library._client, - 'update_stored_system_password', - new_attr=mock.Mock()) + 'update_stored_system_password') info_log = self.mock_object(library.LOG, 'info', mock.Mock()) self.library._check_storage_system() @@ -495,8 +491,9 @@ class NetAppEseriesLibraryTestCase(test.TestCase): """Validate pool capacity calculations""" fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[ - "volumeGroupRef"]: {self.library.THIN_UQ_SPEC: True}}) + self.mock_object(self.library, '_ssc_stats', + {fake_pool["volumeGroupRef"]: { + self.library.THIN_UQ_SPEC: True}}) self.library.configuration = mock.Mock() reserved_pct = 5 over_subscription_ratio = 1.0 @@ -524,8 +521,9 @@ class NetAppEseriesLibraryTestCase(test.TestCase): """Validate that thin provisioning support is correctly reported""" fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[ - "volumeGroupRef"]: {self.library.THIN_UQ_SPEC: thin_provisioning}}) + self.mock_object(self.library, '_ssc_stats', + {fake_pool["volumeGroupRef"]: { + self.library.THIN_UQ_SPEC: thin_provisioning}}) self.library._update_volume_stats() @@ -541,8 +539,8 @@ class NetAppEseriesLibraryTestCase(test.TestCase): ssc = {self.library.THIN_UQ_SPEC: True, 'key': 'val'} fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[ - "volumeGroupRef"]: ssc}) + self.mock_object(self.library, '_ssc_stats', + {fake_pool["volumeGroupRef"]: ssc}) self.library._update_volume_stats() @@ -1377,8 +1375,7 @@ class NetAppEseriesLibraryMultiAttachTestCase(test.TestCase): self.mock_object(library.cinder_utils, 'synchronized', mock.Mock(return_value=lambda f: f)) - self.mock_object(self.library, '_start_periodic_tasks', - new_attr=mock.Mock()) + self.mock_object(self.library, '_start_periodic_tasks') self.ctxt = context.get_admin_context() diff --git a/cinder/tests/unit/volume/drivers/netapp/test_common.py b/cinder/tests/unit/volume/drivers/netapp/test_common.py index 34caf4162..fe50b1a83 100644 --- a/cinder/tests/unit/volume/drivers/netapp/test_common.py +++ b/cinder/tests/unit/volume/drivers/netapp/test_common.py @@ -85,8 +85,11 @@ class NetAppDriverFactoryTestCase(test.TestCase): def get_full_class_name(obj): return obj.__module__ + '.' 
+ obj.__class__.__name__ - kwargs = {'configuration': na_fakes.create_configuration(), - 'app_version': 'fake_info'} + kwargs = { + 'configuration': na_fakes.create_configuration(), + 'app_version': 'fake_info', + 'host': 'fakehost@fakebackend', + } registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY @@ -98,8 +101,11 @@ class NetAppDriverFactoryTestCase(test.TestCase): def test_create_driver_case_insensitive(self): - kwargs = {'configuration': na_fakes.create_configuration(), - 'app_version': 'fake_info'} + kwargs = { + 'configuration': na_fakes.create_configuration(), + 'app_version': 'fake_info', + 'host': 'fakehost@fakebackend', + } driver = na_common.NetAppDriver.create_driver('ONTAP_CLUSTER', 'FC', **kwargs) @@ -108,8 +114,11 @@ class NetAppDriverFactoryTestCase(test.TestCase): def test_create_driver_invalid_family(self): - kwargs = {'configuration': na_fakes.create_configuration(), - 'app_version': 'fake_info'} + kwargs = { + 'configuration': na_fakes.create_configuration(), + 'app_version': 'fake_info', + 'host': 'fakehost@fakebackend', + } self.assertRaises(exception.InvalidInput, na_common.NetAppDriver.create_driver, @@ -117,8 +126,11 @@ class NetAppDriverFactoryTestCase(test.TestCase): def test_create_driver_invalid_protocol(self): - kwargs = {'configuration': na_fakes.create_configuration(), - 'app_version': 'fake_info'} + kwargs = { + 'configuration': na_fakes.create_configuration(), + 'app_version': 'fake_info', + 'host': 'fakehost@fakebackend', + } self.assertRaises(exception.InvalidInput, na_common.NetAppDriver.create_driver, diff --git a/cinder/tests/unit/volume/drivers/nexenta/__init__.py b/cinder/tests/unit/volume/drivers/nexenta/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/tests/unit/test_nexenta.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta.py similarity index 97% rename from cinder/tests/unit/test_nexenta.py rename to cinder/tests/unit/volume/drivers/nexenta/test_nexenta.py index 6036349c2..3063fd5db 100644 --- a/cinder/tests/unit/test_nexenta.py +++ b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta.py @@ -89,8 +89,8 @@ class TestNexentaISCSIDriver(test.TestCase): for mod in ['volume', 'zvol', 'iscsitarget', 'appliance', 'stmf', 'scsidisk', 'snapshot']: setattr(self.nms_mock, mod, mock.Mock()) - self.stubs.Set(jsonrpc, 'NexentaJSONProxy', - lambda *_, **__: self.nms_mock) + self.mock_object(jsonrpc, 'NexentaJSONProxy', + return_value=self.nms_mock) self.drv = iscsi.NexentaISCSIDriver( configuration=self.cfg) self.drv.db = db @@ -393,8 +393,8 @@ class TestNexentaNfsDriver(test.TestCase): 'snapshot', 'netsvc'): setattr(self.nms_mock, mod, mock.Mock()) self.nms_mock.__hash__ = lambda *_, **__: 1 - self.stubs.Set(jsonrpc, 'NexentaJSONProxy', - lambda *_, **__: self.nms_mock) + self.mock_object(jsonrpc, 'NexentaJSONProxy', + return_value=self.nms_mock) self.drv = nfs.NexentaNfsDriver(configuration=self.cfg) self.drv.shares = {} self.drv.share2nms = {} @@ -415,6 +415,9 @@ class TestNexentaNfsDriver(test.TestCase): 'recursive': 'true', 'anonymous_rw': 'true', } + self.nms_mock.netstorsvc.get_shared_folders.return_value = '' + self.nms_mock.folder.get_child_props.return_value = { + 'available': 1, 'used': 1} self.drv.check_for_setup_error() self.nms_mock.netstorsvc.share_folder.assert_called_with( 'svc:/network/nfs/server:default', 'stack/share', share_opts) @@ -452,6 +455,8 @@ class TestNexentaNfsDriver(test.TestCase): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} compression = 
self.cfg.nexenta_dataset_compression + self.nms_mock.folder.get_child_props.return_value = { + 'available': 1, 'used': 1} self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.netsvc.get_confopts('svc:/network/nfs/server:default', 'configure').AndReturn({ @@ -471,6 +476,8 @@ class TestNexentaNfsDriver(test.TestCase): mock_truncate = self.nms_mock.appliance.execute mock_truncate.side_effect = exception.NexentaException() self.nms_mock.server.get_prop.return_value = '/volumes' + self.nms_mock.folder.get_child_props.return_value = { + 'available': 1, 'used': 1} self.assertRaises(exception.NexentaException, self.drv._do_create_volume, volume) @@ -610,7 +617,8 @@ class TestNexentaNfsDriver(test.TestCase): self.drv._execute = lambda *_, **__: 0 self.nms_mock.server.get_prop.return_value = '/volumes' - self.nms_mock.folder.get_child_props.return_value = None + self.nms_mock.folder.get_child_props.return_value = { + 'available': 1, 'used': 1} self.drv.delete_volume({ 'id': '1', 'name': 'volume-1', diff --git a/cinder/tests/unit/test_nexenta5_iscsi.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py similarity index 99% rename from cinder/tests/unit/test_nexenta5_iscsi.py rename to cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py index f5af40d0a..b0e7b627d 100644 --- a/cinder/tests/unit/test_nexenta5_iscsi.py +++ b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py @@ -86,8 +86,8 @@ class TestNexentaISCSIDriver(test.TestCase): self.cfg.nexenta_volume = 'pool' self.cfg.nexenta_volume_group = 'dsg' self.nef_mock = mock.Mock() - self.stubs.Set(jsonrpc, 'NexentaJSONProxy', - lambda *_, **__: self.nef_mock) + self.mock_object(jsonrpc, 'NexentaJSONProxy', + return_value=self.nef_mock) self.drv = iscsi.NexentaISCSIDriver( configuration=self.cfg) self.drv.db = db diff --git a/cinder/tests/unit/test_nexenta5_nfs.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py similarity index 98% rename from cinder/tests/unit/test_nexenta5_nfs.py rename to cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py index c7efcc6ad..9c3bd2b19 100644 --- a/cinder/tests/unit/test_nexenta5_nfs.py +++ b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py @@ -79,8 +79,8 @@ class TestNexentaNfsDriver(test.TestCase): self.cfg.nas_host = '1.1.1.1' self.cfg.nas_share_path = 'pool/share' self.nef_mock = mock.Mock() - self.stubs.Set(jsonrpc, 'NexentaJSONProxy', - lambda *_, **__: self.nef_mock) + self.mock_object(jsonrpc, 'NexentaJSONProxy', + return_value=self.nef_mock) self.drv = nfs.NexentaNfsDriver(configuration=self.cfg) self.drv.db = db self.drv.do_setup(self.ctxt) diff --git a/cinder/tests/unit/test_nexenta_edge.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta_edge.py similarity index 100% rename from cinder/tests/unit/test_nexenta_edge.py rename to cinder/tests/unit/volume/drivers/nexenta/test_nexenta_edge.py diff --git a/cinder/tests/unit/test_block_device.py b/cinder/tests/unit/volume/drivers/test_block_device.py similarity index 97% rename from cinder/tests/unit/test_block_device.py rename to cinder/tests/unit/volume/drivers/test_block_device.py index 22aac47cb..c501c9fbe 100644 --- a/cinder/tests/unit/test_block_device.py +++ b/cinder/tests/unit/volume/drivers/test_block_device.py @@ -153,16 +153,18 @@ class TestBlockDeviceDriver(cinder.test.TestCase): gud_mocked: self.drv._update_volume_stats() - self.assertEqual({'total_capacity_gb': 2, - 'free_capacity_gb': 2, - 'reserved_percentage': - 
self.configuration.reserved_percentage, - 'QoS_support': False, - 'vendor_name': "Open Source", - 'driver_version': self.drv.VERSION, - 'storage_protocol': 'unknown', - 'volume_backend_name': 'BlockDeviceDriver', - }, self.drv._stats) + reserved_percentage = self.configuration.reserved_percentage + self.assertEqual({ + 'vendor_name': "Open Source", + 'driver_version': self.drv.VERSION, + 'volume_backend_name': 'BlockDev', + 'storage_protocol': 'unknown', + 'pools': [{ + 'QoS_support': False, + 'total_capacity_gb': 2, + 'free_capacity_gb': 2, + 'reserved_percentage': reserved_percentage, + 'pool_name': 'BlockDev'}]}, self.drv._stats) gud_mocked.assert_called_once_with() ds_mocked.assert_called_once_with() diff --git a/cinder/tests/unit/test_blockbridge.py b/cinder/tests/unit/volume/drivers/test_blockbridge.py similarity index 100% rename from cinder/tests/unit/test_blockbridge.py rename to cinder/tests/unit/volume/drivers/test_blockbridge.py diff --git a/cinder/tests/unit/test_cloudbyte.py b/cinder/tests/unit/volume/drivers/test_cloudbyte.py similarity index 100% rename from cinder/tests/unit/test_cloudbyte.py rename to cinder/tests/unit/volume/drivers/test_cloudbyte.py diff --git a/cinder/tests/unit/test_coho.py b/cinder/tests/unit/volume/drivers/test_coho.py similarity index 78% rename from cinder/tests/unit/test_coho.py rename to cinder/tests/unit/volume/drivers/test_coho.py index c0e8d3a28..62f47d281 100644 --- a/cinder/tests/unit/test_coho.py +++ b/cinder/tests/unit/volume/drivers/test_coho.py @@ -22,11 +22,15 @@ import six import socket import xdrlib +from cinder import context from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers import coho from cinder.volume.drivers import nfs +from cinder.volume.drivers import remotefs +from cinder.volume import qos_specs +from cinder.volume import volume_types ADDR = 'coho-datastream-addr' PATH = '/test/path' @@ -38,10 +42,10 @@ VOLUME = { 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190', 'size': 128, 'volume_type': 'silver', - 'volume_type_id': 'test', + 'volume_type_id': 'type-id', 'metadata': [{'key': 'type', 'service_label': 'silver'}], - 'provider_location': None, + 'provider_location': 'coho-datastream-addr:/test/path', 'id': 'bcc48c61-9691-4e5f-897c-793686093190', 'status': 'available', } @@ -60,6 +64,31 @@ SNAPSHOT = { 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191', } +VOLUME_TYPE = { + 'name': 'sf-1', + 'qos_specs_id': 'qos-spec-id', + 'deleted': False, + 'created_at': '2016-06-06 04:58:11', + 'updated_at': None, + 'extra_specs': {}, + 'deleted_at': None, + 'id': 'type-id' +} + +QOS_SPEC = { + 'id': 'qos-spec-id', + 'specs': { + 'maxIOPS': '2000', + 'maxMBS': '500' + } +} + +QOS = { + 'uuid': 'qos-spec-id', + 'maxIOPS': 2000, + 'maxMBS': 500 +} + INVALID_SNAPSHOT = SNAPSHOT.copy() INVALID_SNAPSHOT['name'] = '' @@ -129,6 +158,34 @@ class CohoDriverTest(test.TestCase): self.assertTrue(coho.LOG.warning.called) self.assertTrue(nfs.NfsDriver.do_setup.called) + def test_create_volume_with_qos(self): + drv = coho.CohoDriver(configuration=self.configuration) + + mock_remotefs_create = self.mock_object(remotefs.RemoteFSDriver, + 'create_volume') + mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') + mock_get_volume_type = self.mock_object(volume_types, + 'get_volume_type') + mock_get_volume_type.return_value = VOLUME_TYPE + mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') + mock_get_qos_specs.return_value = QOS_SPEC + mock_get_admin_context = 
self.mock_object(context, 'get_admin_context') + mock_get_admin_context.return_value = 'test' + + drv.create_volume(VOLUME) + + self.assertTrue(mock_remotefs_create.called) + self.assertTrue(mock_get_admin_context.called) + mock_remotefs_create.assert_has_calls([mock.call(VOLUME)]) + mock_get_volume_type.assert_has_calls( + [mock.call('test', VOLUME_TYPE['id'])]) + mock_get_qos_specs.assert_has_calls( + [mock.call('test', QOS_SPEC['id'])]) + mock_rpc_client.assert_has_calls( + [mock.call(ADDR, self.configuration.coho_rpc_port), + mock.call().set_qos_policy(os.path.join(PATH, VOLUME['name']), + QOS)]) + def test_create_snapshot(self): drv = coho.CohoDriver(configuration=self.configuration) @@ -169,24 +226,46 @@ class CohoDriverTest(test.TestCase): mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') mock_find_share = self.mock_object(drv, '_find_share') mock_find_share.return_value = ADDR + ':' + PATH + mock_get_volume_type = self.mock_object(volume_types, + 'get_volume_type') + mock_get_volume_type.return_value = VOLUME_TYPE + mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') + mock_get_qos_specs.return_value = QOS_SPEC + mock_get_admin_context = self.mock_object(context, 'get_admin_context') + mock_get_admin_context.return_value = 'test' drv.create_volume_from_snapshot(VOLUME, SNAPSHOT) mock_find_share.assert_has_calls( [mock.call(VOLUME['size'])]) + self.assertTrue(mock_get_admin_context.called) + mock_get_volume_type.assert_has_calls( + [mock.call('test', VOLUME_TYPE['id'])]) + mock_get_qos_specs.assert_has_calls( + [mock.call('test', QOS_SPEC['id'])]) mock_rpc_client.assert_has_calls( [mock.call(ADDR, self.configuration.coho_rpc_port), mock.call().create_volume_from_snapshot( - SNAPSHOT['name'], os.path.join(PATH, VOLUME['name']))]) + SNAPSHOT['name'], os.path.join(PATH, VOLUME['name'])), + mock.call().set_qos_policy(os.path.join(PATH, VOLUME['name']), + QOS)]) def test_create_cloned_volume(self): drv = coho.CohoDriver(configuration=self.configuration) + mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') mock_find_share = self.mock_object(drv, '_find_share') mock_find_share.return_value = ADDR + ':' + PATH mock_execute = self.mock_object(drv, '_execute') mock_local_path = self.mock_object(drv, 'local_path') mock_local_path.return_value = LOCAL_PATH + mock_get_volume_type = self.mock_object(volume_types, + 'get_volume_type') + mock_get_volume_type.return_value = VOLUME_TYPE + mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') + mock_get_qos_specs.return_value = QOS_SPEC + mock_get_admin_context = self.mock_object(context, 'get_admin_context') + mock_get_admin_context.return_value = 'test' drv.create_cloned_volume(VOLUME, CLONE_VOL) @@ -196,6 +275,36 @@ class CohoDriverTest(test.TestCase): [mock.call(VOLUME), mock.call(CLONE_VOL)]) mock_execute.assert_has_calls( [mock.call('cp', LOCAL_PATH, LOCAL_PATH, run_as_root=True)]) + self.assertTrue(mock_get_admin_context.called) + mock_get_volume_type.assert_has_calls( + [mock.call('test', VOLUME_TYPE['id'])]) + mock_get_qos_specs.assert_has_calls( + [mock.call('test', QOS_SPEC['id'])]) + mock_rpc_client.assert_has_calls( + [mock.call(ADDR, self.configuration.coho_rpc_port), + mock.call().set_qos_policy(os.path.join(PATH, VOLUME['name']), + QOS)]) + + def test_retype(self): + drv = coho.CohoDriver(configuration=self.configuration) + + mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') + mock_get_volume_type = self.mock_object(volume_types, + 'get_volume_type') + mock_get_volume_type.return_value = 
VOLUME_TYPE + mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') + mock_get_qos_specs.return_value = QOS_SPEC + + drv.retype('test', VOLUME, VOLUME_TYPE, None, None) + + mock_get_volume_type.assert_has_calls( + [mock.call('test', VOLUME_TYPE['id'])]) + mock_get_qos_specs.assert_has_calls( + [mock.call('test', QOS_SPEC['id'])]) + mock_rpc_client.assert_has_calls( + [mock.call(ADDR, self.configuration.coho_rpc_port), + mock.call().set_qos_policy(os.path.join(PATH, VOLUME['name']), + QOS)]) def test_extend_volume(self): drv = coho.CohoDriver(configuration=self.configuration) diff --git a/cinder/tests/unit/volume/drivers/test_datera.py b/cinder/tests/unit/volume/drivers/test_datera.py index 0707f035d..096848c05 100644 --- a/cinder/tests/unit/volume/drivers/test_datera.py +++ b/cinder/tests/unit/volume/drivers/test_datera.py @@ -23,8 +23,10 @@ from cinder.volume.drivers import datera from cinder.volume import volume_types -DEFAULT_STORAGE_NAME = datera.DEFAULT_STORAGE_NAME -DEFAULT_VOLUME_NAME = datera.DEFAULT_VOLUME_NAME +datera.DEFAULT_SI_SLEEP = 0 +URL_TEMPLATES = datera.URL_TEMPLATES +OS_PREFIX = datera.OS_PREFIX +UNMANAGE_PREFIX = datera.UNMANAGE_PREFIX class DateraVolumeTestCase(test.TestCase): @@ -52,6 +54,7 @@ class DateraVolumeTestCase(test.TestCase): self.driver = datera.DateraDriver(execute=mock_exec, configuration=self.cfg) self.driver.set_initialized() + self.driver.configuration.get = _config_getter self.volume = _stub_volume() self.api_patcher = mock.patch('cinder.volume.drivers.datera.' 'DateraDriver._issue_api_request') @@ -74,9 +77,8 @@ class DateraVolumeTestCase(test.TestCase): def _progress_api_return(mock_api): if mock_api.retry_count == 1: _bad_vol_ai = stub_single_ai.copy() - _bad_vol_ai['storage_instances'][ - DEFAULT_STORAGE_NAME]['volumes'][DEFAULT_VOLUME_NAME][ - 'op_status'] = 'unavailable' + _bad_vol_ai['storage_instances']['storage-1'][ + 'volumes']['volume-1']['op_status'] = 'unavailable' return _bad_vol_ai else: self.mock_api.retry_count += 1 @@ -156,12 +158,11 @@ class DateraVolumeTestCase(test.TestCase): self.driver.delete_volume, self.volume) def test_ensure_export_success(self): - with mock.patch('time.sleep'): - self.mock_api.side_effect = self._generate_fake_api_request() - ctxt = context.get_admin_context() - self.assertIsNone(self.driver.ensure_export(ctxt, - self.volume, - None)) + self.mock_api.side_effect = self._generate_fake_api_request() + ctxt = context.get_admin_context() + self.assertIsNone(self.driver.ensure_export(ctxt, + self.volume, + None)) def test_ensure_export_fails(self): self.mock_api.side_effect = exception.DateraAPIException @@ -170,23 +171,21 @@ class DateraVolumeTestCase(test.TestCase): self.driver.ensure_export, ctxt, self.volume, None) def test_create_export_target_does_not_exist_success(self): - with mock.patch('time.sleep'): - self.mock_api.side_effect = self._generate_fake_api_request( - targets_exist=False) - ctxt = context.get_admin_context() - self.assertIsNone(self.driver.create_export(ctxt, - self.volume, - None)) + self.mock_api.side_effect = self._generate_fake_api_request( + targets_exist=False) + ctxt = context.get_admin_context() + self.assertIsNone(self.driver.create_export(ctxt, + self.volume, + None)) def test_create_export_fails(self): - with mock.patch('time.sleep'): - self.mock_api.side_effect = exception.DateraAPIException - ctxt = context.get_admin_context() - self.assertRaises(exception.DateraAPIException, - self.driver.create_export, - ctxt, - self.volume, - None) + self.mock_api.side_effect = 
exception.DateraAPIException + ctxt = context.get_admin_context() + self.assertRaises(exception.DateraAPIException, + self.driver.create_export, + ctxt, + self.volume, + None) def test_initialize_connection_success(self): self.mock_api.side_effect = self._generate_fake_api_request() @@ -259,7 +258,7 @@ class DateraVolumeTestCase(test.TestCase): def test_delete_snapshot_not_found(self): self.mock_api.side_effect = [stub_return_snapshots, exception.NotFound] - snapshot = _stub_snapshot(self.volume['id']) + snapshot = _stub_snapshot(self.volume['id'], volume_id="test") self.assertIsNone(self.driver.delete_snapshot(snapshot)) def test_delete_snapshot_fails(self): @@ -303,6 +302,73 @@ class DateraVolumeTestCase(test.TestCase): self.assertRaises(exception.NotAuthorized, self.driver._login) self.assertEqual(1, self.mock_api.call_count) + def test_manage_existing(self): + TEST_NAME = {"source-name": "test-app:test-si:test-vol"} + self.mock_api.return_value = {} + self.assertIsNone( + self.driver.manage_existing( + _stub_volume(), + TEST_NAME)) + self.mock_api.assert_called_once_with( + URL_TEMPLATES['ai_inst']().format( + TEST_NAME["source-name"].split(":")[0]), + method='put', + body={'name': OS_PREFIX + _stub_volume()['id']}) + + def test_manage_existing_wrong_ref(self): + TEST_NAME = {"source-name": "incorrect-reference"} + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing, + _stub_volume(), + TEST_NAME) + + def test_manage_existing_get_size(self): + TEST_NAME = {"source-name": "test-app:storage-1:volume-1"} + self.mock_api.side_effect = self._generate_fake_api_request() + self.assertEqual( + self.driver.manage_existing_get_size( + _stub_volume(), + TEST_NAME), 500) + self.mock_api.assert_called_once_with( + URL_TEMPLATES['ai_inst']().format( + TEST_NAME["source-name"].split(":")[0])) + + def test_manage_existing_get_size_wrong_ref(self): + TEST_NAME = {"source-name": "incorrect-reference"} + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, + _stub_volume(), + TEST_NAME) + + def test_get_manageable_volumes(self): + self.mock_api.return_value = non_cinder_ais + self.assertEqual( + self.driver.get_manageable_volumes( + {}, "", 10, 0, "", ""), + [{'cinder_id': None, + 'extra_info': None, + 'reason_not_safe': None, + 'reference': {"source-name": 'test-app-inst:storage-1:volume-1'}, + 'safe_to_manage': True, + 'size': 50}, + {'cinder_id': 'c20aba21-6ef6-446b-b374-45733b4883ba', + 'extra_info': None, + 'reason_not_safe': None, + 'reference': None, + 'safe_to_manage': False, + 'size': None}]) + + def test_unmanage(self): + self.mock_api.return_value = {} + self.assertIsNone(self.driver.unmanage(_stub_volume())) + self.mock_api.assert_called_once_with( + URL_TEMPLATES['ai_inst']().format( + OS_PREFIX + _stub_volume()['id']), + method='put', + body={'name': UNMANAGE_PREFIX + _stub_volume()['id']}) + def _generate_fake_api_request(self, targets_exist=True): def _fake_api_request(resource_type, method='get', resource=None, body=None, action=None, sensitive=False): @@ -318,6 +384,8 @@ class DateraVolumeTestCase(test.TestCase): return stub_acl elif resource_type == 'ig_group': return stub_ig + else: + return list(stub_app_instance.values())[0] return _fake_api_request stub_acl = { @@ -422,6 +490,7 @@ stub_app_instance = { }, "creation_type": "user", "descr": "c20aba21-6ef6-446b-b374-45733b4883ba__ST__storage-1", + "op_state": "available", "name": "storage-1", "path": "/app_instances/c20aba21-6ef6-446b-b374-" 
"45733b4883ba/storage_instances/storage-1", @@ -481,6 +550,148 @@ stub_return_snapshots = \ } +non_cinder_ais = { + "75bc1c69-a399-4acb-aade-3514caf13c5e": { + "admin_state": "online", + "create_mode": "normal", + "descr": "", + "health": "ok", + "id": "75bc1c69-a399-4acb-aade-3514caf13c5e", + "name": "test-app-inst", + "path": "/app_instances/75bc1c69-a399-4acb-aade-3514caf13c5e", + "snapshot_policies": {}, + "snapshots": {}, + "storage_instances": { + "storage-1": { + "access": { + "ips": [ + "172.28.41.93" + ], + "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" + "29036682e2d37b98", + "path": "/app_instances/75bc1c69-a399-4acb-aade-" + "3514caf13c5e/storage_instances/storage-1/access" + }, + "access_control_mode": "deny_all", + "acl_policy": { + "initiator_groups": [], + "initiators": [], + "path": "/app_instances/75bc1c69-a399-4acb-aade-" + "3514caf13c5e/storage_instances/storage-" + "1/acl_policy" + }, + "active_initiators": [], + "active_storage_nodes": [ + "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" + ], + "admin_state": "online", + "auth": { + "initiator_pswd": "(hidden)", + "initiator_user_name": "", + "path": "/app_instances/75bc1c69-a399-4acb-aade-" + "3514caf13c5e/storage_instances/storage-1/auth", + "target_pswd": "(hidden)", + "target_user_name": "", + "type": "none" + }, + "creation_type": "user", + "ip_pool": "/access_network_ip_pools/default", + "name": "storage-1", + "op_state": "available", + "path": "/app_instances/75bc1c69-a399-4acb-aade-" + "3514caf13c5e/storage_instances/storage-1", + "uuid": "6421237d-e4fc-433a-b535-148d5b6d8586", + "volumes": { + "volume-1": { + "capacity_in_use": 0, + "name": "volume-1", + "op_state": "available", + "path": "/app_instances/75bc1c69-a399-4acb-aade-" + "3514caf13c5e/storage_instances/storage-" + "1/volumes/volume-1", + "replica_count": 1, + "size": 50, + "snapshot_policies": {}, + "snapshots": {}, + "uuid": "e674d29c-a672-40d1-9577-abe3a504ffe9" + } + } + } + }, + "uuid": "00000000-0000-0000-0000-000000000000" + }, + "dfdaf8d1-8976-4c13-a829-3345e03cf810": { + "admin_state": "offline", + "create_mode": "openstack", + "descr": "", + "health": "ok", + "id": "dfdaf8d1-8976-4c13-a829-3345e03cf810", + "name": "OS-c20aba21-6ef6-446b-b374-45733b4883ba", + "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810", + "snapshot_policies": {}, + "snapshots": {}, + "storage_instances": { + "storage-1": { + "access": { + "ips": [ + "172.28.41.57" + ], + "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" + "56cd59e754ad02b6", + "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" + "3345e03cf810/storage_instances/storage-1/access" + }, + "access_control_mode": "deny_all", + "acl_policy": { + "initiator_groups": [], + "initiators": [], + "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" + "3345e03cf810/storage_instances/storage-" + "1/acl_policy" + }, + "active_initiators": [], + "active_storage_nodes": [ + "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" + ], + "admin_state": "offline", + "auth": { + "initiator_pswd": "(hidden)", + "initiator_user_name": "", + "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" + "3345e03cf810/storage_instances/storage-1/auth", + "target_pswd": "(hidden)", + "target_user_name": "", + "type": "none" + }, + "creation_type": "user", + "ip_pool": "/access_network_ip_pools/default", + "name": "storage-1", + "op_state": "unavailable", + "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810" + "/storage_instances/storage-1", + "uuid": "5620a673-9985-464e-9616-e325a50eac60", + "volumes": { + "volume-1": { + 
"capacity_in_use": 0, + "name": "volume-1", + "op_state": "available", + "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" + "3345e03cf810/storage_instances/storage-" + "1/volumes/volume-1", + "replica_count": 1, + "size": 5, + "snapshot_policies": {}, + "snapshots": {}, + "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" + } + } + } + }, + "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" + } +} + + def _stub_datera_volume(*args, **kwargs): return { "status": "available", @@ -519,3 +730,7 @@ def _stub_snapshot(*args, **kwargs): volume['display_name'] = kwargs.get('display_name', name) volume['volume_id'] = kwargs.get('volume_id', None) return volume + + +def _config_getter(*args, **kwargs): + return {} diff --git a/cinder/tests/unit/test_drbdmanagedrv.py b/cinder/tests/unit/volume/drivers/test_drbdmanagedrv.py similarity index 71% rename from cinder/tests/unit/test_drbdmanagedrv.py rename to cinder/tests/unit/volume/drivers/test_drbdmanagedrv.py index 97d3ba36b..48e809af9 100644 --- a/cinder/tests/unit/test_drbdmanagedrv.py +++ b/cinder/tests/unit/volume/drivers/test_drbdmanagedrv.py @@ -143,6 +143,17 @@ class DrbdManageFakeDriver(object): def __init__(self): self.calls = [] + self.cur = -1 + + def call_count(self): + return len(self.calls) + + def next_call(self): + self.cur += 1 + return self.calls[self.cur][0] + + def call_parm(self, arg_idx): + return self.calls[self.cur][arg_idx] def run_external_plugin(self, name, props): self.calls.append(["run_external_plugin", name, props]) @@ -270,6 +281,10 @@ class DrbdManageFakeDriver(object): self.calls.append(["set_drbdsetup_props", options]) return [[mock_dm_exc.DM_SUCCESS, "ack", []]] + def modify_resource(self, res, ser, props): + self.calls.append(["modify_resource", res, ser, props]) + return [[mock_dm_exc.DM_SUCCESS, "ack", []]] + class DrbdManageIscsiTestCase(test.TestCase): @@ -291,6 +306,8 @@ class DrbdManageIscsiTestCase(test.TestCase): '"ko-count": "30"}') if key == 'drbdmanage_resource_options': return '{"auto-promote-timeout": "300"}' + if key == 'drbdmanage_disk_options': + return '{"c-min-rate": "4M"}' return self._fake_safe_get(key) @@ -356,14 +373,15 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd.drbdmanage_devs_on_controller = False dmd.odm = DrbdManageFakeDriver() dmd.create_volume(testvol) - self.assertEqual("create_resource", dmd.odm.calls[0][0]) - self.assertEqual("set_drbdsetup_props", dmd.odm.calls[1][0]) - self.assertEqual("set_drbdsetup_props", dmd.odm.calls[2][0]) - self.assertEqual("list_volumes", dmd.odm.calls[3][0]) - self.assertEqual("create_volume", dmd.odm.calls[4][0]) - self.assertEqual(1048576, dmd.odm.calls[4][2]) - self.assertEqual("auto_deploy", dmd.odm.calls[5][0]) - self.assertEqual(7, len(dmd.odm.calls)) + self.assertEqual(8, dmd.odm.call_count()) + self.assertEqual("create_resource", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual("create_volume", dmd.odm.next_call()) + self.assertEqual(1048576, dmd.odm.call_parm(2)) + self.assertEqual("auto_deploy", dmd.odm.next_call()) def test_create_volume_with_options(self): testvol = {'project_id': 'testprjid', @@ -379,21 +397,28 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd.odm = DrbdManageFakeDriver() dmd.create_volume(testvol) - self.assertEqual("create_resource", dmd.odm.calls[0][0]) + self.assertEqual(8, 
dmd.odm.call_count()) - self.assertEqual("set_drbdsetup_props", dmd.odm.calls[1][0]) - self.assertEqual("reso", dmd.odm.calls[1][1]["type"]) - self.assertEqual("300", dmd.odm.calls[1][1]["auto-promote-timeout"]) + self.assertEqual("create_resource", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.calls[2][0]) - self.assertEqual("neto", dmd.odm.calls[2][1]["type"]) - self.assertEqual("30", dmd.odm.calls[2][1]["ko-count"]) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("reso", dmd.odm.call_parm(1)["type"]) + self.assertEqual("300", dmd.odm.call_parm(1)["auto-promote-timeout"]) - self.assertEqual("list_volumes", dmd.odm.calls[3][0]) - self.assertEqual("create_volume", dmd.odm.calls[4][0]) - self.assertEqual(1048576, dmd.odm.calls[4][2]) - self.assertEqual("auto_deploy", dmd.odm.calls[5][0]) - self.assertEqual(7, len(dmd.odm.calls)) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("neto", dmd.odm.call_parm(1)["type"]) + self.assertEqual("30", dmd.odm.call_parm(1)["ko-count"]) + + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("disko", dmd.odm.call_parm(1)["type"]) + self.assertEqual("4M", dmd.odm.call_parm(1)["c-min-rate"]) + + self.assertEqual("list_volumes", dmd.odm.next_call()) + + self.assertEqual("create_volume", dmd.odm.next_call()) + self.assertEqual(1048576, dmd.odm.call_parm(2)) + + self.assertEqual("auto_deploy", dmd.odm.next_call()) def test_create_volume_controller_all_vols(self): testvol = {'project_id': 'testprjid', @@ -407,16 +432,17 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd.drbdmanage_devs_on_controller = True dmd.odm = DrbdManageFakeDriver() dmd.create_volume(testvol) - self.assertEqual(8, len(dmd.odm.calls)) - self.assertEqual("create_resource", dmd.odm.calls[0][0]) - self.assertEqual("set_drbdsetup_props", dmd.odm.calls[1][0]) - self.assertEqual("set_drbdsetup_props", dmd.odm.calls[2][0]) - self.assertEqual("list_volumes", dmd.odm.calls[3][0]) - self.assertEqual("create_volume", dmd.odm.calls[4][0]) - self.assertEqual(1048576, dmd.odm.calls[4][2]) - self.assertEqual("auto_deploy", dmd.odm.calls[5][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[6][0]) - self.assertEqual("assign", dmd.odm.calls[7][0]) + self.assertEqual("create_resource", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual("create_volume", dmd.odm.next_call()) + self.assertEqual(1048576, dmd.odm.call_parm(2)) + self.assertEqual("auto_deploy", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("assign", dmd.odm.next_call()) + self.assertEqual(9, dmd.odm.call_count()) def test_delete_volume(self): testvol = {'project_id': 'testprjid', @@ -429,9 +455,9 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.delete_volume(testvol) - self.assertEqual("list_volumes", dmd.odm.calls[0][0]) - self.assertEqual(testvol['id'], dmd.odm.calls[0][3]["aux:cinder-id"]) - self.assertEqual("remove_volume", dmd.odm.calls[1][0]) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual(testvol['id'], dmd.odm.call_parm(3)["aux:cinder-id"]) + self.assertEqual("remove_volume", dmd.odm.next_call()) 
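+    # The index-based checks (dmd.odm.calls[i][j]) are being replaced by the
+    # cursor helpers next_call()/call_parm() on DrbdManageFakeDriver. A
+    # minimal sketch of the pattern, using only the fake's own API:
+    #
+    #     fake = DrbdManageFakeDriver()
+    #     fake.run_external_plugin('policy-name', {})
+    #     assert fake.next_call() == 'run_external_plugin'
+    #     assert fake.call_parm(1) == 'policy-name'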
def test_local_path(self): testvol = {'project_id': 'testprjid', @@ -453,10 +479,10 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.create_snapshot(testsnap) - self.assertEqual("list_volumes", dmd.odm.calls[0][0]) - self.assertEqual("list_assignments", dmd.odm.calls[1][0]) - self.assertEqual("create_snapshot", dmd.odm.calls[2][0]) - self.assertIn('node', dmd.odm.calls[2][3]) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual("list_assignments", dmd.odm.next_call()) + self.assertEqual("create_snapshot", dmd.odm.next_call()) + self.assertIn('node', dmd.odm.call_parm(3)) def test_delete_snapshot(self): testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} @@ -464,8 +490,8 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.delete_snapshot(testsnap) - self.assertEqual("list_snapshots", dmd.odm.calls[0][0]) - self.assertEqual("remove_snapshot", dmd.odm.calls[1][0]) + self.assertEqual("list_snapshots", dmd.odm.next_call()) + self.assertEqual("remove_snapshot", dmd.odm.next_call()) def test_extend_volume(self): testvol = {'project_id': 'testprjid', @@ -478,13 +504,13 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.extend_volume(testvol, 5) - self.assertEqual("list_volumes", dmd.odm.calls[0][0]) - self.assertEqual(testvol['id'], dmd.odm.calls[0][3]["aux:cinder-id"]) - self.assertEqual("resize_volume", dmd.odm.calls[1][0]) - self.assertEqual("res", dmd.odm.calls[1][1]) - self.assertEqual(2, dmd.odm.calls[1][2]) - self.assertEqual(-1, dmd.odm.calls[1][3]) - self.assertEqual(5242880, dmd.odm.calls[1][4]) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual(testvol['id'], dmd.odm.call_parm(3)["aux:cinder-id"]) + self.assertEqual("resize_volume", dmd.odm.next_call()) + self.assertEqual("res", dmd.odm.call_parm(1)) + self.assertEqual(2, dmd.odm.call_parm(2)) + self.assertEqual(-1, dmd.odm.call_parm(3)) + self.assertEqual(5242880, dmd.odm.call_parm(4)) def test_create_cloned_volume(self): srcvol = {'project_id': 'testprjid', @@ -499,15 +525,18 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.create_cloned_volume(newvol, srcvol) - self.assertEqual("list_volumes", dmd.odm.calls[0][0]) - self.assertEqual("list_assignments", dmd.odm.calls[1][0]) - self.assertEqual("create_snapshot", dmd.odm.calls[2][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[3][0]) - self.assertEqual("list_snapshots", dmd.odm.calls[4][0]) - self.assertEqual("restore_snapshot", dmd.odm.calls[5][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[6][0]) - self.assertEqual("list_snapshots", dmd.odm.calls[7][0]) - self.assertEqual("remove_snapshot", dmd.odm.calls[8][0]) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual("list_assignments", dmd.odm.next_call()) + self.assertEqual("create_snapshot", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("list_snapshots", dmd.odm.next_call()) + self.assertEqual("restore_snapshot", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + 
self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("list_snapshots", dmd.odm.next_call()) + self.assertEqual("remove_snapshot", dmd.odm.next_call()) def test_create_cloned_volume_larger_size(self): srcvol = {'project_id': 'testprjid', @@ -523,25 +552,30 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.create_cloned_volume(newvol, srcvol) - self.assertEqual("list_volumes", dmd.odm.calls[0][0]) - self.assertEqual("list_assignments", dmd.odm.calls[1][0]) - self.assertEqual("create_snapshot", dmd.odm.calls[2][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[3][0]) - self.assertEqual("list_snapshots", dmd.odm.calls[4][0]) - self.assertEqual("restore_snapshot", dmd.odm.calls[5][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[6][0]) - # resize image checks - self.assertEqual("list_volumes", dmd.odm.calls[7][0]) - self.assertEqual(newvol['id'], dmd.odm.calls[7][3]["aux:cinder-id"]) - self.assertEqual("resize_volume", dmd.odm.calls[8][0]) - self.assertEqual("res", dmd.odm.calls[8][1]) - self.assertEqual(2, dmd.odm.calls[8][2]) - self.assertEqual(-1, dmd.odm.calls[8][3]) - self.assertEqual(5242880, dmd.odm.calls[8][4]) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual("list_assignments", dmd.odm.next_call()) + self.assertEqual("create_snapshot", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("list_snapshots", dmd.odm.next_call()) + self.assertEqual("restore_snapshot", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.calls[9][0]) - self.assertEqual("list_snapshots", dmd.odm.calls[10][0]) - self.assertEqual("remove_snapshot", dmd.odm.calls[11][0]) + # resize image checks + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual(newvol['id'], dmd.odm.call_parm(3)["aux:cinder-id"]) + self.assertEqual("resize_volume", dmd.odm.next_call()) + self.assertEqual("res", dmd.odm.call_parm(1)) + self.assertEqual(2, dmd.odm.call_parm(2)) + self.assertEqual(-1, dmd.odm.call_parm(3)) + self.assertEqual(5242880, dmd.odm.call_parm(4)) + + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + + self.assertEqual("list_snapshots", dmd.odm.next_call()) + self.assertEqual("remove_snapshot", dmd.odm.next_call()) def test_create_volume_from_snapshot(self): snap = {'project_id': 'testprjid', @@ -556,9 +590,12 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.create_volume_from_snapshot(newvol, snap) - self.assertEqual("list_snapshots", dmd.odm.calls[0][0]) - self.assertEqual("restore_snapshot", dmd.odm.calls[1][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[2][0]) + self.assertEqual("list_snapshots", dmd.odm.next_call()) + self.assertEqual("restore_snapshot", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", 
dmd.odm.next_call()) def test_create_volume_from_snapshot_larger_size(self): snap = {'project_id': 'testprjid', @@ -574,18 +611,12 @@ class DrbdManageIscsiTestCase(test.TestCase): dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.create_volume_from_snapshot(newvol, snap) - self.assertEqual("list_snapshots", dmd.odm.calls[0][0]) - self.assertEqual("restore_snapshot", dmd.odm.calls[1][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[2][0]) - - # resize image checks - self.assertEqual("list_volumes", dmd.odm.calls[3][0]) - self.assertEqual(newvol['id'], dmd.odm.calls[3][3]["aux:cinder-id"]) - self.assertEqual("resize_volume", dmd.odm.calls[4][0]) - self.assertEqual("res", dmd.odm.calls[4][1]) - self.assertEqual(2, dmd.odm.calls[4][2]) - self.assertEqual(-1, dmd.odm.calls[4][3]) - self.assertEqual(5242880, dmd.odm.calls[4][4]) + self.assertEqual("list_snapshots", dmd.odm.next_call()) + self.assertEqual("restore_snapshot", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) class DrbdManageDrbdTestCase(DrbdManageIscsiTestCase): @@ -612,12 +643,12 @@ class DrbdManageDrbdTestCase(DrbdManageIscsiTestCase): dmd.odm = DrbdManageFakeDriver() x = dmd.create_export({}, volume, connector) - self.assertEqual("list_volumes", dmd.odm.calls[0][0]) - self.assertEqual("create_node", dmd.odm.calls[1][0]) - self.assertEqual("assign", dmd.odm.calls[2][0]) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual("create_node", dmd.odm.next_call()) + self.assertEqual("assign", dmd.odm.next_call()) # local_path - self.assertEqual("list_volumes", dmd.odm.calls[3][0]) - self.assertEqual("text_query", dmd.odm.calls[4][0]) + self.assertEqual("list_volumes", dmd.odm.next_call()) + self.assertEqual("text_query", dmd.odm.next_call()) self.assertEqual("local", x["driver_volume_type"]) @@ -633,11 +664,11 @@ class DrbdManageCommonTestCase(DrbdManageIscsiTestCase): {'retry': 4, 'run-into-timeout': True}) self.assertFalse(res) - self.assertEqual(4, len(dmd.odm.calls)) - self.assertEqual("run_external_plugin", dmd.odm.calls[0][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[1][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[2][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[3][0]) + self.assertEqual(4, dmd.odm.call_count()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) def test_drbd_policy_loop_success(self): dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration) @@ -648,11 +679,11 @@ class DrbdManageCommonTestCase(DrbdManageIscsiTestCase): 'retry': 4}, {'override': 'xyz'}) self.assertTrue(res) - self.assertEqual(4, len(dmd.odm.calls)) - self.assertEqual("run_external_plugin", dmd.odm.calls[0][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[1][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[2][0]) - self.assertEqual("run_external_plugin", dmd.odm.calls[3][0]) + self.assertEqual(4, dmd.odm.call_count()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + 
self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) def test_drbd_policy_loop_simple(self): dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration) @@ -665,11 +696,11 @@ class DrbdManageCommonTestCase(DrbdManageIscsiTestCase): 'starttime': 0}) self.assertTrue(res) - self.assertEqual(1, len(dmd.odm.calls)) - self.assertEqual("run_external_plugin", dmd.odm.calls[0][0]) - self.assertEqual('policy-name', dmd.odm.calls[0][1]) + self.assertEqual(1, dmd.odm.call_count()) + self.assertEqual("run_external_plugin", dmd.odm.next_call()) + self.assertEqual('policy-name', dmd.odm.call_parm(1)) + incoming = dmd.odm.call_parm(2) - incoming = dmd.odm.calls[0][2] self.assertGreaterEqual(4, abs(float(incoming['starttime']) - time.time())) self.assertEqual('value', incoming['base']) diff --git a/cinder/tests/unit/test_eqlx.py b/cinder/tests/unit/volume/drivers/test_eqlx.py similarity index 99% rename from cinder/tests/unit/test_eqlx.py rename to cinder/tests/unit/volume/drivers/test_eqlx.py index 29f67acc9..67863435e 100644 --- a/cinder/tests/unit/test_eqlx.py +++ b/cinder/tests/unit/volume/drivers/test_eqlx.py @@ -1,5 +1,5 @@ # Copyright (c) 2013 Dell Inc. -# Copyright 2013 OpenStack LLC +# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,6 +14,7 @@ # under the License. import time +import unittest from eventlet import greenthread import mock @@ -528,6 +529,7 @@ class DellEQLSanISCSIDriverTestCase(test.TestCase): self.assertEqual(num_attempts + 1, self.driver._ssh_execute.call_count) + @unittest.skip("Skip until bug #1578986 is fixed") @mock.patch.object(greenthread, 'sleep') def test_ensure_retries_on_channel_timeout(self, _gt_sleep): num_attempts = 3 @@ -557,6 +559,7 @@ class DellEQLSanISCSIDriverTestCase(test.TestCase): self.assertEqual(num_attempts + 1, self.driver._get_output.call_count) + @unittest.skip("Skip until bug #1578986 is fixed") def test_with_timeout(self): @eqlx.with_timeout def no_timeout(cmd, *args, **kwargs): diff --git a/cinder/tests/unit/test_glusterfs.py b/cinder/tests/unit/volume/drivers/test_glusterfs.py similarity index 99% rename from cinder/tests/unit/test_glusterfs.py rename to cinder/tests/unit/volume/drivers/test_glusterfs.py index e15e362dc..fdbe5a502 100644 --- a/cinder/tests/unit/test_glusterfs.py +++ b/cinder/tests/unit/volume/drivers/test_glusterfs.py @@ -24,7 +24,6 @@ import traceback import mock import os_brick from oslo_concurrency import processutils as putils -from oslo_config import cfg from oslo_utils import imageutils from oslo_utils import units @@ -43,9 +42,6 @@ from cinder.volume.drivers import glusterfs from cinder.volume.drivers import remotefs as remotefs_drv -CONF = cfg.CONF - - class FakeDb(object): msg = "Tests are broken: mock this out." 
@@ -216,7 +212,7 @@ class GlusterFsDriverTestCase(test.TestCase): def test_update_volume_stats_thin(self): """_update_volume_stats_thin with qcow2 files.""" drv = self._driver - rfsdriver = remotefs_drv.RemoteFSSnapDriver + rfsdriver = remotefs_drv.RemoteFSSnapDriverBase with mock.patch.object(rfsdriver, '_update_volume_stats') as \ mock_update_volume_stats,\ @@ -240,7 +236,7 @@ class GlusterFsDriverTestCase(test.TestCase): def test_update_volume_stats_thick(self): """_update_volume_stats_thick with raw files.""" drv = self._driver - rfsdriver = remotefs_drv.RemoteFSSnapDriver + rfsdriver = remotefs_drv.RemoteFSSnapDriverBase with mock.patch.object(rfsdriver, '_update_volume_stats') as \ mock_update_volume_stats: diff --git a/cinder/tests/unit/test_gpfs.py b/cinder/tests/unit/volume/drivers/test_gpfs.py similarity index 100% rename from cinder/tests/unit/test_gpfs.py rename to cinder/tests/unit/volume/drivers/test_gpfs.py diff --git a/cinder/tests/unit/volume/drivers/test_kaminario.py b/cinder/tests/unit/volume/drivers/test_kaminario.py index 06ed7d230..82f0a99f6 100644 --- a/cinder/tests/unit/volume/drivers/test_kaminario.py +++ b/cinder/tests/unit/volume/drivers/test_kaminario.py @@ -14,14 +14,19 @@ # under the License. """Unit tests for kaminario driver.""" import mock +from oslo_utils import units +import time from cinder import context from cinder import exception +from cinder import objects +from cinder.objects import fields from cinder import test from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder import utils from cinder.volume import configuration +from cinder.volume.drivers.kaminario import kaminario_common from cinder.volume.drivers.kaminario import kaminario_fc from cinder.volume.drivers.kaminario import kaminario_iscsi from cinder.volume import utils as vol_utils @@ -46,6 +51,18 @@ class FakeSaveObject(FakeK2Obj): self.snapshot = FakeK2Obj() self.name = 'test' self.pwwn = '50024f4053300300' + self.volume_group = self + self.is_dedup = True + self.size = units.Mi + self.replication_status = None + self.state = 'in_sync' + self.generation_number = 548 + self.current_role = 'target' + self.current_snapshot_progress = 100 + self.current_snapshot_id = None + + def refresh(self): + return def save(self): return FakeSaveObject() @@ -92,6 +109,13 @@ class FakeKrestException(object): return FakeSaveObjectExp() +class Replication(object): + backend_id = '10.0.0.1' + login = 'login' + password = 'password' + rpo = 500 + + class TestKaminarioISCSI(test.TestCase): driver = None conf = None @@ -103,6 +127,7 @@ class TestKaminarioISCSI(test.TestCase): self.context = context.get_admin_context() self.vol = fake_volume.fake_volume_obj(self.context) self.vol.volume_type = fake_volume.fake_volume_type_obj(self.context) + self.vol.volume_type.extra_specs = {'foo': None} self.snap = fake_snapshot.fake_snapshot_obj(self.context) self.snap.volume = self.vol @@ -142,8 +167,16 @@ class TestKaminarioISCSI(test.TestCase): def test_create_snapshot(self): """Test create_snapshot.""" + self.snap.id = "253b2878-ec60-4793-ad19-e65496ec7aab" + self.driver.client.new = mock.Mock() result = self.driver.create_snapshot(self.snap) self.assertIsNone(result) + fake_object = self.driver.client.search().hits[0] + self.driver.client.new.assert_called_once_with( + "snapshots", + short_name='cs-253b2878-ec60-4793-ad19-e65496ec7aab', + source=fake_object, retention_policy=fake_object, + is_auto_deleteable=False) def test_create_snapshot_with_exception(self): """Test 
create_snapshot_with_exception.""" @@ -256,7 +289,7 @@ class TestKaminarioISCSI(test.TestCase): def test_get_target_info(self): """Test get_target_info.""" - iscsi_portal, target_iqn = self.driver.get_target_info() + iscsi_portal, target_iqn = self.driver.get_target_info(self.vol) self.assertEqual('10.0.0.1:3260', iscsi_portal) self.assertEqual('xyztlnxyz', target_iqn) @@ -265,6 +298,225 @@ class TestKaminarioISCSI(test.TestCase): result = self.driver.k2_initialize_connection(self.vol, CONNECTOR) self.assertEqual(548, result) + def test_manage_existing(self): + """Test manage_existing.""" + self.driver._get_replica_status = mock.Mock(return_value=False) + result = self.driver.manage_existing(self.vol, {'source-name': 'test'}) + self.assertIsNone(result) + + def test_manage_existing_exp(self): + self.driver._get_replica_status = mock.Mock(return_value=True) + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, self.vol, + {'source-name': 'test'}) + + def test_manage_existing_get_size(self): + """Test manage_existing_get_size.""" + self.driver.client.search().hits[0].size = units.Mi + result = self.driver.manage_existing_get_size(self.vol, + {'source-name': 'test'}) + self.assertEqual(1, result) + + def test_get_is_dedup(self): + """Test _get_is_dedup.""" + result = self.driver._get_is_dedup(self.vol.volume_type) + self.assertTrue(result) + + def test_get_is_dedup_false(self): + """Test _get_is_dedup_false.""" + specs = {'kaminario:thin_prov_type': 'nodedup'} + self.vol.volume_type.extra_specs = specs + result = self.driver._get_is_dedup(self.vol.volume_type) + self.assertFalse(result) + + def test_get_replica_status(self): + """Test _get_replica_status.""" + result = self.driver._get_replica_status(self.vol) + self.assertTrue(result) + + def test_create_volume_replica(self): + """Test _create_volume_replica.""" + vg = FakeSaveObject() + rep = Replication() + self.driver.replica = rep + session_name = self.driver.get_session_name('1234567890987654321') + self.assertEqual('ssn-1234567890987654321', session_name) + rsession_name = self.driver.get_rep_name(session_name) + self.assertEqual('rssn-1234567890987654321', rsession_name) + src_ssn = self.driver.client.new("replication/sessions").save() + self.assertEqual('in_sync', src_ssn.state) + result = self.driver._create_volume_replica(self.vol, vg, vg, rep.rpo) + self.assertIsNone(result) + + def test_create_volume_replica_exp(self): + """Test _create_volume_replica_exp.""" + vg = FakeSaveObject() + rep = Replication() + self.driver.replica = rep + self.driver.client = FakeKrestException() + self.assertRaises(exception.KaminarioCinderDriverException, + self.driver._create_volume_replica, self.vol, + vg, vg, rep.rpo) + + def test_delete_by_ref(self): + """Test _delete_by_ref.""" + result = self.driver._delete_by_ref(self.driver.client, 'volume', + 'name', 'message') + self.assertIsNone(result) + + def test_failover_volume(self): + """Test _failover_volume.""" + self.driver.target = FakeKrest() + session_name = self.driver.get_session_name('1234567890987654321') + self.assertEqual('ssn-1234567890987654321', session_name) + rsession_name = self.driver.get_rep_name(session_name) + self.assertEqual('rssn-1234567890987654321', rsession_name) + result = self.driver._failover_volume(self.vol) + self.assertIsNone(result) + + @mock.patch.object(kaminario_common.KaminarioCinderDriver, + '_check_for_status') + @mock.patch.object(objects.service.Service, 'get_by_args') + def test_failover_host(self, get_by_args, 
check_status): + """Test failover_host.""" + mock_args = mock.Mock() + mock_args.active_backend_id = '10.0.0.1' + self.vol.replication_status = 'failed-over' + self.driver.configuration.san_ip = '10.0.0.1' + get_by_args.side_effect = [mock_args, mock_args] + self.driver.host = 'host' + volumes = [self.vol, self.vol] + self.driver.replica = Replication() + self.driver.target = FakeKrest() + self.driver.target.search().total = 1 + self.driver.client.search().total = 1 + backend_ip, res_volumes = self.driver.failover_host(None, volumes) + self.assertEqual('10.0.0.1', backend_ip) + status = res_volumes[0]['updates']['replication_status'] + self.assertEqual(fields.ReplicationStatus.FAILED_OVER, status) + # different backend ip + self.driver.configuration.san_ip = '10.0.0.2' + self.driver.client.search().hits[0].state = 'in_sync' + backend_ip, res_volumes = self.driver.failover_host(None, volumes) + self.assertEqual('10.0.0.2', backend_ip) + status = res_volumes[0]['updates']['replication_status'] + self.assertEqual(fields.ReplicationStatus.DISABLED, status) + + def test_delete_volume_replica(self): + """Test _delete_volume_replica.""" + self.driver.replica = Replication() + self.driver.target = FakeKrest() + session_name = self.driver.get_session_name('1234567890987654321') + self.assertEqual('ssn-1234567890987654321', session_name) + rsession_name = self.driver.get_rep_name(session_name) + self.assertEqual('rssn-1234567890987654321', rsession_name) + res = self.driver._delete_by_ref(self.driver.client, 'volumes', + 'test', 'test') + self.assertIsNone(res) + result = self.driver._delete_volume_replica(self.vol, 'test', 'test') + self.assertIsNone(result) + src_ssn = self.driver.client.search("replication/sessions").hits[0] + self.assertEqual('idle', src_ssn.state) + + def test_delete_volume_replica_exp(self): + """Test _delete_volume_replica_exp.""" + self.driver.replica = Replication() + self.driver.target = FakeKrestException() + self.driver._check_for_status = mock.Mock() + self.assertRaises(exception.KaminarioCinderDriverException, + self.driver._delete_volume_replica, self.vol, + 'test', 'test') + + def test_get_is_replica(self): + """Test get_is_replica.""" + result = self.driver._get_is_replica(self.vol.volume_type) + self.assertFalse(result) + + def test_get_is_replica_true(self): + """Test get_is_replica_true.""" + self.driver.replica = Replication() + self.vol.volume_type.extra_specs = {'kaminario:replication': 'enabled'} + result = self.driver._get_is_replica(self.vol.volume_type) + self.assertTrue(result) + + def test_after_volume_copy(self): + """Test after_volume_copy.""" + result = self.driver.after_volume_copy(None, self.vol, + self.vol.volume_type) + self.assertIsNone(result) + + def test_retype(self): + """Test retype.""" + replica_status = self.driver._get_replica_status('test') + self.assertTrue(replica_status) + replica = self.driver._get_is_replica(self.vol.volume_type) + self.assertFalse(replica) + self.driver.replica = Replication() + result = self.driver._add_replication(self.vol) + self.assertIsNone(result) + self.driver.target = FakeKrest() + self.driver._check_for_status = mock.Mock() + result = self.driver._delete_replication(self.vol) + self.assertIsNone(result) + self.driver._delete_volume_replica = mock.Mock() + result = self.driver.retype(None, self.vol, + self.vol.volume_type, None, None) + self.assertTrue(result) + new_vol_type = fake_volume.fake_volume_type_obj(self.context) + new_vol_type.extra_specs = {'kaminario:thin_prov_type': 'nodedup'} + result2 = 
self.driver.retype(None, self.vol, + new_vol_type, None, None) + self.assertFalse(result2) + + def test_add_replication(self): + """Test _add_replication.""" + self.driver.replica = Replication() + result = self.driver._add_replication(self.vol) + self.assertIsNone(result) + + def test_delete_replication(self): + """Test _delete_replication.""" + self.driver.replica = Replication() + self.driver.target = FakeKrest() + self.driver._check_for_status = mock.Mock() + result = self.driver._delete_replication(self.vol) + self.assertIsNone(result) + + def test_create_failover_volume_replica(self): + """Test _create_failover_volume_replica.""" + self.driver.replica = Replication() + self.driver.target = FakeKrest() + self.driver.configuration.san_ip = '10.0.0.1' + result = self.driver._create_failover_volume_replica(self.vol, + 'test', 'test') + self.assertIsNone(result) + + def test_create_volume_replica_user_snap(self): + """Test create_volume_replica_user_snap.""" + result = self.driver._create_volume_replica_user_snap(FakeKrest(), + 'sess') + self.assertEqual(548, result) + + def test_is_user_snap_sync_finished(self): + """Test _is_user_snap_sync_finished.""" + sess_mock = mock.Mock() + sess_mock.refresh = mock.Mock() + sess_mock.generation_number = 548 + sess_mock.current_snapshot_id = None + sess_mock.current_snapshot_progress = 100 + sess_mock.current_snapshot_id = None + self.driver.snap_updates = [{'tgt_ssn': sess_mock, 'gno': 548, + 'stime': time.time()}] + result = self.driver._is_user_snap_sync_finished() + self.assertIsNone(result) + + def test_delete_failover_volume_replica(self): + """Test _delete_failover_volume_replica.""" + self.driver.target = FakeKrest() + result = self.driver._delete_failover_volume_replica(self.vol, 'test', + 'test') + self.assertIsNone(result) + class TestKaminarioFC(TestKaminarioISCSI): @@ -284,7 +536,7 @@ class TestKaminarioFC(TestKaminarioISCSI): def test_get_target_info(self): """Test get_target_info.""" - target_wwpn = self.driver.get_target_info() + target_wwpn = self.driver.get_target_info(self.vol) self.assertEqual(['50024f4053300300'], target_wwpn) def test_terminate_connection(self): diff --git a/cinder/tests/unit/test_lvm_driver.py b/cinder/tests/unit/volume/drivers/test_lvm_driver.py similarity index 90% rename from cinder/tests/unit/test_lvm_driver.py rename to cinder/tests/unit/volume/drivers/test_lvm_driver.py index ea9aa42e7..a1d504878 100644 --- a/cinder/tests/unit/test_lvm_driver.py +++ b/cinder/tests/unit/volume/drivers/test_lvm_driver.py @@ -22,12 +22,11 @@ from oslo_config import cfg from cinder.brick.local_dev import lvm as brick_lvm from cinder import db from cinder import exception -from cinder import objects from cinder.objects import fields +from cinder.tests import fake_driver from cinder.tests.unit.brick import fake_lvm from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_driver -from cinder.tests.unit.test_volume import DriverTestCase +from cinder.tests.unit import test_volume from cinder.tests.unit.test_volume import fake_opt from cinder.tests.unit import utils as tests_utils from cinder import utils @@ -40,7 +39,7 @@ CONF = cfg.CONF @ddt.ddt -class LVMVolumeDriverTestCase(DriverTestCase): +class LVMVolumeDriverTestCase(test_volume.DriverTestCase): """Test case for VolumeDriver""" driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" FAKE_VOLUME = {'name': 'test1', @@ -145,9 +144,8 @@ class LVMVolumeDriverTestCase(DriverTestCase): vol = tests_utils.create_volume(self.context) 
self.context.user_id = fake.USER_ID self.context.project_id = fake.PROJECT_ID - backup = tests_utils.create_backup(self.context, - vol['id']) - backup_obj = objects.Backup.get_by_id(self.context, backup.id) + backup_obj = tests_utils.create_backup(self.context, + vol['id']) properties = {} attach_info = {'device': {'path': '/dev/null'}} @@ -233,9 +231,8 @@ class LVMVolumeDriverTestCase(DriverTestCase): mock_volume_get.return_value = vol temp_snapshot = tests_utils.create_snapshot(self.context, vol['id']) - backup = tests_utils.create_backup(self.context, - vol['id']) - backup_obj = objects.Backup.get_by_id(self.context, backup.id) + backup_obj = tests_utils.create_backup(self.context, + vol['id']) properties = {} attach_info = {'device': {'path': '/dev/null'}} backup_service = mock.Mock() @@ -791,6 +788,84 @@ class LVMVolumeDriverTestCase(DriverTestCase): ret = self.volume.driver.unmanage(volume) self.assertIsNone(ret) + def test_lvm_get_manageable_volumes(self): + cinder_vols = [{'id': '00000000-0000-0000-0000-000000000000'}] + lvs = [{'name': 'volume-00000000-0000-0000-0000-000000000000', + 'size': '1.75'}, + {'name': 'volume-00000000-0000-0000-0000-000000000001', + 'size': '3.0'}, + {'name': 'snapshot-00000000-0000-0000-0000-000000000002', + 'size': '2.2'}, + {'name': 'myvol', 'size': '4.0'}] + self.volume.driver.vg = mock.Mock() + self.volume.driver.vg.get_volumes.return_value = lvs + self.volume.driver.vg.lv_is_snapshot.side_effect = [False, False, + True, False] + self.volume.driver.vg.lv_is_open.side_effect = [True, False] + res = self.volume.driver.get_manageable_volumes(cinder_vols, None, + 1000, 0, + ['size'], ['asc']) + exp = [{'size': 2, 'extra_info': None, + 'reference': {'source-name': + 'volume-00000000-0000-0000-0000-000000000000'}, + 'cinder_id': '00000000-0000-0000-0000-000000000000', + 'safe_to_manage': False, 'reason_not_safe': 'already managed'}, + {'size': 3, 'reason_not_safe': 'volume in use', + 'reference': {'source-name': + 'volume-00000000-0000-0000-0000-000000000001'}, + 'safe_to_manage': False, 'cinder_id': None, + 'extra_info': None}, + {'size': 4, 'reason_not_safe': None, + 'safe_to_manage': True, 'reference': {'source-name': 'myvol'}, + 'cinder_id': None, 'extra_info': None}] + self.assertEqual(exp, res) + + def test_lvm_get_manageable_snapshots(self): + cinder_snaps = [{'id': '00000000-0000-0000-0000-000000000000'}] + lvs = [{'name': 'snapshot-00000000-0000-0000-0000-000000000000', + 'size': '1.75'}, + {'name': 'volume-00000000-0000-0000-0000-000000000001', + 'size': '3.0'}, + {'name': 'snapshot-00000000-0000-0000-0000-000000000002', + 'size': '2.2'}, + {'name': 'mysnap', 'size': '4.0'}] + self.volume.driver.vg = mock.Mock() + self.volume.driver.vg.get_volumes.return_value = lvs + self.volume.driver.vg.lv_is_snapshot.side_effect = [True, False, True, + True] + self.volume.driver.vg.lv_is_open.side_effect = [True, False] + self.volume.driver.vg.lv_get_origin.side_effect = [ + 'volume-00000000-0000-0000-0000-000000000000', + 'volume-00000000-0000-0000-0000-000000000002', + 'myvol'] + res = self.volume.driver.get_manageable_snapshots(cinder_snaps, None, + 1000, 0, + ['size'], ['asc']) + exp = [{'size': 2, 'reason_not_safe': 'already managed', + 'reference': + {'source-name': + 'snapshot-00000000-0000-0000-0000-000000000000'}, + 'safe_to_manage': False, 'extra_info': None, + 'cinder_id': '00000000-0000-0000-0000-000000000000', + 'source_reference': + {'source-name': + 'volume-00000000-0000-0000-0000-000000000000'}}, + {'size': 3, 
'reason_not_safe': 'snapshot in use', + 'reference': + {'source-name': + 'snapshot-00000000-0000-0000-0000-000000000002'}, + 'safe_to_manage': False, 'extra_info': None, + 'cinder_id': None, + 'source_reference': + {'source-name': + 'volume-00000000-0000-0000-0000-000000000002'}}, + {'size': 4, 'reason_not_safe': None, + 'reference': {'source-name': 'mysnap'}, + 'safe_to_manage': True, 'cinder_id': None, + 'source_reference': {'source-name': 'myvol'}, + 'extra_info': None}] + self.assertEqual(exp, res) + # Global setting, LVM setting, expected outcome @ddt.data((10.0, 2.0, 2.0)) @ddt.data((10.0, None, 10.0)) @@ -812,7 +887,7 @@ class LVMVolumeDriverTestCase(DriverTestCase): lvm_driver.configuration.max_over_subscription_ratio) -class LVMISCSITestCase(DriverTestCase): +class LVMISCSITestCase(test_volume.DriverTestCase): """Test Case for LVMISCSIDriver""" driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" diff --git a/cinder/tests/unit/test_nfs.py b/cinder/tests/unit/volume/drivers/test_nfs.py similarity index 94% rename from cinder/tests/unit/test_nfs.py rename to cinder/tests/unit/volume/drivers/test_nfs.py index bc1615f69..37e804596 100644 --- a/cinder/tests/unit/test_nfs.py +++ b/cinder/tests/unit/volume/drivers/test_nfs.py @@ -1066,6 +1066,72 @@ class NfsDriverDoSetupTestCase(test.TestCase): mock_os_path_exists.assert_has_calls( [mock.call(self.configuration.nfs_shares_config)]) + def test_setup_should_not_throw_error_if_host_and_share_set(self): + """do_setup shouldn't throw shares file error if host and share set.""" + + drv = nfs.NfsDriver(configuration=self.configuration) + + self.override_config('nas_host', 'nfs-host1') + self.override_config('nas_share_path', '/export') + mock_os_path_exists = self.mock_object(os.path, 'exists') + mock_os_path_exists.return_value = False + mock_set_nas_sec_options = self.mock_object(nfs.NfsDriver, + 'set_nas_security_options') + mock_set_nas_sec_options.return_value = True + mock_execute = self.mock_object(drv, '_execute') + mock_execute.return_value = True + + drv.do_setup(self.context) + + mock_os_path_exists.assert_not_called() + + def test_setup_throw_error_if_shares_file_does_not_exist_no_host(self): + """do_setup should throw error if no shares file and no host set.""" + + drv = nfs.NfsDriver(configuration=self.configuration) + + self.override_config('nas_share_path', '/export') + mock_os_path_exists = self.mock_object(os.path, 'exists') + mock_os_path_exists.return_value = False + + with self.assertRaisesRegex(exception.NfsException, + "NFS config file.*doesn't exist"): + drv.do_setup(self.context) + + mock_os_path_exists.assert_has_calls( + [mock.call(self.configuration.nfs_shares_config)]) + + def test_setup_throw_error_if_shares_file_does_not_exist_no_share(self): + """do_setup should throw error if no shares file and no share set.""" + + drv = nfs.NfsDriver(configuration=self.configuration) + + self.override_config('nas_host', 'nfs-host1') + mock_os_path_exists = self.mock_object(os.path, 'exists') + mock_os_path_exists.return_value = False + + with self.assertRaisesRegex(exception.NfsException, + "NFS config file.*doesn't exist"): + drv.do_setup(self.context) + + mock_os_path_exists.assert_has_calls( + [mock.call(self.configuration.nfs_shares_config)]) + + def test_setup_throw_error_if_shares_file_doesnt_exist_no_share_host(self): + """do_setup should throw error if no shares file and no host/share.""" + + drv = nfs.NfsDriver(configuration=self.configuration) + + mock_os_path_exists = self.mock_object(os.path, 'exists') + 
mock_os_path_exists.return_value = False + + with self.assertRaisesRegex(exception.NfsException, + "NFS config file.*doesn't exist"): + drv.do_setup(self.context) + + mock_os_path_exists.assert_has_calls( + [mock.call(self.configuration.nfs_shares_config)]) + def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self): """do_setup should throw error if nfs client is not installed.""" diff --git a/cinder/tests/unit/test_nimble.py b/cinder/tests/unit/volume/drivers/test_nimble.py similarity index 88% rename from cinder/tests/unit/test_nimble.py rename to cinder/tests/unit/volume/drivers/test_nimble.py index d8a58d4f9..6cc643701 100644 --- a/cinder/tests/unit/test_nimble.py +++ b/cinder/tests/unit/volume/drivers/test_nimble.py @@ -16,21 +16,22 @@ import sys import mock -from oslo_config import cfg from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import volume as obj_volume from cinder import test +from cinder.tests.unit import fake_constants as fake from cinder.volume.drivers import nimble from cinder.volume import volume_types -CONF = cfg.CONF NIMBLE_CLIENT = 'cinder.volume.drivers.nimble.client' NIMBLE_URLLIB2 = 'six.moves.urllib.request' NIMBLE_RANDOM = 'cinder.volume.drivers.nimble.random' +NIMBLE_ISCSI_DRIVER = 'cinder.volume.drivers.nimble.NimbleISCSIDriver' +DRIVER_VERSION = '3.0.0' FAKE_ENUM_STRING = """ @@ -119,6 +120,23 @@ FAKE_GET_VOL_INFO_RESPONSE = { 'agent-type': 1, 'online': False}} +FAKE_GET_VOL_INFO_BACKUP_RESPONSE = { + 'err-list': {'err-list': [{'code': 0}]}, + 'vol': {'target-name': 'iqn.test', + 'name': 'test_vol', + 'agent-type': 1, + 'clone': 1, + 'base-snap': 'test-backup-snap', + 'parent-vol': 'volume-' + fake.VOLUME2_ID, + 'online': False}} + +FAKE_GET_SNAP_INFO_BACKUP_RESPONSE = { + 'err-list': {'err-list': [{'code': 0}]}, + 'snap': {'description': "backup-vol-" + fake.VOLUME2_ID, + 'name': 'test-backup-snap', + 'vol': 'volume-' + fake.VOLUME_ID} +} + FAKE_GET_VOL_INFO_ONLINE = { 'err-list': {'err-list': [{'code': 0}]}, 'vol': {'target-name': 'iqn.test', @@ -137,8 +155,7 @@ FAKE_GET_VOL_INFO_RESPONSE_WITH_SET_AGENT_TYPE = { 'name': 'test_vol', 'agent-type': 5}} - -FAKE_TYPE_ID = 12345 +FAKE_TYPE_ID = fake.VOLUME_TYPE_ID def create_configuration(username, password, ip_address, @@ -478,6 +495,8 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) + @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock( + return_value = ['', ''])) def test_delete_volume(self): self.mock_client_service.service.onlineVol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE @@ -495,6 +514,46 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): request={'name': 'testvolume', 'sid': 'a9b9aba7'})] self.mock_client_service.assert_has_calls(expected_calls) + @mock.patch(NIMBLE_URLLIB2) + @mock.patch(NIMBLE_CLIENT) + @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( + 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) + @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock( + return_value=['test-backup-snap', 'volume-' + fake.VOLUME_ID])) + @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host') + def test_delete_volume_with_backup(self, mock_volume_list): + mock_volume_list.return_value = [] + self.mock_client_service.service.onlineVol.return_value = \ + FAKE_GENERIC_POSITIVE_RESPONSE + 
self.mock_client_service.service.deleteVol.return_value = \ + FAKE_GENERIC_POSITIVE_RESPONSE + self.mock_client_service.service.dissocProtPol.return_value = \ + FAKE_GENERIC_POSITIVE_RESPONSE + self.mock_client_service.service.onlineSnap.return_value = \ + FAKE_GENERIC_POSITIVE_RESPONSE + self.mock_client_service.service.deleteSnap.return_value = \ + FAKE_GENERIC_POSITIVE_RESPONSE + + self.driver.delete_volume({'name': 'testvolume'}) + expected_calls = [mock.call.service.onlineVol( + request={ + 'online': False, 'name': 'testvolume', 'sid': 'a9b9aba7'}), + mock.call.service.dissocProtPol( + request={'vol-name': 'testvolume', 'sid': 'a9b9aba7'}), + mock.call.service.deleteVol( + request={'name': 'testvolume', 'sid': 'a9b9aba7'}), + mock.call.service.onlineSnap( + request={'vol': 'volume-' + fake.VOLUME_ID, + 'name': 'test-backup-snap', + 'online': False, + 'sid': 'a9b9aba7'}), + mock.call.service.deleteSnap( + request={'vol': 'volume-' + fake.VOLUME_ID, + 'name': 'test-backup-snap', + 'sid': 'a9b9aba7'})] + + self.mock_client_service.assert_has_calls(expected_calls) + @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', @@ -517,18 +576,19 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:encryption': 'yes', - 'nimble:multi-initiator': 'false'})) + mock.Mock(type_id=FAKE_TYPE_ID, + return_value= + {'nimble:perfpol-name': 'default', + 'nimble:encryption': 'yes', + 'nimble:multi-initiator': 'false'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*', False)) + @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host') @mock.patch(NIMBLE_RANDOM) - def test_create_cloned_volume(self, mock_random): - mock_random.sample.return_value = 'abcdefghijkl' + def test_create_cloned_volume(self, mock_random, mock_volume_list): + mock_random.sample.return_value = fake.VOLUME_ID + mock_volume_list.return_value = [] self.mock_client_service.service.snapVol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.mock_client_service.service.cloneVol.return_value = \ @@ -537,25 +597,36 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): FAKE_GET_VOL_INFO_RESPONSE self.mock_client_service.service.getNetConfig.return_value = \ FAKE_POSITIVE_NETCONFIG_RESPONSE + + volume = obj_volume.Volume(context.get_admin_context(), + id=fake.VOLUME_ID, + size=5.0, + _name_id=None, + display_name='', + volume_type_id=FAKE_TYPE_ID + ) + src_volume = obj_volume.Volume(context.get_admin_context(), + id=fake.VOLUME2_ID, + _name_id=None, + size=5.0) self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test 0', 'provider_auth': None}, - self.driver.create_cloned_volume({'name': 'volume', - 'size': 5, - 'volume_type_id': FAKE_TYPE_ID}, - {'name': 'testvolume', - 'size': 5})) + self.driver.create_cloned_volume(volume, src_volume)) expected_calls = [mock.call.service.snapVol( request={ - 'vol': 'testvolume', - 'snapAttr': {'name': 'openstack-clone-volume-abcdefghijkl', + 'vol': "volume-" + fake.VOLUME2_ID, + 'snapAttr': {'name': 'openstack-clone-volume-' + + fake.VOLUME_ID + + "-" + fake.VOLUME_ID, 'description': ''}, 'sid': 'a9b9aba7'}), mock.call.service.cloneVol( 
request={ - 'snap-name': 'openstack-clone-volume-abcdefghijkl', + 'snap-name': 'openstack-clone-volume-' + fake.VOLUME_ID + + "-" + fake.VOLUME_ID, 'attr': {'snap-quota': sys.maxsize, - 'name': 'volume', + 'name': 'volume-' + fake.VOLUME_ID, 'quota': 5368709120, 'reserve': 5368709120, 'online': True, @@ -564,7 +635,7 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): 'multi-initiator': 'false', 'perfpol-name': 'default', 'agent-type': 5}, - 'name': 'testvolume', + 'name': 'volume-' + fake.VOLUME2_ID, 'sid': 'a9b9aba7'})] self.mock_client_service.assert_has_calls(expected_calls) @@ -744,7 +815,7 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): def test_get_volume_stats(self): self.mock_client_service.service.getGroupConfig.return_value = \ FAKE_POSITIVE_GROUP_CONFIG_RESPONSE - expected_res = {'driver_version': '2.0.2', + expected_res = {'driver_version': DRIVER_VERSION, 'vendor_name': 'Nimble', 'volume_backend_name': 'NIMBLE', 'storage_protocol': 'iSCSI', @@ -757,6 +828,33 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): expected_res, self.driver.get_volume_stats(refresh=True)) + @mock.patch(NIMBLE_URLLIB2) + @mock.patch(NIMBLE_CLIENT) + @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', + mock.Mock(return_value=[])) + @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( + 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) + def test_is_volume_backup_clone(self): + self.mock_client_service.service.getVolInfo.return_value = \ + FAKE_GET_VOL_INFO_BACKUP_RESPONSE + self.mock_client_service.service.getSnapInfo.return_value = \ + FAKE_GET_SNAP_INFO_BACKUP_RESPONSE + volume = obj_volume.Volume(context.get_admin_context(), + id=fake.VOLUME_ID, + _name_id=None) + self.assertEqual(("test-backup-snap", "volume-" + fake.VOLUME_ID), + self.driver.is_volume_backup_clone(volume)) + expected_calls = [ + mock.call.service.getVolInfo( + request={'name': 'volume-' + fake.VOLUME_ID, + 'sid': 'a9b9aba7'}), + mock.call.service.getSnapInfo( + request={'sid': 'a9b9aba7', + 'vol': 'volume-' + fake.VOLUME2_ID, + 'name': 'test-backup-snap'}) + ] + self.mock_client_service.assert_has_calls(expected_calls) + class NimbleDriverSnapshotTestCase(NimbleDriverBaseTestCase): @@ -880,7 +978,7 @@ class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase): expected_res = { 'driver_volume_type': 'iscsi', 'data': { - 'target_lun': '14', + 'target_lun': 14, 'volume_id': 12, 'target_iqn': '13', 'target_discovered': False, @@ -923,7 +1021,7 @@ class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase): expected_res = { 'driver_volume_type': 'iscsi', 'data': { - 'target_lun': '14', + 'target_lun': 14, 'volume_id': 12, 'target_iqn': '13', 'target_discovered': False, diff --git a/cinder/tests/unit/test_prophetstor_dpl.py b/cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py similarity index 100% rename from cinder/tests/unit/test_prophetstor_dpl.py rename to cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py diff --git a/cinder/tests/unit/test_pure.py b/cinder/tests/unit/volume/drivers/test_pure.py similarity index 92% rename from cinder/tests/unit/test_pure.py rename to cinder/tests/unit/volume/drivers/test_pure.py index 6625948e4..24fb2231f 100644 --- a/cinder/tests/unit/test_pure.py +++ b/cinder/tests/unit/volume/drivers/test_pure.py @@ -23,6 +23,7 @@ from oslo_utils import units from cinder import exception from cinder import test from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit import fake_snapshot from 
cinder.tests.unit import fake_volume @@ -122,7 +123,7 @@ SNAPSHOT_WITH_CGROUP = SNAPSHOT.copy() SNAPSHOT_WITH_CGROUP['cgsnapshot_id'] = \ "4a2f7e3a-312a-40c5-96a8-536b8a0fe075" INITIATOR_IQN = "iqn.1993-08.org.debian:01:222" -INITIATOR_WWN = "5001500150015081" +INITIATOR_WWN = "5001500150015081abc" ISCSI_CONNECTOR = {"initiator": INITIATOR_IQN, "host": HOSTNAME} FC_CONNECTOR = {"wwpns": {INITIATOR_WWN}, "host": HOSTNAME} TARGET_IQN = "iqn.2010-06.com.purestorage:flasharray.12345abc" @@ -132,7 +133,7 @@ INITIATOR_TARGET_MAP =\ { # _build_initiator_target_map() calls list(set()) on the list, # we must also call list(set()) to get the exact same order - '5001500150015081': list(set(FC_WWNS)), + '5001500150015081abc': list(set(FC_WWNS)), } DEVICE_MAPPING =\ { @@ -323,6 +324,108 @@ REPLICATED_VOL_TYPE = {"is_public": True, " True"}, "name": "volume_type_2", "id": VOLUME_TYPE_ID} +MANAGEABLE_PURE_VOLS = [ + { + 'name': 'myVol1', + 'serial': '8E9C7E588B16C1EA00048CCA', + 'size': 3221225472, + 'created': '2016-08-05T17:26:34Z', + 'source': None, + }, + { + 'name': 'myVol2', + 'serial': '8E9C7E588B16C1EA00048CCB', + 'size': 3221225472, + 'created': '2016-08-05T17:26:34Z', + 'source': None, + }, + { + 'name': 'myVol3', + 'serial': '8E9C7E588B16C1EA00048CCD', + 'size': 3221225472, + 'created': '2016-08-05T17:26:34Z', + 'source': None, + } +] +MANAGEABLE_PURE_VOL_REFS = [ + { + 'reference': {'name': 'myVol1'}, + 'size': 3, + 'safe_to_manage': True, + 'reason_not_safe': None, + 'cinder_id': None, + 'extra_info': None, + }, + { + 'reference': {'name': 'myVol2'}, + 'size': 3, + 'safe_to_manage': True, + 'reason_not_safe': None, + 'cinder_id': None, + 'extra_info': None, + }, + { + 'reference': {'name': 'myVol3'}, + 'size': 3, + 'safe_to_manage': True, + 'reason_not_safe': None, + 'cinder_id': None, + 'extra_info': None, + } +] + +MANAGEABLE_PURE_SNAPS = [ + { + 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap1', + 'serial': '8E9C7E588B16C1EA00048CCA', + 'size': 3221225472, + 'created': '2016-08-05T17:26:34Z', + 'source': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder', + }, + { + 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap2', + 'serial': '8E9C7E588B16C1EA00048CCB', + 'size': 4221225472, + 'created': '2016-08-05T17:26:34Z', + 'source': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder', + }, + { + 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap3', + 'serial': '8E9C7E588B16C1EA00048CCD', + 'size': 5221225472, + 'created': '2016-08-05T17:26:34Z', + 'source': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder', + } +] +MANAGEABLE_PURE_SNAP_REFS = [ + { + 'reference': {'name': MANAGEABLE_PURE_SNAPS[0]['name']}, + 'size': 3, + 'safe_to_manage': True, + 'reason_not_safe': None, + 'cinder_id': None, + 'extra_info': None, + 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[0]['source']}, + }, + { + 'reference': {'name': MANAGEABLE_PURE_SNAPS[1]['name']}, + 'size': 4, + 'safe_to_manage': True, + 'reason_not_safe': None, + 'cinder_id': None, + 'extra_info': None, + 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[1]['source']}, + }, + { + 'reference': {'name': MANAGEABLE_PURE_SNAPS[2]['name']}, + 'size': 5, + 'safe_to_manage': True, + 'reason_not_safe': None, + 'cinder_id': None, + 'extra_info': None, + 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[2]['source']}, + } +] class FakePureStorageHTTPError(Exception): @@ -741,15 +844,15 @@ class PureBaseVolumeDriverTestCase(PureBaseSharedDriverTestCase): 
self.assert_error_propagates([self.array.create_snapshot], self.driver.create_snapshot, SNAPSHOT) - def test_delete_snapshot(self): + @ddt.data("does not exist", "has been destroyed") + def test_delete_snapshot(self, error_text): snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"] self.driver.delete_snapshot(SNAPSHOT) expected = [mock.call.destroy_volume(snap_name)] self.array.assert_has_calls(expected) self.assertFalse(self.array.eradicate_volume.called) self.array.destroy_volume.side_effect = ( - self.purestorage_module.PureHTTPError(code=400, text="does not " - "exist")) + self.purestorage_module.PureHTTPError(code=400, text=error_text)) self.driver.delete_snapshot(SNAPSHOT) self.array.destroy_volume.side_effect = None self.assert_error_propagates([self.array.destroy_volume], @@ -1552,6 +1655,136 @@ class PureBaseVolumeDriverTestCase(PureBaseSharedDriverTestCase): self.assertEqual(expected, actual) return context, volume + def _test_get_manageable_things(self, + pure_objs=MANAGEABLE_PURE_VOLS, + expected_refs=MANAGEABLE_PURE_VOL_REFS, + pure_hosts=list(), + cinder_objs=list(), + is_snapshot=False): + self.array.list_volumes.return_value = pure_objs + self.array.list_hosts.return_value = pure_hosts + marker = mock.Mock() + limit = mock.Mock() + offset = mock.Mock() + sort_keys = mock.Mock() + sort_dirs = mock.Mock() + + with mock.patch('cinder.volume.utils.paginate_entries_list') as mpage: + if is_snapshot: + test_func = self.driver.get_manageable_snapshots + else: + test_func = self.driver.get_manageable_volumes + test_func(cinder_objs, marker, limit, offset, sort_keys, sort_dirs) + mpage.assert_called_once_with( + expected_refs, + marker, + limit, + offset, + sort_keys, + sort_dirs + ) + + def test_get_manageable_volumes(self): + """Default success case. + + Given a list of pure volumes from the REST API, give back a list + of volume references. + """ + self._test_get_manageable_things(pure_hosts=[PURE_HOST]) + + def test_get_manageable_volumes_connected_vol(self): + """Make sure volumes connected to hosts are flagged as unsafe.""" + connected_host = deepcopy(PURE_HOST) + connected_host['name'] = 'host2' + connected_host['vol'] = MANAGEABLE_PURE_VOLS[0]['name'] + pure_hosts = [PURE_HOST, connected_host] + + expected_refs = deepcopy(MANAGEABLE_PURE_VOL_REFS) + expected_refs[0]['safe_to_manage'] = False + expected_refs[0]['reason_not_safe'] = 'Volume connected to host host2.' + + self._test_get_manageable_things(expected_refs=expected_refs, + pure_hosts=pure_hosts) + + def test_get_manageable_volumes_already_managed(self): + """Make sure volumes already owned by cinder are flagged as unsafe.""" + cinder_vol = fake_volume.fake_volume_obj(mock.MagicMock()) + cinder_vol.id = VOLUME_ID + cinders_vols = [cinder_vol] + + # Have one of our vol names match up with the existing cinder volume + purity_vols = deepcopy(MANAGEABLE_PURE_VOLS) + purity_vols[0]['name'] = 'volume-' + VOLUME_ID + '-cinder' + + expected_refs = deepcopy(MANAGEABLE_PURE_VOL_REFS) + expected_refs[0]['reference'] = {'name': purity_vols[0]['name']} + expected_refs[0]['safe_to_manage'] = False + expected_refs[0]['reason_not_safe'] = 'Volume already managed.' 
+ expected_refs[0]['cinder_id'] = VOLUME_ID + + self._test_get_manageable_things(pure_objs=purity_vols, + expected_refs=expected_refs, + pure_hosts=[PURE_HOST], + cinder_objs=cinders_vols) + + def test_get_manageable_volumes_no_pure_volumes(self): + """Expect no refs to be found if no volumes are on Purity.""" + self._test_get_manageable_things(pure_objs=[], + expected_refs=[], + pure_hosts=[PURE_HOST]) + + def test_get_manageable_volumes_no_hosts(self): + """Success case with no hosts on Purity.""" + self._test_get_manageable_things(pure_hosts=[]) + + def test_get_manageable_snapshots(self): + """Default success case. + + Given a list of pure snapshots from the REST API, give back a list + of snapshot references. + """ + self._test_get_manageable_things( + pure_objs=MANAGEABLE_PURE_SNAPS, + expected_refs=MANAGEABLE_PURE_SNAP_REFS, + pure_hosts=[PURE_HOST], + is_snapshot=True + ) + + def test_get_manageable_snapshots_already_managed(self): + """Make sure snaps already owned by cinder are flagged as unsafe.""" + cinder_vol = fake_volume.fake_volume_obj(mock.MagicMock()) + cinder_vol.id = VOLUME_ID + cinder_snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock()) + cinder_snap.id = SNAPSHOT_ID + cinder_snap.volume = cinder_vol + cinder_snaps = [cinder_snap] + + purity_snaps = deepcopy(MANAGEABLE_PURE_SNAPS) + purity_snaps[0]['name'] = 'volume-%s-cinder.snapshot-%s' % ( + VOLUME_ID, SNAPSHOT_ID + ) + + expected_refs = deepcopy(MANAGEABLE_PURE_SNAP_REFS) + expected_refs[0]['reference'] = {'name': purity_snaps[0]['name']} + expected_refs[0]['safe_to_manage'] = False + expected_refs[0]['reason_not_safe'] = 'Snapshot already managed.' + expected_refs[0]['cinder_id'] = SNAPSHOT_ID + + self._test_get_manageable_things( + pure_objs=purity_snaps, + expected_refs=expected_refs, + cinder_objs=cinder_snaps, + pure_hosts=[PURE_HOST], + is_snapshot=True + ) + + def test_get_manageable_snapshots_no_pure_snapshots(self): + """Expect no refs to be found if no snapshots are on Purity.""" + self._test_get_manageable_things(pure_objs=[], + expected_refs=[], + pure_hosts=[PURE_HOST], + is_snapshot=True) + @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type', autospec=True) def test_retype_repl_to_repl(self, mock_is_replicated_type): self._test_retype_repl(mock_is_replicated_type, True, True) @@ -2280,6 +2513,16 @@ class PureFCDriverTestCase(PureDriverTestCase): self.array, FC_CONNECTOR) + def test_get_host_uppercase_wwpn(self): + expected_host = PURE_HOST.copy() + expected_host['wwn'] = [INITIATOR_WWN] + self.array.list_hosts.return_value = [expected_host] + connector = FC_CONNECTOR.copy() + connector['wwpns'] = [wwpn.upper() for wwpn in FC_CONNECTOR['wwpns']] + + actual_result = self.driver._get_host(self.array, connector) + self.assertEqual(expected_host, actual_result) + @mock.patch(FC_DRIVER_OBJ + "._connect") def test_initialize_connection(self, mock_connection): lookup_service = self.driver._lookup_service diff --git a/cinder/tests/unit/test_quobyte.py b/cinder/tests/unit/volume/drivers/test_quobyte.py similarity index 99% rename from cinder/tests/unit/test_quobyte.py rename to cinder/tests/unit/volume/drivers/test_quobyte.py index 04d2ed4b4..e5f08bef6 100644 --- a/cinder/tests/unit/test_quobyte.py +++ b/cinder/tests/unit/volume/drivers/test_quobyte.py @@ -22,7 +22,6 @@ import traceback import mock from oslo_concurrency import processutils as putils -from oslo_config import cfg from oslo_utils import imageutils from oslo_utils import units @@ -36,9 +35,6 @@ from cinder.volume import configuration as 
conf from cinder.volume.drivers import quobyte -CONF = cfg.CONF - - class FakeDb(object): msg = "Tests are broken: mock this out." diff --git a/cinder/tests/unit/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py similarity index 98% rename from cinder/tests/unit/test_rbd.py rename to cinder/tests/unit/volume/drivers/test_rbd.py index e0676fd81..7857c9451 100644 --- a/cinder/tests/unit/test_rbd.py +++ b/cinder/tests/unit/volume/drivers/test_rbd.py @@ -19,6 +19,7 @@ import ddt import math import os import tempfile +import unittest import mock from oslo_utils import imageutils @@ -184,6 +185,7 @@ class RBDTestCase(test.TestCase): self.assertRaises(exception.InvalidConfigurationValue, self.driver.check_for_setup_error) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_create_volume(self): client = self.mock_client.return_value @@ -202,6 +204,7 @@ class RBDTestCase(test.TestCase): client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_manage_existing_get_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ @@ -217,6 +220,7 @@ class RBDTestCase(test.TestCase): mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_manage_existing_get_non_integer_size(self): rbd_image = self.driver.rbd.Image.return_value @@ -244,6 +248,7 @@ class RBDTestCase(test.TestCase): mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_manage_existing(self): client = self.mock_client.return_value @@ -260,6 +265,7 @@ class RBDTestCase(test.TestCase): exist_volume, self.volume_a.name) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_manage_existing_with_exist_rbd_image(self): client = self.mock_client.return_value @@ -290,6 +296,7 @@ class RBDTestCase(test.TestCase): self.assertTrue( self.driver.rbd.Image.return_value.remove_snap.called) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_delete_volume(self): client = self.mock_client.return_value @@ -327,6 +334,7 @@ class RBDTestCase(test.TestCase): # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_delete_busy_volume(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] @@ -361,6 +369,7 @@ class RBDTestCase(test.TestCase): # Make sure the exception was raised self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_delete_volume_not_found(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] @@ -570,6 +579,7 @@ class RBDTestCase(test.TestCase): self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_create_cloned_volume_same_size(self): self.cfg.rbd_max_clone_depth = 2 @@ -596,6 +606,7 @@ class RBDTestCase(test.TestCase): self.assertEqual( 0, mock_resize.call_count) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_create_cloned_volume_different_size(self): self.cfg.rbd_max_clone_depth = 2 @@ -623,6 +634,7 @@ class RBDTestCase(test.TestCase): self.assertEqual( 1, 
mock_resize.call_count) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_create_cloned_volume_w_flatten(self): self.cfg.rbd_max_clone_depth = 1 @@ -659,6 +671,7 @@ class RBDTestCase(test.TestCase): 2, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_create_cloned_volume_w_clone_exception(self): self.cfg.rbd_max_clone_depth = 2 @@ -711,6 +724,7 @@ class RBDTestCase(test.TestCase): self.assertFalse( self.driver._is_cloneable(loc, {'disk_format': 'raw'})) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_cloneable(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: @@ -872,6 +886,7 @@ class RBDTestCase(test.TestCase): self.assertDictMatch(expected, actual) self.assertTrue(mock_get_mon_addrs.called) + @unittest.skip("Skip until bug #1578986 is fixed") @ddt.data({'rbd_chunk_size': 1, 'order': 20}, {'rbd_chunk_size': 8, 'order': 23}, {'rbd_chunk_size': 32, 'order': 25}) @@ -946,6 +961,7 @@ class RBDTestCase(test.TestCase): self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) + @unittest.skip("Skip until bug #1578986 is fixed") @common_mocks def test_update_migrated_volume(self): client = self.mock_client.return_value diff --git a/cinder/tests/unit/test_remotefs.py b/cinder/tests/unit/volume/drivers/test_remotefs.py similarity index 98% rename from cinder/tests/unit/test_remotefs.py rename to cinder/tests/unit/volume/drivers/test_remotefs.py index 2bf9947a1..03c403640 100644 --- a/cinder/tests/unit/test_remotefs.py +++ b/cinder/tests/unit/volume/drivers/test_remotefs.py @@ -208,8 +208,11 @@ class RemoteFsSnapDriverTestCase(test.TestCase): self._fake_volume.name, self._fake_snapshot_path) command1 = ['qemu-img', 'create', '-f', 'qcow2', '-o', - 'backing_file=%s' % fake_backing_path, - self._fake_snapshot_path] + 'backing_file=%s,backing_fmt=%s' % + (fake_backing_path, + mock.sentinel.backing_fmt), + self._fake_snapshot_path, + "%dG" % self._fake_volume.size] command2 = ['qemu-img', 'rebase', '-u', '-b', self._fake_volume.name, '-F', mock.sentinel.backing_fmt, diff --git a/cinder/tests/unit/test_san.py b/cinder/tests/unit/volume/drivers/test_san.py similarity index 100% rename from cinder/tests/unit/test_san.py rename to cinder/tests/unit/volume/drivers/test_san.py diff --git a/cinder/tests/unit/test_scality.py b/cinder/tests/unit/volume/drivers/test_scality.py similarity index 100% rename from cinder/tests/unit/test_scality.py rename to cinder/tests/unit/volume/drivers/test_scality.py diff --git a/cinder/tests/unit/test_sheepdog.py b/cinder/tests/unit/volume/drivers/test_sheepdog.py similarity index 96% rename from cinder/tests/unit/test_sheepdog.py rename to cinder/tests/unit/volume/drivers/test_sheepdog.py index fec8a88e6..d2639f24d 100644 --- a/cinder/tests/unit/test_sheepdog.py +++ b/cinder/tests/unit/volume/drivers/test_sheepdog.py @@ -107,6 +107,10 @@ class SheepdogDriverTestDataGenerator(object): return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'resize', name, size, '-a', SHEEP_ADDR, '-p', SHEEP_PORT) + def cmd_dog_vdi_list(self, name): + return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'list', name, + '-r', '-a', SHEEP_ADDR, '-p', SHEEP_PORT) + def cmd_dog_node_info(self): return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'node', 'info', '-a', SHEEP_ADDR, '-p', SHEEP_PORT, '-r') @@ -159,6 +163,10 @@ Total 107287605248 3623897354 3% 54760833024 COLLIE_NODE_LIST = """ 0 
127.0.0.1:7000 128 1 +""" + + COLLIE_VDI_LIST = """ += testvolume 0 0 0 0 1467037106 fd32fc 3 """ COLLIE_CLUSTER_INFO_0_5 = """\ @@ -1063,6 +1071,33 @@ class SheepdogClientTestCase(test.TestCase): self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) + @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') + def test_get_vdi_info_success(self, fake_execute): + + expected_cmd = ('vdi', 'list', self._vdiname, '-r') + fake_execute.return_value = (self.test_data.COLLIE_VDI_LIST, '') + self.client.get_vdi_info(self._vdiname) + fake_execute.assert_called_once_with(*expected_cmd) + + @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') + @mock.patch.object(sheepdog, 'LOG') + def test_get_vdi_info_unknown_error(self, fake_logger, fake_execute): + cmd = self.test_data.cmd_dog_vdi_list(self._vdiname) + exit_code = 2 + stdout = 'stdout_dummy' + stderr = 'stderr_dummy' + expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, + exit_code=exit_code, + stdout=stdout, + stderr=stderr) + fake_execute.side_effect = exception.SheepdogCmdError( + cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), + stderr=stderr.replace('\n', '\\n')) + ex = self.assertRaises(exception.SheepdogCmdError, + self.client.get_vdi_info, self._vdiname) + self.assertTrue(fake_logger.error.called) + self.assertEqual(expected_msg, ex.msg) + @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_update_node_list_success(self, fake_execute): expected_cmd = ('node', 'list', '-r') @@ -1278,7 +1313,7 @@ class SheepdogDriverTestCase(test.TestCase): image_service = '' patch = mock.patch.object - with patch(self.driver, '_try_execute', return_value=True): + with patch(self.driver, '_is_cloneable', return_value=True): with patch(self.driver, 'create_cloned_volume'): with patch(self.client, 'resize'): model_updated, cloned = self.driver.clone_image( @@ -1307,32 +1342,27 @@ class SheepdogDriverTestCase(test.TestCase): def test_is_cloneable(self): uuid = '87f1b01c-f46c-4537-bd5d-23962f5f4316' - location = 'sheepdog://ip:port:%s' % uuid + location = 'sheepdog://127.0.0.1:7000:%s' % uuid image_meta = {'id': uuid, 'size': 1, 'disk_format': 'raw'} invalid_image_meta = {'id': uuid, 'size': 1, 'disk_format': 'iso'} - with mock.patch.object(self.driver, '_try_execute') as try_execute: + with mock.patch.object(self.client, 'get_vdi_info') as fake_execute: + fake_execute.return_value = self.test_data.COLLIE_VDI_LIST self.assertTrue( self.driver._is_cloneable(location, image_meta)) - expected_cmd = ('collie', 'vdi', 'list', - '--address', 'ip', - '--port', 'port', - uuid) - try_execute.assert_called_once_with(*expected_cmd) - # check returning False without executing a command + # Test for invalid location self.assertFalse( self.driver._is_cloneable('invalid-location', image_meta)) - self.assertFalse( - self.driver._is_cloneable(location, invalid_image_meta)) - self.assertEqual(1, try_execute.call_count) - error = processutils.ProcessExecutionError - with mock.patch.object(self.driver, '_try_execute', - side_effect=error) as fail_try_execute: + # Test for image not exist in sheepdog cluster + fake_execute.return_value = '' self.assertFalse( self.driver._is_cloneable(location, image_meta)) - fail_try_execute.assert_called_once_with(*expected_cmd) + + # Test for invalid image meta + self.assertFalse( + self.driver._is_cloneable(location, invalid_image_meta)) def test_create_volume_from_snapshot(self): dst_volume = self.test_data.TEST_CLONED_VOLUME diff --git a/cinder/tests/unit/test_smbfs.py 
b/cinder/tests/unit/volume/drivers/test_smbfs.py similarity index 100% rename from cinder/tests/unit/test_smbfs.py rename to cinder/tests/unit/volume/drivers/test_smbfs.py diff --git a/cinder/tests/unit/test_solidfire.py b/cinder/tests/unit/volume/drivers/test_solidfire.py similarity index 96% rename from cinder/tests/unit/test_solidfire.py rename to cinder/tests/unit/volume/drivers/test_solidfire.py index 00ca1fa18..a230a2e0d 100644 --- a/cinder/tests/unit/test_solidfire.py +++ b/cinder/tests/unit/volume/drivers/test_solidfire.py @@ -24,6 +24,7 @@ from cinder import context from cinder import exception from cinder.objects import fields from cinder import test +from cinder.tests.unit.image import fake as fake_image from cinder.volume import configuration as conf from cinder.volume.drivers import solidfire from cinder.volume import qos_specs @@ -79,7 +80,7 @@ class SolidFireVolumeTestCase(test.TestCase): 325355), 'is_public': True, 'owner': 'testprjid'} - self.fake_image_service = 'null' + self.fake_image_service = fake_image.FakeImageService() def fake_init_cluster_pairs(*args, **kwargs): return None @@ -511,10 +512,12 @@ class SolidFireVolumeTestCase(test.TestCase): sfv._get_sfaccount_by_name, 'some-name') def test_delete_volume(self): + vol_id = 'a720b3c0-d1f0-11e1-9b23-0800200c9a66' testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'id': vol_id, + 'name_id': vol_id, 'created_at': timeutils.utcnow(), 'provider_id': '1 5 None', 'multiattach': True @@ -559,10 +562,12 @@ class SolidFireVolumeTestCase(test.TestCase): 'targetSecret': 'shhhh', 'username': 'john-wayne'}] fake_no_volumes = [] + vol_id = 'a720b3c0-d1f0-11e1-9b23-0800200c9a66' testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'id': vol_id, + 'name_id': vol_id, 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) @@ -580,10 +585,12 @@ class SolidFireVolumeTestCase(test.TestCase): 'targetSecret': 'shhhh', 'username': 'john-wayne'}] fake_no_volumes = [] + snap_id = 'a720b3c0-d1f0-11e1-9b23-0800200c9a66' testsnap = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'id': snap_id, + 'name_id': snap_id, 'volume_id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} @@ -960,7 +967,19 @@ class SolidFireVolumeTestCase(test.TestCase): 'fake')) @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') - def test_clone_image_authorization(self, _mock_create_template_account): + @mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume') + def test_clone_image_authorization(self, + _mock_create_image_volume, + _mock_create_template_account): + fake_sf_vref = { + 'status': 'active', 'volumeID': 1, + 'attributes': { + 'image_info': + {'image_updated_at': '2014-12-17T00:16:23+00:00', + 'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', + 'image_name': 'fake-image', + 'image_created_at': '2014-12-17T00:16:23+00:00'}}} + _mock_create_image_volume.return_value = fake_sf_vref _mock_create_template_account.return_value = 1 self.configuration.sf_allow_template_caching = True @@ -968,14 +987,24 @@ class SolidFireVolumeTestCase(test.TestCase): # Make sure if it's NOT public and we're NOT the owner it # doesn't try and cache - _fake_image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', - 'updated_at': datetime.datetime(2013, 9, - 28, 15, - 27, 36, - 325355), - 
             'properties': {'virtual_size': 1},
-            'is_public': False,
-            'owner': 'wrong-owner'}
+        timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
+        _fake_image_meta = {
+            'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+            'name': 'fakeimage123456',
+            'created_at': timestamp,
+            'updated_at': timestamp,
+            'deleted_at': None,
+            'deleted': False,
+            'status': 'active',
+            'visibility': 'private',
+            'protected': False,
+            'container_format': 'raw',
+            'disk_format': 'raw',
+            'owner': 'wrong-owner',
+            'properties': {'kernel_id': 'nokernel',
+                           'ramdisk_id': 'nokernel',
+                           'architecture': 'x86_64'}}
+
         with mock.patch.object(sfv, '_do_clone_volume',
                                return_value=('fe', 'fi', 'fo')):
             self.assertEqual((None, False),
@@ -983,32 +1012,39 @@ class SolidFireVolumeTestCase(test.TestCase):
                              self.mock_volume,
                              'fake',
                              _fake_image_meta,
-                             'fake'))
+                             self.fake_image_service))

         # And is_public False, but the correct owner does work
         _fake_image_meta['owner'] = 'testprjid'
-        self.assertEqual(('fo', True), sfv.clone_image(self.ctxt,
-                                                       self.mock_volume,
-                                                       'fake',
-                                                       _fake_image_meta,
-                                                       'fake'))
+        self.assertEqual(
+            ('fo', True),
+            sfv.clone_image(
+                self.ctxt,
+                self.mock_volume,
+                'fake',
+                _fake_image_meta,
+                self.fake_image_service))

         # And is_public True, even if not the correct owner
         _fake_image_meta['is_public'] = True
         _fake_image_meta['owner'] = 'wrong-owner'
-        self.assertEqual(('fo', True), sfv.clone_image(self.ctxt,
-                                                       self.mock_volume,
-                                                       'fake',
-                                                       _fake_image_meta,
-                                                       'fake'))
+        self.assertEqual(
+            ('fo', True),
+            sfv.clone_image(self.ctxt,
+                            self.mock_volume,
+                            'fake',
+                            _fake_image_meta,
+                            self.fake_image_service))

         # And using the new V2 visibility tag
         _fake_image_meta['visibility'] = 'public'
         _fake_image_meta['owner'] = 'wrong-owner'
-        self.assertEqual(('fo', True), sfv.clone_image(self.ctxt,
-                                                       self.mock_volume,
-                                                       'fake',
-                                                       _fake_image_meta,
-                                                       'fake'))
+        self.assertEqual(
+            ('fo', True),
+            sfv.clone_image(self.ctxt,
+                            self.mock_volume,
+                            'fake',
+                            _fake_image_meta,
+                            self.fake_image_service))

     def test_create_template_no_account(self):
         sfv = solidfire.SolidFireDriver(configuration=self.configuration)
diff --git a/cinder/tests/unit/test_tegile.py b/cinder/tests/unit/volume/drivers/test_tegile.py
similarity index 100%
rename from cinder/tests/unit/test_tegile.py
rename to cinder/tests/unit/volume/drivers/test_tegile.py
diff --git a/cinder/tests/unit/test_tintri.py b/cinder/tests/unit/volume/drivers/test_tintri.py
similarity index 98%
rename from cinder/tests/unit/test_tintri.py
rename to cinder/tests/unit/volume/drivers/test_tintri.py
index 8509e00bb..0de9757d6 100644
--- a/cinder/tests/unit/test_tintri.py
+++ b/cinder/tests/unit/volume/drivers/test_tintri.py
@@ -15,6 +15,7 @@
 Volume driver test for Tintri storage.
 """

+import ddt
 import mock
 from oslo_utils import units

@@ -40,6 +41,7 @@ class FakeImage(object):
         return self.__dict__[key]


+@ddt.ddt
 class TintriDriverTestCase(test.TestCase):
     def setUp(self):
         super(TintriDriverTestCase, self).setUp()
@@ -250,14 +252,16 @@ class TintriDriverTestCase(test.TestCase):
                           self._driver.manage_existing,
                           volume, existing)

-    def test_manage_existing_get_size(self):
+    @ddt.data((123, 123), (123.5, 124))
+    @ddt.unpack
+    def test_manage_existing_get_size(self, st_size, exp_size):
         volume = fake_volume.fake_volume_obj(self.context)
         existing = {'source-name': self._provider_location + '/' +
                     volume.name}
-        file = mock.Mock(st_size=123 * units.Gi)
+        file = mock.Mock(st_size=int(st_size * units.Gi))
         with mock.patch('os.path.isfile', return_value=True):
             with mock.patch('os.stat', return_value=file):
-                self.assertEqual(float(file.st_size / units.Gi),
+                self.assertEqual(exp_size,
                                  self._driver.manage_existing_get_size(
                                      volume, existing))
diff --git a/cinder/tests/unit/test_vzstorage.py b/cinder/tests/unit/volume/drivers/test_vzstorage.py
similarity index 95%
rename from cinder/tests/unit/test_vzstorage.py
rename to cinder/tests/unit/volume/drivers/test_vzstorage.py
index b5962ebea..f62cb4b69 100644
--- a/cinder/tests/unit/test_vzstorage.py
+++ b/cinder/tests/unit/volume/drivers/test_vzstorage.py
@@ -216,13 +216,14 @@ class VZStorageTestCase(test.TestCase):
         drv._check_extend_volume_support = mock.Mock(return_value=True)
         drv._is_file_size_equal = mock.Mock(return_value=True)

-        snap_info = """{"volume_format": "raw",
-                        "active": "%s"}""" % self.vol.id
-        with mock.patch.object(drv, 'local_path',
-                               return_value=self._FAKE_VOLUME_PATH):
-            with mock.patch.object(drv, '_read_file',
-                                   return_value=snap_info):
-                drv.extend_volume(self.vol, 10)
+        snap_info = '{"active": "%s"}' % self.vol.id
+        with mock.patch.object(drv, 'get_volume_format',
+                               return_value="raw"):
+            with mock.patch.object(drv, 'local_path',
+                                   return_value=self._FAKE_VOLUME_PATH):
+                with mock.patch.object(drv, '_read_file',
+                                       return_value=snap_info):
+                    drv.extend_volume(self.vol, 10)

         mock_resize_image.assert_called_once_with(self._FAKE_VOLUME_PATH, 10)

@@ -263,7 +264,10 @@ class VZStorageTestCase(test.TestCase):
     def test_copy_volume_from_snapshot(self, mock_convert_image):
         drv = self._vz_driver

-        fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
+        fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name',
+                            'backing-files':
+                            {self._FAKE_SNAPSHOT_ID:
+                             self._FAKE_VOLUME_NAME}}
         fake_img_info = mock.MagicMock()
         fake_img_info.backing_file = self._FAKE_VOLUME_NAME
diff --git a/cinder/tests/unit/test_xio.py b/cinder/tests/unit/volume/drivers/test_xio.py
similarity index 100%
rename from cinder/tests/unit/test_xio.py
rename to cinder/tests/unit/volume/drivers/test_xio.py
diff --git a/cinder/tests/unit/test_zfssa.py b/cinder/tests/unit/volume/drivers/test_zfssa.py
similarity index 100%
rename from cinder/tests/unit/test_zfssa.py
rename to cinder/tests/unit/volume/drivers/test_zfssa.py
diff --git a/cinder/tests/unit/volume/drivers/violin/__init__.py b/cinder/tests/unit/volume/drivers/violin/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/cinder/tests/unit/fake_vmem_client.py b/cinder/tests/unit/volume/drivers/violin/fake_vmem_client.py
similarity index 100%
rename from cinder/tests/unit/fake_vmem_client.py
rename to cinder/tests/unit/volume/drivers/violin/fake_vmem_client.py
diff --git a/cinder/tests/unit/test_v7000_common.py b/cinder/tests/unit/volume/drivers/violin/test_v7000_common.py
similarity index 99%
rename from cinder/tests/unit/test_v7000_common.py
rename to cinder/tests/unit/volume/drivers/violin/test_v7000_common.py
index 57f99009e..7cc723ee1 100644
--- a/cinder/tests/unit/test_v7000_common.py
+++ b/cinder/tests/unit/volume/drivers/violin/test_v7000_common.py
@@ -27,7 +27,8 @@ from cinder import context
 from cinder import exception
 from cinder import test
 from cinder.tests.unit import fake_constants as fake
-from cinder.tests.unit import fake_vmem_client as vmemclient
+from cinder.tests.unit.volume.drivers.violin \
+    import fake_vmem_client as vmemclient
 from cinder.volume import configuration as conf
 from cinder.volume.drivers.violin import v7000_common
 from cinder.volume import volume_types
@@ -1152,7 +1153,7 @@ class V7000CommonTestCase(test.TestCase):

         result = self.driver._delete_lun_snapshot(SNAPSHOT)

-        self.assertIsNone(result)
+        self.assertTrue(result)

     def test_delete_lun_snapshot_with_retry(self):
         response = [
@@ -1172,7 +1173,7 @@ class V7000CommonTestCase(test.TestCase):

         result = self.driver._delete_lun_snapshot(SNAPSHOT)

-        self.assertIsNone(result)
+        self.assertTrue(result)
         self.assertEqual(
             len(response),
             self.driver.vmem_mg.snapshot.delete_lun_snapshot.call_count)
diff --git a/cinder/tests/unit/test_v7000_fcp.py b/cinder/tests/unit/volume/drivers/violin/test_v7000_fcp.py
similarity index 99%
rename from cinder/tests/unit/test_v7000_fcp.py
rename to cinder/tests/unit/volume/drivers/violin/test_v7000_fcp.py
index 00d7d1150..8700887ba 100644
--- a/cinder/tests/unit/test_v7000_fcp.py
+++ b/cinder/tests/unit/volume/drivers/violin/test_v7000_fcp.py
@@ -21,7 +21,8 @@ import mock

 from cinder import exception
 from cinder import test
-from cinder.tests.unit import fake_vmem_client as vmemclient
+from cinder.tests.unit.volume.drivers.violin \
+    import fake_vmem_client as vmemclient
 from cinder.volume import configuration as conf
 from cinder.volume.drivers.violin import v7000_common
 from cinder.volume.drivers.violin import v7000_fcp
diff --git a/cinder/tests/unit/volume/drivers/vmware/__init__.py b/cinder/tests/unit/volume/drivers/vmware/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/cinder/tests/unit/test_vmware_datastore.py b/cinder/tests/unit/volume/drivers/vmware/test_vmware_datastore.py
similarity index 97%
rename from cinder/tests/unit/test_vmware_datastore.py
rename to cinder/tests/unit/volume/drivers/vmware/test_vmware_datastore.py
index 8d78a88f3..9ae89d9e0 100644
--- a/cinder/tests/unit/test_vmware_datastore.py
+++ b/cinder/tests/unit/volume/drivers/vmware/test_vmware_datastore.py
@@ -341,19 +341,21 @@ class DatastoreTest(test.TestCase):
         get_profile_id_by_name.reset_mock()
         profile_id = mock.sentinel.profile_id
         get_profile_id_by_name.return_value = profile_id
-        filter_by_profile.return_value = []
+        filter_by_profile.return_value = {}
         self.assertFalse(self._ds_sel.is_datastore_compliant(datastore,
                                                              profile_name))
         get_profile_id_by_name.assert_called_once_with(self._session,
                                                        profile_name)
-        filter_by_profile.assert_called_once_with([datastore], profile_id)
+        filter_by_profile.assert_called_once_with({datastore: None},
+                                                  profile_id)

         # Test with valid profile and compliant datastore.
         get_profile_id_by_name.reset_mock()
         filter_by_profile.reset_mock()
-        filter_by_profile.return_value = [datastore]
+        filter_by_profile.return_value = {datastore: None}
         self.assertTrue(self._ds_sel.is_datastore_compliant(datastore,
                                                             profile_name))
         get_profile_id_by_name.assert_called_once_with(self._session,
                                                        profile_name)
-        filter_by_profile.assert_called_once_with([datastore], profile_id)
+        filter_by_profile.assert_called_once_with({datastore: None},
+                                                  profile_id)
diff --git a/cinder/tests/unit/test_vmware_vmdk.py b/cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py
similarity index 91%
rename from cinder/tests/unit/test_vmware_vmdk.py
rename to cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py
index 472a2e3ca..7712adfcf 100644
--- a/cinder/tests/unit/test_vmware_vmdk.py
+++ b/cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py
@@ -27,8 +27,10 @@ from oslo_vmware import exceptions
 from oslo_vmware import image_transfer
 import six

+from cinder import context
 from cinder import exception as cinder_exceptions
 from cinder import test
+from cinder.tests.unit import fake_volume
 from cinder.volume import configuration
 from cinder.volume.drivers.vmware import datastore as hub
 from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
@@ -111,6 +113,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
                                                 create_session=False)
         self._volumeops = volumeops.VMwareVolumeOps(self._session,
                                                     self.MAX_OBJECTS)
+        self._context = context.get_admin_context()

     def test_get_volume_stats(self):
         stats = self._driver.get_volume_stats()
@@ -140,6 +143,19 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
             'project_id': project_id,
         }

+    def _create_volume_obj(self,
+                           vol_id=VOL_ID,
+                           display_name=DISPLAY_NAME,
+                           volume_type_id=VOL_TYPE_ID,
+                           status='available',
+                           size=VOL_SIZE,
+                           attachment=None,
+                           project_id=PROJECT_ID):
+        vol = self._create_volume_dict(
+            vol_id, display_name, volume_type_id, status, size, attachment,
+            project_id)
+        return fake_volume.fake_volume_obj(self._context, **vol)
+
     @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
     def test_verify_volume_creation(self, select_ds_for_volume):
         volume = self._create_volume_dict()
@@ -872,8 +888,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
             vmdk_file_path=vmdk_file_path,
             vmdk_size=volume['size'] * units.Gi,
             image_name=image_meta['name'],
-            image_version=1,
-            is_public=image_meta['is_public'])
+            image_version=1)

     def test_copy_volume_to_image(self):
         self._test_copy_volume_to_image()
@@ -924,9 +939,11 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
     @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
     @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
     @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    @mock.patch.object(VMDK_DRIVER, '_select_datastore')
     def test_retype_with_diff_profile_and_ds_compliance(
-            self, ds_sel, get_extra_spec_storage_profile, get_storage_profile,
-            get_extra_spec_disk_type, get_disk_type, vops, in_use):
+            self, select_datastore, ds_sel, get_extra_spec_storage_profile,
+            get_storage_profile, get_extra_spec_disk_type, get_disk_type,
+            vops, in_use):
         backing = mock.sentinel.backing
         vops.get_backing.return_value = backing

@@ -957,7 +974,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
                                               host))
         ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
                                                               new_profile)
-        self.assertFalse(ds_sel.select_datastore.called)
+        select_datastore.assert_not_called()
         vops.change_backing_profile.assert_called_once_with(backing,
                                                             new_profile_id)

@@ -970,9 +987,11 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
     @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
     @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
     @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    @mock.patch.object(VMDK_DRIVER, '_select_datastore')
     def test_retype_with_diff_profile_and_ds_sel_no_candidate(
-            self, ds_sel, get_extra_spec_storage_profile, get_storage_profile,
-            get_extra_spec_disk_type, get_disk_type, vops, in_use):
+            self, select_datastore, ds_sel, get_extra_spec_storage_profile,
+            get_storage_profile, get_extra_spec_disk_type, get_disk_type,
+            vops, in_use):
         backing = mock.sentinel.backing
         vops.get_backing.return_value = backing

@@ -992,7 +1011,8 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
         get_extra_spec_storage_profile.return_value = new_profile

         ds_sel.is_datastore_compliant.return_value = False
-        ds_sel.select_datastore.return_value = ()
+        select_datastore.side_effect = (
+            vmdk_exceptions.NoValidDatastoreException)

         context = mock.sentinel.context
         volume = self._create_volume_dict(status='retyping')
@@ -1003,7 +1023,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
                                               host))
         ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
                                                               new_profile)
-        ds_sel.select_datastore.assert_called_once_with(
+        select_datastore.assert_called_once_with(
             {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
              hub.DatastoreSelector.PROFILE_NAME: new_profile})
@@ -1016,10 +1036,12 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
     @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
     @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
     @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    @mock.patch.object(VMDK_DRIVER, '_select_datastore')
     @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
     def test_retype_with_diff_extra_spec_and_vol_snapshot(
             self,
             get_volume_group_folder,
+            select_datastore,
             ds_sel,
             get_extra_spec_storage_profile,
             get_storage_profile,
             get_extra_spec_disk_type,
@@ -1049,7 +1071,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
         rp = mock.sentinel.rp
         new_datastore = mock.Mock(value='ds2')
         summary = mock.Mock(datastore=new_datastore)
-        ds_sel.select_datastore.return_value = (host, rp, summary)
+        select_datastore.return_value = (host, rp, summary)

         folder = mock.sentinel.folder
         get_volume_group_folder.return_value = folder
@@ -1066,7 +1088,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
                                               host))
         ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
                                                               new_profile)
-        ds_sel.select_datastore.assert_called_once_with(
+        select_datastore.assert_called_once_with(
             {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
              hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: ['ds1'],
              hub.DatastoreSelector.PROFILE_NAME: new_profile})
@@ -1085,6 +1107,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
     @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
     @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
     @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    @mock.patch.object(VMDK_DRIVER, '_select_datastore')
     @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
     @mock.patch('oslo_utils.uuidutils.generate_uuid')
     @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@@ -1093,7 +1116,9 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
             delete_temp_backing,
             generate_uuid,
             get_volume_group_folder,
-            ds_sel, get_extra_spec_storage_profile,
+            select_datastore,
+            ds_sel,
+            get_extra_spec_storage_profile,
             get_storage_profile,
             get_extra_spec_disk_type,
             get_disk_type,
@@ -1122,7 +1147,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
         host = mock.sentinel.host
         rp = mock.sentinel.rp
         summary = mock.Mock(datastore=datastore)
-        ds_sel.select_datastore.return_value = (host, rp, summary)
+        select_datastore.return_value = (host, rp, summary)

         folder = mock.sentinel.folder
         get_volume_group_folder.return_value = folder
@@ -1152,7 +1177,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
                                               diff, host))
         ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
                                                               new_profile)
-        ds_sel.select_datastore.assert_called_once_with(
+        select_datastore.assert_called_once_with(
             {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
              hub.DatastoreSelector.PROFILE_NAME: new_profile})
         vops.clone_backing.assert_called_once_with(
@@ -1496,17 +1521,27 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
             context, name, volume, tmp_file_path, file_size_bytes)
         delete_temp_backing.assert_called_once_with(backing)

-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                'session', new_callable=mock.PropertyMock)
-    def test_get_vc_version(self, session):
-        # test config overrides fetching from vCenter server
-        version = self._driver._get_vc_version()
-        self.assertEqual(ver.LooseVersion(self.DEFAULT_VC_VERSION), version)
-        # explicitly remove config entry
+    @mock.patch.object(VMDK_DRIVER, 'session')
+    @mock.patch('oslo_vmware.vim_util.get_vc_version')
+    def test_get_vc_version(self, get_vc_version, session):
         self._driver.configuration.vmware_host_version = None
-        session.return_value.vim.service_content.about.version = '6.0.1'
+
+        version_str = '6.0.0'
+        get_vc_version.return_value = version_str
+
         version = self._driver._get_vc_version()
-        self.assertEqual(ver.LooseVersion('6.0.1'), version)
+
+        self.assertEqual(ver.LooseVersion(version_str), version)
+        get_vc_version.assert_called_once_with(session)
+
+    @mock.patch('oslo_vmware.vim_util.get_vc_version')
+    def test_get_vc_version_override(self, get_vc_version):
+        version = self._driver._get_vc_version()
+
+        self.assertEqual(
+            ver.LooseVersion(self._driver.configuration.vmware_host_version),
+            version)
+        get_vc_version.assert_not_called()

     @mock.patch('cinder.volume.drivers.vmware.vmdk.LOG')
     @ddt.data('5.1', '5.5')
@@ -1527,268 +1562,183 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
                           self._driver._validate_vcenter_version,
                           vc_version)

-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                '_validate_vcenter_version')
+    @mock.patch.object(VMDK_DRIVER, '_validate_params')
+    @mock.patch.object(VMDK_DRIVER, '_get_vc_version')
+    @mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version')
+    @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
     @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps')
-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                '_get_vc_version')
-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                'session', new_callable=mock.PropertyMock)
-    def test_do_setup_with_pbm_disabled(self, session, get_vc_version,
-                                        vops_cls, validate_vc_version):
-        session_obj = mock.Mock(name='session')
-        session.return_value = session_obj
-        vc_version = ver.LooseVersion('5.0')
+    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector')
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, 'session')
+    def _test_do_setup(
+            self, session, vops, ds_sel_cls, vops_cls, get_pbm_wsdl_loc,
+            validate_vc_version, get_vc_version, validate_params,
+            enable_pbm=True):
+        if enable_pbm:
+            ver_str = '5.5'
+            pbm_wsdl = mock.sentinel.pbm_wsdl
+            get_pbm_wsdl_loc.return_value = pbm_wsdl
+        else:
+            ver_str = '5.1'
+        vc_version = ver.LooseVersion(ver_str)
         get_vc_version.return_value = vc_version

-        cluster_refs = mock.Mock()
-        cluster_refs.values.return_value = mock.sentinel.cluster_refs
-        vops = mock.Mock()
+        cls_1 = mock.sentinel.cls_1
+        cls_2 = mock.sentinel.cls_2
+        cluster_refs = {'cls-1': cls_1, 'cls-2': cls_2}
         vops.get_cluster_refs.return_value = cluster_refs

-        def vops_side_effect(session, max_objects):
-            vops._session = session
-            vops._max_objects = max_objects
-            return vops
-
-        vops_cls.side_effect = vops_side_effect
-
         self._driver.do_setup(mock.ANY)

-        validate_vc_version.assert_called_once_with(vc_version)
-        self.assertFalse(self._driver._storage_policy_enabled)
+        validate_params.assert_called_once_with()
         get_vc_version.assert_called_once_with()
-        self.assertEqual(session_obj, self._driver.volumeops._session)
-        self.assertEqual(session_obj, self._driver.ds_sel._session)
-        self.assertEqual(mock.sentinel.cluster_refs, self._driver._clusters)
-        vops.get_cluster_refs.assert_called_once_with(self.CLUSTERS)
+        validate_vc_version.assert_called_once_with(vc_version)
+        if enable_pbm:
+            get_pbm_wsdl_loc.assert_called_once_with(ver_str)
+            self.assertEqual(pbm_wsdl, self._driver.pbm_wsdl)
+        self.assertEqual(enable_pbm, self._driver._storage_policy_enabled)
+        vops_cls.assert_called_once_with(
+            session, self._driver.configuration.vmware_max_objects_retrieval)
+        self.assertEqual(vops_cls.return_value, self._driver._volumeops)
+        ds_sel_cls.assert_called_once_with(
+            vops,
+            session,
+            self._driver.configuration.vmware_max_objects_retrieval)
+        self.assertEqual(ds_sel_cls.return_value, self._driver._ds_sel)
+        vops.get_cluster_refs.assert_called_once_with(
+            self._driver.configuration.vmware_cluster_name)
+        self.assertEqual(list(cluster_refs.values()),
+                         list(self._driver._clusters))

-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                '_validate_vcenter_version')
+    def test_do_setup(self):
+        self._test_do_setup()
+
+    def test_do_setup_with_pbm_disabled(self):
+        self._test_do_setup(enable_pbm=False)
+
+    @mock.patch.object(VMDK_DRIVER, '_validate_params')
+    @mock.patch.object(VMDK_DRIVER, '_get_vc_version')
+    @mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version')
     @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                '_get_vc_version')
-    def test_do_setup_with_invalid_pbm_wsdl(self, get_vc_version,
-                                            get_pbm_wsdl_location,
-                                            validate_vc_version):
-        vc_version = ver.LooseVersion('5.5')
+    def test_do_setup_with_invalid_pbm_wsdl(
+            self, get_pbm_wsdl_loc, validate_vc_version, get_vc_version,
+            validate_params):
+        ver_str = '5.5'
+        vc_version = ver.LooseVersion(ver_str)
         get_vc_version.return_value = vc_version
-        get_pbm_wsdl_location.return_value = None
+
+        get_pbm_wsdl_loc.return_value = None

         self.assertRaises(exceptions.VMwareDriverException,
                           self._driver.do_setup,
                           mock.ANY)

-        validate_vc_version.assert_called_once_with(vc_version)
-        self.assertFalse(self._driver._storage_policy_enabled)
+        validate_params.assert_called_once_with()
         get_vc_version.assert_called_once_with()
-        get_pbm_wsdl_location.assert_called_once_with(
-            six.text_type(vc_version))
-
-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                '_validate_vcenter_version')
-    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps')
-    @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                '_get_vc_version')
-    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
-                'session', new_callable=mock.PropertyMock)
-    def test_do_setup(self, session, get_vc_version, get_pbm_wsdl_location,
-                      vops_cls, validate_vc_version):
-        session_obj = mock.Mock(name='session')
-        session.return_value = session_obj
-
-        vc_version = ver.LooseVersion('5.5')
-        get_vc_version.return_value = vc_version
-        get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'
-
-        cluster_refs = mock.Mock()
-        cluster_refs.values.return_value = mock.sentinel.cluster_refs
-        vops = mock.Mock()
-        vops.get_cluster_refs.return_value = cluster_refs
-
-        def vops_side_effect(session, max_objects):
-            vops._session = session
-            vops._max_objects = max_objects
-            return vops
-
-        vops_cls.side_effect = vops_side_effect
-
-        self._driver.do_setup(mock.ANY)
-
-        validate_vc_version.assert_called_once_with(vc_version)
-        self.assertTrue(self._driver._storage_policy_enabled)
-        get_vc_version.assert_called_once_with()
-        get_pbm_wsdl_location.assert_called_once_with(
-            six.text_type(vc_version))
-        self.assertEqual(session_obj, self._driver.volumeops._session)
-        self.assertEqual(session_obj, self._driver.ds_sel._session)
-        self.assertEqual(mock.sentinel.cluster_refs, self._driver._clusters)
-        vops.get_cluster_refs.assert_called_once_with(self.CLUSTERS)
+        validate_vc_version.assert_called_once_with(vc_version)
+        get_pbm_wsdl_loc.assert_called_once_with(ver_str)

     @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
-    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    @mock.patch.object(VMDK_DRIVER, '_select_datastore')
     @mock.patch.object(VMDK_DRIVER, 'volumeops')
     @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
-    def test_select_ds_for_volume(self, get_volume_group_folder, vops, ds_sel,
-                                  get_storage_profile):
+    @ddt.data(None, {vmdk.CREATE_PARAM_DISK_SIZE: 2 * VOL_SIZE})
+    def test_select_ds_for_volume(
+            self, create_params, get_volume_group_folder, vops,
+            select_datastore, get_storage_profile):
         profile = mock.sentinel.profile
         get_storage_profile.return_value = profile

-        host_ref = mock.sentinel.host_ref
+        host = mock.sentinel.host
         rp = mock.sentinel.rp
         summary = mock.sentinel.summary
-        ds_sel.select_datastore.return_value = (host_ref, rp, summary)
+        select_datastore.return_value = (host, rp, summary)

         dc = mock.sentinel.dc
         vops.get_dc.return_value = dc
+
         folder = mock.sentinel.folder
         get_volume_group_folder.return_value = folder

-        host = mock.sentinel.host
-        project_id = '63c19a12292549818c09946a5e59ddaf'
-        vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
-               'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0',
-               'project_id': project_id}
-        ret = self._driver._select_ds_for_volume(vol, host)
+        vol = self._create_volume_dict()
+        ret = self._driver._select_ds_for_volume(
+            vol, host=host, create_params=create_params)

-        self.assertEqual((host_ref, rp, folder, summary), ret)
-        exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
+        self.assertEqual((host, rp, folder, summary), ret)
+        if create_params:
+            exp_size = create_params[vmdk.CREATE_PARAM_DISK_SIZE] * units.Gi
+        else:
+            exp_size = vol['size'] * units.Gi
+        exp_req = {hub.DatastoreSelector.SIZE_BYTES: exp_size,
                    hub.DatastoreSelector.PROFILE_NAME: profile}
-        ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=[host])
+        select_datastore.assert_called_once_with(exp_req, host)
         vops.get_dc.assert_called_once_with(rp)
-        get_volume_group_folder.assert_called_once_with(dc, project_id)
-
-    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
-    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
-    @mock.patch.object(VMDK_DRIVER, 'volumeops')
-    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
-    def test_select_ds_for_volume_with_no_host(
-            self, get_volume_group_folder, vops, ds_sel, get_storage_profile):
-
-        profile = mock.sentinel.profile
-        get_storage_profile.return_value = profile
-
-        host_ref = mock.sentinel.host_ref
-        rp = mock.sentinel.rp
-        summary = mock.sentinel.summary
-        ds_sel.select_datastore.return_value = (host_ref, rp, summary)
-
-        dc = mock.sentinel.dc
-        vops.get_dc.return_value = dc
-        folder = mock.sentinel.folder
-        get_volume_group_folder.return_value = folder
-
-        project_id = '63c19a12292549818c09946a5e59ddaf'
-        vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
-               'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0',
-               'project_id': project_id}
-        ret = self._driver._select_ds_for_volume(vol)
-
-        self.assertEqual((host_ref, rp, folder, summary), ret)
-        exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
-                   hub.DatastoreSelector.PROFILE_NAME: profile}
-        ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=None)
-        vops.get_dc.assert_called_once_with(rp)
-        get_volume_group_folder.assert_called_once_with(dc, project_id)
-
-    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
-    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
-    def test_select_ds_for_volume_with_no_best_candidate(
-            self, ds_sel, get_storage_profile):
-
-        profile = mock.sentinel.profile
-        get_storage_profile.return_value = profile
-
-        ds_sel.select_datastore.return_value = ()
-
-        vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
-               'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0'}
-        self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
-                          self._driver._select_ds_for_volume, vol)
-
-        exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
-                   hub.DatastoreSelector.PROFILE_NAME: profile}
-        ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=None)
+        get_volume_group_folder.assert_called_once_with(dc, vol['project_id'])

     @mock.patch.object(VMDK_DRIVER, 'volumeops')
-    @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
-    def test_initialize_connection_with_instance_and_backing(
-            self, relocate_backing, vops):
-
-        instance = mock.sentinel.instance
-        connector = {'instance': instance}
-
-        backing = mock.Mock(value=mock.sentinel.backing_value)
-        vops.get_backing.return_value = backing
-
-        host = mock.sentinel.host
-        vops.get_host.return_value = host
-
-        volume = {'name': 'vol-1', 'id': 1}
-        conn_info = self._driver.initialize_connection(volume, connector)
-
-        relocate_backing.assert_called_once_with(volume, backing, host)
-
-        self.assertEqual('vmdk', conn_info['driver_volume_type'])
-        self.assertEqual(backing.value, conn_info['data']['volume'])
-        self.assertEqual(volume['id'],
-                         conn_info['data']['volume_id'])
-
-    @mock.patch.object(VMDK_DRIVER, 'volumeops')
-    @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
+    @mock.patch('oslo_vmware.vim_util.get_moref')
     @mock.patch.object(VMDK_DRIVER, '_create_backing')
-    def test_initialize_connection_with_instance_and_no_backing(
-            self, create_backing, relocate_backing, vops):
-
-        instance = mock.sentinel.instance
-        connector = {'instance': instance}
-
-        vops.get_backing.return_value = None
-
-        host = mock.sentinel.host
-        vops.get_host.return_value = host
-
-        backing = mock.Mock(value=mock.sentinel.backing_value)
-        create_backing.return_value = backing
-
-        volume = {'name': 'vol-1', 'id': 1}
-        conn_info = self._driver.initialize_connection(volume, connector)
-
-        create_backing.assert_called_once_with(volume, host)
-        self.assertFalse(relocate_backing.called)
-
-        self.assertEqual('vmdk', conn_info['driver_volume_type'])
-        self.assertEqual(backing.value, conn_info['data']['volume'])
-        self.assertEqual(volume['id'],
-                         conn_info['data']['volume_id'])
-
-    @mock.patch.object(VMDK_DRIVER, 'volumeops')
     @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
-    @mock.patch.object(VMDK_DRIVER, '_create_backing')
-    def test_initialize_connection_with_no_instance_and_no_backing(
-            self, create_backing, relocate_backing, vops):
+    def _test_initialize_connection(
+            self, relocate_backing, create_backing, get_moref, vops,
+            backing_exists=True, instance_exists=True):

-        vops.get_backing.return_value = None
+        backing_val = mock.sentinel.backing_val
+        backing = mock.Mock(value=backing_val)
+        if backing_exists:
+            vops.get_backing.return_value = backing
+        else:
+            vops.get_backing.return_value = None
+            create_backing.return_value = backing

-        host = mock.sentinel.host
-        vops.get_host.return_value = host
+        if instance_exists:
+            instance_val = mock.sentinel.instance_val
+            connector = {'instance': instance_val}

-        backing = mock.Mock(value=mock.sentinel.backing_value)
-        create_backing.return_value = backing
+            instance_moref = mock.sentinel.instance_moref
+            get_moref.return_value = instance_moref

-        connector = {}
-        volume = {'name': 'vol-1', 'id': 1}
+            host = mock.sentinel.host
+            vops.get_host.return_value = host
+        else:
+            connector = {}
+
+        volume = self._create_volume_obj()
         conn_info = self._driver.initialize_connection(volume, connector)

-        create_backing.assert_called_once_with(volume)
-        self.assertFalse(relocate_backing.called)
-
         self.assertEqual('vmdk', conn_info['driver_volume_type'])
-        self.assertEqual(backing.value, conn_info['data']['volume'])
-        self.assertEqual(volume['id'],
-                         conn_info['data']['volume_id'])
+        self.assertEqual(backing_val, conn_info['data']['volume'])
+        self.assertEqual(volume.id, conn_info['data']['volume_id'])
+        self.assertEqual(volume.name, conn_info['data']['name'])
+
+        if instance_exists:
+            vops.get_host.assert_called_once_with(instance_moref)
+            if backing_exists:
+                relocate_backing.assert_called_once_with(volume, backing, host)
+                create_backing.assert_not_called()
+            else:
+                create_backing.assert_called_once_with(volume, host)
+                relocate_backing.assert_not_called()
+        elif not backing_exists:
+            create_backing.assert_called_once_with(volume)
+            relocate_backing.assert_not_called()
+        else:
+            create_backing.assert_not_called()
+            relocate_backing.assert_not_called()
+
+    def test_initialize_connection_with_instance_and_backing(self):
+        self._test_initialize_connection()
+
+    def test_initialize_connection_with_instance_and_no_backing(self):
+        self._test_initialize_connection(backing_exists=False)
+
+    def test_initialize_connection_with_no_instance_and_no_backing(self):
+        self._test_initialize_connection(
+            backing_exists=False, instance_exists=False)
+
+    def test_initialize_connection_with_no_instance_and_backing(self):
+        self._test_initialize_connection(instance_exists=False)

     @mock.patch.object(VMDK_DRIVER, 'volumeops')
     def test_get_volume_group_folder(self, vops):
diff --git a/cinder/tests/unit/test_vmware_volumeops.py b/cinder/tests/unit/volume/drivers/vmware/test_vmware_volumeops.py
similarity index 100%
rename from cinder/tests/unit/test_vmware_volumeops.py
rename to cinder/tests/unit/volume/drivers/vmware/test_vmware_volumeops.py
diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
index 79bb91b45..ba4556f66 100644
--- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+++ b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
@@ -17,6 +17,7 @@
 import ddt
 import mock

+from castellan.tests.unit.key_manager import mock_key_manager
 from oslo_utils import imageutils

 from cinder import context
@@ -27,7 +28,6 @@ from cinder.tests.unit import fake_constants as fakes
 from cinder.tests.unit import fake_snapshot
 from cinder.tests.unit import fake_volume
 from cinder.tests.unit.image import fake as fake_image
-from cinder.tests.unit.keymgr import mock_key_mgr
 from cinder.tests.unit import utils
 from cinder.tests.unit.volume.flows import fake_volume_api
 from cinder.volume.flows.api import create_volume
@@ -70,7 +70,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             'image_id': None,
             'source_replicaid': None,
             'consistencygroup_id': None,
-            'cgsnapshot_id': None}
+            'cgsnapshot_id': None,
+            'group_id': None, }

         # Fake objects assert specs
         task = create_volume.VolumeCastTask(
@@ -87,7 +88,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             'image_id': 4,
             'source_replicaid': 5,
             'consistencygroup_id': 5,
-            'cgsnapshot_id': None}
+            'cgsnapshot_id': None,
+            'group_id': None, }

         # Fake objects assert specs
         task = create_volume.VolumeCastTask(
@@ -103,8 +105,14 @@ class CreateVolumeFlowTestCase(test.TestCase):
     @mock.patch('cinder.volume.flows.api.create_volume.'
                 'ExtractVolumeRequestTask.'
                 '_get_volume_type_id')
+    @mock.patch('cinder.volume.flows.api.create_volume.'
                'ExtractVolumeRequestTask.'
+                '_get_encryption_key_id')
+    @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
     def test_extract_volume_request_from_image_encrypted(
             self,
+            fake_get_qos,
+            fake_get_encryption_key,
             fake_get_volume_type_id,
             fake_is_encrypted):

@@ -115,27 +123,29 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['status'] = 'active'
         image_meta['size'] = 1
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         task = create_volume.ExtractVolumeRequestTask(
             fake_image_service,
             {'nova'})

         fake_is_encrypted.return_value = True
-        self.assertRaises(exception.InvalidInput,
-                          task.execute,
-                          self.ctxt,
-                          size=1,
-                          snapshot=None,
-                          image_id=image_id,
-                          source_volume=None,
-                          availability_zone='nova',
-                          volume_type=None,
-                          metadata=None,
-                          key_manager=fake_key_manager,
-                          source_replica=None,
-                          consistencygroup=None,
-                          cgsnapshot=None)
+        fake_get_volume_type_id.return_value = fakes.VOLUME_TYPE_ID
+        task.execute(self.ctxt,
+                     size=1,
+                     snapshot=None,
+                     image_id=image_id,
+                     source_volume=None,
+                     availability_zone='nova',
+                     volume_type=None,
+                     metadata=None,
+                     key_manager=fake_key_manager,
+                     source_replica=None,
+                     consistencygroup=None,
+                     cgsnapshot=None,
+                     group=None)
+        fake_get_encryption_key.assert_called_once_with(
+            fake_key_manager, self.ctxt, fakes.VOLUME_TYPE_ID, None, None)

     @mock.patch('cinder.volume.volume_types.is_encrypted')
     @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
@@ -155,7 +165,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['status'] = 'active'
         image_meta['size'] = 1
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         volume_type = 'type1'
         task = create_volume.ExtractVolumeRequestTask(
@@ -176,7 +186,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             key_manager=fake_key_manager,
             source_replica=None,
             consistencygroup=None,
-            cgsnapshot=None)
+            cgsnapshot=None,
+            group=None)

         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -187,7 +198,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'qos_specs': None,
                            'source_replicaid': None,
                            'consistencygroup_id': None,
-                           'cgsnapshot_id': None, }
+                           'cgsnapshot_id': None,
+                           'group_id': None, }
         self.assertEqual(expected_result, result)

     @mock.patch('cinder.volume.volume_types.is_encrypted')
@@ -207,7 +219,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['status'] = 'active'
         image_meta['size'] = 1
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         volume_type = 'type1'
         task = create_volume.ExtractVolumeRequestTask(
@@ -230,7 +242,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             key_manager=fake_key_manager,
             source_replica=None,
             consistencygroup=None,
-            cgsnapshot=None)
+            cgsnapshot=None,
+            group=None)

     @mock.patch('cinder.volume.volume_types.is_encrypted')
     @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
@@ -252,7 +265,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['status'] = 'active'
         image_meta['size'] = 1
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         volume_type = 'type1'
         task = create_volume.ExtractVolumeRequestTask(
@@ -273,7 +286,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             key_manager=fake_key_manager,
             source_replica=None,
             consistencygroup=None,
-            cgsnapshot=None)
+            cgsnapshot=None,
+            group=None)

         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -284,7 +298,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'qos_specs': None,
                            'source_replicaid': None,
                            'consistencygroup_id': None,
-                           'cgsnapshot_id': None, }
+                           'cgsnapshot_id': None,
+                           'group_id': None, }
         self.assertEqual(expected_result, result)

     @mock.patch('cinder.volume.volume_types.is_encrypted')
@@ -305,7 +320,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['status'] = 'active'
         image_meta['size'] = 1
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         volume_type = 'type1'
         task = create_volume.ExtractVolumeRequestTask(
@@ -327,7 +342,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             key_manager=fake_key_manager,
             source_replica=None,
             consistencygroup=None,
-            cgsnapshot=None)
+            cgsnapshot=None,
+            group=None)

         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -338,7 +354,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'qos_specs': {'fake_key': 'fake'},
                            'source_replicaid': None,
                            'consistencygroup_id': None,
-                           'cgsnapshot_id': None, }
+                           'cgsnapshot_id': None,
+                           'group_id': None, }
         self.assertEqual(expected_result, result)

     @mock.patch('cinder.volume.volume_types.is_encrypted')
@@ -366,7 +383,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['properties'] = {}
         image_meta['properties']['cinder_img_volume_type'] = image_volume_type
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         task = create_volume.ExtractVolumeRequestTask(
             fake_image_service,
@@ -388,7 +405,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             key_manager=fake_key_manager,
             source_replica=None,
             consistencygroup=None,
-            cgsnapshot=None)
+            cgsnapshot=None,
+            group=None)

         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -399,7 +417,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'qos_specs': None,
                            'source_replicaid': None,
                            'consistencygroup_id': None,
-                           'cgsnapshot_id': None, }
+                           'cgsnapshot_id': None,
+                           'group_id': None, }
         self.assertEqual(expected_result, result)

     @mock.patch('cinder.db.volume_type_get_by_name')
@@ -427,7 +446,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['properties'] = {}
         image_meta['properties']['cinder_img_volume_type'] = image_volume_type
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         task = create_volume.ExtractVolumeRequestTask(
             fake_image_service,
@@ -450,7 +469,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             key_manager=fake_key_manager,
             source_replica=None,
             consistencygroup=None,
-            cgsnapshot=None)
+            cgsnapshot=None,
+            group=None)

         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -461,7 +481,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'qos_specs': None,
                            'source_replicaid': None,
                            'consistencygroup_id': None,
-                           'cgsnapshot_id': None, }
+                           'cgsnapshot_id': None,
+                           'group_id': None, }
         self.assertEqual(expected_result, result)

     @mock.patch('cinder.db.volume_type_get_by_name')
@@ -490,7 +511,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['size'] = 1
         image_meta['properties'] = fake_img_properties
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         task = create_volume.ExtractVolumeRequestTask(
             fake_image_service,
@@ -511,7 +532,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
             key_manager=fake_key_manager,
             source_replica=None,
             consistencygroup=None,
-            cgsnapshot=None)
+            cgsnapshot=None,
+            group=None)

         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -522,7 +544,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'qos_specs': None,
                            'source_replicaid': None,
                            'consistencygroup_id': None,
-                           'cgsnapshot_id': None, }
+                           'cgsnapshot_id': None,
+                           'group_id': None, }
         self.assertEqual(expected_result, result)

     @mock.patch('cinder.db.volume_type_get_by_name')
@@ -546,7 +569,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
         image_meta['id'] = image_id
         image_meta['status'] = 'inactive'
         fake_image_service.create(self.ctxt, image_meta)
-        fake_key_manager = mock_key_mgr.MockKeyManager()
+        fake_key_manager = mock_key_manager.MockKeyManager()

         task = create_volume.ExtractVolumeRequestTask(
             fake_image_service,
@@ -570,9 +593,11 @@ class CreateVolumeFlowTestCase(test.TestCase):
                           key_manager=fake_key_manager,
                           source_replica=None,
                           consistencygroup=None,
-                          cgsnapshot=None)
+                          cgsnapshot=None,
+                          group=None)


+@ddt.ddt
 class CreateVolumeFlowManagerTestCase(test.TestCase):

     def setUp(self):
@@ -601,7 +626,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
                                                        snapshot_obj.id)
         fake_driver.create_volume_from_snapshot.assert_called_once_with(
             volume_obj, snapshot_obj)
-        handle_bootable.assert_called_once_with(self.ctxt, volume_obj.id,
+        handle_bootable.assert_called_once_with(self.ctxt, volume_obj,
                                                 snapshot_id=snapshot_obj.id)

     @mock.patch('cinder.objects.Snapshot.get_by_id')
@@ -622,6 +647,71 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
         fake_driver.create_volume_from_snapshot.assert_called_once_with(
             volume, snapshot_obj)

+    @mock.patch('cinder.volume.flows.manager.create_volume.'
+                'CreateVolumeFromSpecTask.'
+                '_handle_bootable_volume_glance_meta')
+    @mock.patch('cinder.image.image_utils.TemporaryImages.fetch')
+    @mock.patch('cinder.image.image_utils.qemu_img_info')
+    @mock.patch('cinder.image.image_utils.check_virtual_size')
+    def test_create_encrypted_volume_from_image(self,
+                                                mock_check_size,
+                                                mock_qemu_img,
+                                                mock_fetch_img,
+                                                mock_handle_bootable):
+        fake_db = mock.MagicMock()
+        fake_driver = mock.MagicMock()
+        fake_volume_manager = mock.MagicMock()
+        fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+            fake_volume_manager, fake_db, fake_driver)
+        volume = fake_volume.fake_volume_obj(
+            self.ctxt,
+            encryption_key_id=fakes.ENCRYPTION_KEY_ID)
+
+        fake_image_service = fake_image.FakeImageService()
+        image_meta = {}
+        image_id = fakes.IMAGE_ID
+        image_meta['id'] = image_id
+        image_meta['status'] = 'active'
+        image_meta['size'] = 1
+        image_location = 'abc'
+
+        fake_db.volume_update.return_value = volume
+        fake_manager._create_from_image(self.ctxt, volume,
+                                        image_location, image_id,
+                                        image_meta, fake_image_service)
+
+        fake_driver.create_volume.assert_called_once_with(volume)
+        fake_driver.copy_image_to_encrypted_volume.assert_called_once_with(
+            self.ctxt, volume, fake_image_service, image_id)
+        mock_handle_bootable.assert_called_once_with(self.ctxt, volume,
+                                                     image_id=image_id,
+                                                     image_meta=image_meta)
+
+    @ddt.data(True, False)
+    def test__copy_image_to_volume(self, is_encrypted):
+        fake_db = mock.MagicMock()
+        fake_driver = mock.MagicMock()
+        fake_volume_manager = mock.MagicMock()
+        fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+            fake_volume_manager, fake_db, fake_driver)
+        key = fakes.ENCRYPTION_KEY_ID if is_encrypted else None
+        volume = fake_volume.fake_volume_obj(
+            self.ctxt,
+            encryption_key_id=key)
+
+        fake_image_service = fake_image.FakeImageService()
+        image_id = fakes.IMAGE_ID
+        image_location = 'abc'
+
+        fake_manager._copy_image_to_volume(self.ctxt, volume, image_id,
+                                           image_location, fake_image_service)
+        if is_encrypted:
+            fake_driver.copy_image_to_encrypted_volume.assert_called_once_with(
+                self.ctxt, volume, fake_image_service, image_id)
+        else:
+            fake_driver.copy_image_to_volume.assert_called_once_with(
+                self.ctxt, volume, fake_image_service, image_id)
+

 class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):

@@ -675,7 +765,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
         if format is 'raw' and not owner and location:
             fake_driver.create_cloned_volume.assert_called_once_with(
                 volume, image_volume)
-            handle_bootable.assert_called_once_with(self.ctxt, volume['id'],
+            handle_bootable.assert_called_once_with(self.ctxt, volume,
                                                     image_id=image_id,
                                                     image_meta=image_meta)
         else:
@@ -752,7 +842,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):

         mock_handle_bootable.assert_called_once_with(
             self.ctxt,
-            volume['id'],
+            volume,
             image_id=image_id,
             image_meta=image_meta
         )
@@ -812,7 +902,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):

         mock_handle_bootable.assert_called_once_with(
             self.ctxt,
-            volume['id'],
+            volume,
             image_id=image_id,
             image_meta=image_meta
         )
@@ -886,7 +976,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):

         mock_handle_bootable.assert_called_once_with(
             self.ctxt,
-            volume['id'],
+            volume,
             image_id=image_id,
             image_meta=image_meta
         )
@@ -953,7 +1043,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):

         mock_handle_bootable.assert_called_once_with(
             self.ctxt,
-            volume['id'],
+            volume,
             image_id=image_id,
             image_meta=image_meta
         )
@@ -1079,7 +1169,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):

         mock_handle_bootable.assert_called_once_with(
             self.ctxt,
-            volume['id'],
+            volume,
             image_id=image_id,
             image_meta=image_meta
         )
diff --git a/cinder/tests/unit/volume/flows/test_manage_volume_flow.py b/cinder/tests/unit/volume/flows/test_manage_volume_flow.py
index c14220994..44f0772df 100644
--- a/cinder/tests/unit/volume/flows/test_manage_volume_flow.py
+++ b/cinder/tests/unit/volume/flows/test_manage_volume_flow.py
@@ -11,11 +11,16 @@
 #    under the License.
 """ Tests for manage_existing TaskFlow """

+import mock
+
 from cinder import context
 from cinder import test
+from cinder.tests.unit import fake_constants as fakes
 from cinder.tests.unit import fake_volume
 from cinder.tests.unit.volume.flows import fake_volume_api
 from cinder.volume.flows.api import manage_existing
+from cinder.volume.flows import common as flow_common
+from cinder.volume.flows.manager import manage_existing as manager


 class ManageVolumeFlowTestCase(test.TestCase):
@@ -49,3 +54,44 @@ class ManageVolumeFlowTestCase(test.TestCase):
         create_what.update({'volume': volume})
         create_what.pop('volume_id')
         task.execute(self.ctxt, **create_what)
+
+    @staticmethod
+    def _stub_volume_object_get(self):
+        volume = {
+            'id': fakes.VOLUME_ID,
+            'volume_type_id': fakes.VOLUME_TYPE_ID,
+            'status': 'creating',
+            'name': fakes.VOLUME_NAME,
+        }
+        return fake_volume.fake_volume_obj(self.ctxt, **volume)
+
+    def test_prepare_for_quota_reserveration_task_execute(self):
+        mock_db = mock.MagicMock()
+        mock_driver = mock.MagicMock()
+        mock_manage_existing_ref = mock.MagicMock()
+        mock_get_size = self.mock_object(
+            mock_driver, 'manage_existing_get_size')
+        mock_get_size.return_value = '5'
+
+        volume_ref = self._stub_volume_object_get(self)
+        task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver)
+
+        result = task.execute(self.ctxt, volume_ref, mock_manage_existing_ref)
+
+        self.assertEqual(volume_ref, result['volume_properties'])
+        self.assertEqual('5', result['size'])
+        self.assertEqual(volume_ref.id, result['volume_spec']['volume_id'])
+        mock_get_size.assert_called_once_with(
+            volume_ref, mock_manage_existing_ref)
+
+    def test_prepare_for_quota_reservation_task_revert(self):
+        mock_db = mock.MagicMock()
+        mock_driver = mock.MagicMock()
+        mock_result = mock.MagicMock()
+        mock_flow_failures = mock.MagicMock()
+        mock_error_out = self.mock_object(flow_common, 'error_out')
+        volume_ref = self._stub_volume_object_get(self)
+        task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver)
+
+        task.revert(self.ctxt, mock_result, mock_flow_failures, volume_ref)
+        mock_error_out.assert_called_once_with(volume_ref, reason=mock.ANY)
diff --git a/cinder/tests/unit/zonemanager/test_fc_zone_manager.py b/cinder/tests/unit/zonemanager/test_fc_zone_manager.py
index c2072941a..1ff7f0c40 100644
--- a/cinder/tests/unit/zonemanager/test_fc_zone_manager.py
+++ b/cinder/tests/unit/zonemanager/test_fc_zone_manager.py
@@ -55,8 +55,8 @@ class TestFCZoneManager(test.TestCase):
         def fake_build_driver(self):
             self.driver = mock.Mock(fc_zone_driver.FCZoneDriver)

-        self.stubs.Set(fc_zone_manager.ZoneManager, '_build_driver',
-                       fake_build_driver)
+        self.mock_object(fc_zone_manager.ZoneManager, '_build_driver',
+                         fake_build_driver)

         self.zm = fc_zone_manager.ZoneManager(configuration=config)
         self.configuration = conf.Configuration(None)
diff --git a/cinder/tests/unit/zonemanager/test_volume_driver.py b/cinder/tests/unit/zonemanager/test_volume_driver.py
index c8aa6c64c..34dd1de20 100644
--- a/cinder/tests/unit/zonemanager/test_volume_driver.py
+++ b/cinder/tests/unit/zonemanager/test_volume_driver.py
@@ -22,7 +22,7 @@ import mock

 from cinder import test
-from cinder.tests.unit import fake_driver
+from cinder.tests import fake_driver
 from cinder import utils
 from cinder.volume import configuration as conf
 from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver
diff --git a/cinder/utils.py b/cinder/utils.py
index 2e6a1e76c..84864fd61 100644
--- a/cinder/utils.py
+++ b/cinder/utils.py
@@ -36,11 +36,8 @@ import sys
 import tempfile
 import time
 import types
-from xml.dom import minidom
-from xml.parsers import expat
-from xml import sax
-from xml.sax import expatreader

+from os_brick import encryptors
 from os_brick.initiator import connector
 from oslo_concurrency import lockutils
 from oslo_concurrency import processutils
@@ -57,6 +54,7 @@ import webob.exc

 from cinder import exception
 from cinder.i18n import _, _LE, _LW
+from cinder import keymgr


 CONF = cfg.CONF
@@ -270,52 +268,6 @@ def last_completed_audit_period(unit=None):
     return (begin, end)


-class ProtectedExpatParser(expatreader.ExpatParser):
-    """An expat parser which disables DTD's and entities by default."""
-
-    def __init__(self, forbid_dtd=True, forbid_entities=True,
-                 *args, **kwargs):
-        # Python 2.x old style class
-        expatreader.ExpatParser.__init__(self, *args, **kwargs)
-        self.forbid_dtd = forbid_dtd
-        self.forbid_entities = forbid_entities
-
-    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
-        raise ValueError("Inline DTD forbidden")
-
-    def entity_decl(self, entityName, is_parameter_entity, value, base,
-                    systemId, publicId, notationName):
-        raise ValueError("<!ENTITY> forbidden")
-
-    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
-        # expat 1.2
-        raise ValueError("<!ENTITY> forbidden")
-
-    def reset(self):
-        expatreader.ExpatParser.reset(self)
-        if self.forbid_dtd:
-            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
-        if self.forbid_entities:
-            self._parser.EntityDeclHandler = self.entity_decl
-            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
-
-
-def safe_minidom_parse_string(xml_string):
-    """Parse an XML string using minidom safely.
-
-    """
-    try:
-        if six.PY3 and isinstance(xml_string, bytes):
-            # On Python 3, minidom.parseString() requires Unicode when
-            # the parser parameter is used.
-            #
-            # Bet that XML used in Cinder is always encoded to UTF-8.
-            xml_string = xml_string.decode('utf-8')
-        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
-    except sax.SAXParseException:
-        raise expat.ExpatError()
-
-
 def is_valid_boolstr(val):
     """Check if the provided string is a valid bool string or not."""
     val = str(val).lower()
@@ -557,6 +509,36 @@ def brick_get_connector(protocol, driver=None,
                                            *args, **kwargs)


+def brick_get_encryptor(connection_info, *args, **kwargs):
+    """Wrapper to get a brick encryptor object."""
+
+    root_helper = get_root_helper()
+    key_manager = keymgr.API()
+    return encryptors.get_volume_encryptor(root_helper=root_helper,
+                                           connection_info=connection_info,
+                                           keymgr=key_manager,
+                                           *args, **kwargs)
+
+
+def brick_attach_volume_encryptor(context, attach_info, encryption):
+    """Attach encryption layer."""
+    connection_info = attach_info['conn']
+    connection_info['data']['device_path'] = attach_info['device']['path']
+    encryptor = brick_get_encryptor(connection_info,
+                                    **encryption)
+    encryptor.attach_volume(context, **encryption)
+
+
+def brick_detach_volume_encryptor(attach_info, encryption):
+    """Detach encryption layer."""
+    connection_info = attach_info['conn']
+    connection_info['data']['device_path'] = attach_info['device']['path']
+
+    encryptor = brick_get_encryptor(connection_info,
+                                    **encryption)
+    encryptor.detach_volume(**encryption)
+
+
 def require_driver_initialized(driver):
     """Verifies if `driver` is initialized

@@ -570,6 +552,21 @@ def require_driver_initialized(driver):
         driver_name = driver.__class__.__name__
         LOG.error(_LE("Volume driver %s not initialized"), driver_name)
         raise exception.DriverNotInitialized()
+    else:
+        log_unsupported_driver_warning(driver)
+
+
+def log_unsupported_driver_warning(driver):
+    """Annoy the log about unsupported drivers."""
+    if not driver.supported:
+        # Check to see if the driver is flagged as supported.
+        LOG.warning(_LW("Volume driver (%(driver_name)s %(version)s) is "
+                        "currently unsupported and may be removed in the "
+                        "next release of OpenStack. Use at your own risk."),
+                    {'driver_name': driver.__class__.__name__,
+                     'version': driver.get_version()},
+                    resource={'type': 'driver',
+                              'id': driver.__class__.__name__})


 def get_file_mode(path):
@@ -889,6 +886,7 @@ def trace(f):
             return f(*args, **kwargs)

         all_args = inspect.getcallargs(f, *args, **kwargs)
+
         logger.debug('==> %(func)s: call %(all_args)r',
                      {'func': func_name, 'all_args': all_args})

@@ -904,10 +902,17 @@ def trace(f):
             raise
         total_time = int(round(time.time() * 1000)) - start_time

+        if isinstance(result, dict):
+            mask_result = strutils.mask_dict_password(result)
+        elif isinstance(result, six.string_types):
+            mask_result = strutils.mask_password(result)
+        else:
+            mask_result = result
+
         logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
                      {'func': func_name,
                       'time': total_time,
-                      'result': result})
+                      'result': mask_result})
         return result
     return trace_logging_wrapper
@@ -1004,7 +1009,8 @@ def calculate_virtual_free_capacity(total_capacity,
                                     provisioned_capacity,
                                     thin_provisioning_support,
                                     max_over_subscription_ratio,
-                                    reserved_percentage):
+                                    reserved_percentage,
+                                    thin):
     """Calculate the virtual free capacity based on thin provisioning support.

    :param total_capacity: total_capacity_gb of a host_state or pool.
@@ -1017,13 +1023,14 @@ def calculate_virtual_free_capacity(total_capacity,
                        a host_state or a pool
    :param reserved_percentage: reserved_percentage of a host_state or
                                a pool.
+    :param thin: whether volume to be provisioned is thin
    :returns: the calculated virtual free capacity.
     """

     total = float(total_capacity)
     reserved = float(reserved_percentage) / 100

-    if thin_provisioning_support:
+    if thin and thin_provisioning_support:
         free = (total * max_over_subscription_ratio
                 - provisioned_capacity
                 - math.floor(total * reserved))
@@ -1061,10 +1068,10 @@ def validate_integer(value, name, min_value=None, max_value=None):
     return value


-def validate_extra_specs(specs):
-    """Validating key and value of extra specs."""
+def validate_dictionary_string_length(specs):
+    """Check the length of each key and value of dictionary."""
     if not isinstance(specs, dict):
-        msg = _('extra_specs must be a dictionary.')
+        msg = _('specs must be a dictionary.')
         raise exception.InvalidInput(reason=msg)

     for key, value in specs.items():
@@ -1075,3 +1082,8 @@
         if value is not None:
             check_string_length(value, 'Value for key "%s"' % key,
                                 min_length=0, max_length=255)
+
+
+def service_expired_time(with_timezone=False):
+    return (timeutils.utcnow(with_timezone=with_timezone) -
+            datetime.timedelta(seconds=CONF.service_down_time))
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index 2162ed642..231c47101 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -30,6 +30,7 @@ from oslo_utils import uuidutils
 import six

 from cinder.api import common
+from cinder.common import constants
 from cinder import context
 from cinder import db
 from cinder.db import base
@@ -38,7 +39,7 @@ from cinder import flow_utils
 from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import cache as image_cache
 from cinder.image import glance
-from cinder import keymgr
+from cinder import keymgr as key_manager
 from cinder import objects
 from cinder.objects import base as objects_base
 from cinder.objects import fields
@@ -130,7 +131,7 @@ class API(base.Base):
         self.volume_rpcapi = volume_rpcapi.VolumeAPI()
         self.availability_zones = []
         self.availability_zones_last_fetched = None
-        self.key_manager = keymgr.API()
+        self.key_manager = key_manager.API(CONF)
         super(API, self).__init__(db_driver)

     def list_availability_zones(self, enable_cache=False):
@@ -149,7 +150,7 @@ class API(base.Base):
             if cache_age >= CONF.az_cache_duration:
                 refresh_cache = True
         if refresh_cache or not enable_cache:
-            topic = CONF.volume_topic
+            topic = constants.VOLUME_TOPIC
             ctxt = context.get_admin_context()
             services = objects.ServiceList.get_all_by_topic(ctxt, topic)
             az_data = [(s.availability_zone, s.disabled)
@@ -177,9 +178,10 @@ class API(base.Base):
                                first_type=None, second_type=None):
         safe = False
         elevated = context.elevated()
-        services = objects.ServiceList.get_all_by_topic(elevated,
-                                                        'cinder-volume',
-                                                        disabled=True)
+        services = objects.ServiceList.get_all_by_topic(
+            elevated,
+            constants.VOLUME_TOPIC,
+            disabled=True)
         if len(services.objects) == 1:
             safe = True
         else:
@@ -210,7 +212,8 @@ class API(base.Base):
                availability_zone=None, source_volume=None,
                scheduler_hints=None,
                source_replica=None, consistencygroup=None,
-               cgsnapshot=None, multiattach=False, source_cg=None):
+               cgsnapshot=None, multiattach=False, source_cg=None,
+               group=None, group_snapshot=None, source_group=None):

         check_policy(context, 'create')

@@ -242,6 +245,18 @@ class API(base.Base):
                         "group).") % volume_type
                 raise exception.InvalidInput(reason=msg)

+        if group and (not group_snapshot and not source_group):
+            if not volume_type:
+                msg = _("volume_type must be provided when creating "
+                        "a volume in a group.")
+                raise exception.InvalidInput(reason=msg)
+            vol_type_ids = [v_type.id for v_type in group.volume_types]
+            if volume_type.get('id') not in vol_type_ids:
+                msg = _("Invalid volume_type provided: %s (requested "
+                        "type must be supported by this "
+                        "group).") % volume_type
+                raise exception.InvalidInput(reason=msg)
+
         if volume_type and 'extra_specs' not in volume_type:
             extra_specs = volume_types.get_volume_type_extra_specs(
                 volume_type['id'])
@@ -284,6 +299,8 @@ class API(base.Base):
         if CONF.storage_availability_zone:
             availability_zones.add(CONF.storage_availability_zone)

+        utils.check_metadata_properties(metadata)
+
         create_what = {
             'context': context,
             'raw_size': size,
@@ -302,12 +319,19 @@ class API(base.Base):
             'consistencygroup': consistencygroup,
             'cgsnapshot': cgsnapshot,
             'multiattach': multiattach,
+            'group': group,
+            'group_snapshot': group_snapshot,
+            'source_group': source_group,
         }
         try:
-            sched_rpcapi = (self.scheduler_rpcapi if (not cgsnapshot and
-                            not source_cg) else None)
-            volume_rpcapi = (self.volume_rpcapi if (not cgsnapshot and
-                             not source_cg) else None)
+            sched_rpcapi = (self.scheduler_rpcapi if (
+                            not cgsnapshot and not source_cg and
+                            not group_snapshot and not source_group)
+                            else None)
+            volume_rpcapi = (self.volume_rpcapi if (
+                             not cgsnapshot and not source_cg and
+                             not group_snapshot and not source_group)
+                             else None)
             flow_engine = create_volume.get_flow(self.db,
                                                  self.image_service,
                                                  availability_zones,
@@ -370,7 +394,8 @@ class API(base.Base):
         # Build required conditions for conditional update
         expected = {'attach_status': db.Not('attached'),
                     'migration_status': self.AVAILABLE_MIGRATION_STATUS,
-                    'consistencygroup_id': None}
+                    'consistencygroup_id': None,
+                    'group_id': None}

         # If not force deleting we have status conditions
         if not force:
@@ -391,14 +416,15 @@ class API(base.Base):
             status = utils.build_or_str(expected.get('status'),
                                         _('status must be %s and'))
             msg = _('Volume %s must not be migrating, attached, belong to a '
-                    'consistency group or have snapshots.') % status
+                    'group or have snapshots.') % status
             LOG.info(msg)
             raise exception.InvalidVolume(reason=msg)

         if cascade:
             values = {'status': 'deleting'}
             expected = {'status': ('available', 'error', 'deleting'),
-                        'cgsnapshot_id': None}
+                        'cgsnapshot_id': None,
+                        'group_snapshot_id': None}
             snapshots = objects.snapshot.SnapshotList.get_all_for_volume(
                 context, volume.id)
             for s in snapshots:
@@ -422,7 +448,7 @@ class API(base.Base):
         encryption_key_id = volume.get('encryption_key_id', None)
         if encryption_key_id is not None:
             try:
-                self.key_manager.delete_key(context, encryption_key_id)
+                self.key_manager.delete(context, encryption_key_id)
             except Exception as e:
                 LOG.warning(_LW("Unable to delete encryption key for "
                                 "volume: %s."), e.msg, resource=volume)
@@ -523,6 +549,24 @@ class API(base.Base):
         LOG.info(_LI("Get all volumes completed successfully."))
         return volumes

+    def get_volume_summary(self, context, filters=None):
+        check_policy(context, 'get_all')
+
+        if filters is None:
+            filters = {}
+
+        allTenants = utils.get_bool_param('all_tenants', filters)
+
+        if context.is_admin and allTenants:
+            del filters['all_tenants']
+            volumes = objects.VolumeList.get_volume_summary_all(context)
+        else:
+            volumes = objects.VolumeList.get_volume_summary_by_project(
+                context, context.project_id)
+
+        LOG.info(_LI("Get summary completed successfully."))
+        return volumes
+
     def get_snapshot(self, context, snapshot_id):
         check_policy(context, 'get_snapshot')
         snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
@@ -660,7 +704,7 @@ class API(base.Base):

     @wrap_check_policy
     def initialize_connection(self, context, volume, connector):
-        if volume['status'] == 'maintenance':
+        if volume.status == 'maintenance':
             LOG.info(_LI('Unable to initialize the connection for '
                          'volume, because it is in '
                          'maintenance.'), resource=volume)
@@ -702,10 +746,12 @@ class API(base.Base):

     def _create_snapshot(self, context,
                          volume, name, description,
                          force=False, metadata=None,
-                         cgsnapshot_id=None):
+                         cgsnapshot_id=None,
+                         group_snapshot_id=None):
         snapshot = self.create_snapshot_in_db(
             context, volume, name,
-            description, force, metadata, cgsnapshot_id)
+            description, force, metadata, cgsnapshot_id,
+            True, group_snapshot_id)
         self.volume_rpcapi.create_snapshot(context, volume, snapshot)

         return snapshot
@@ -714,7 +760,8 @@ class API(base.Base):
                                volume, name, description,
                                force, metadata,
                                cgsnapshot_id,
-                               commit_quota=True):
+                               commit_quota=True,
+                               group_snapshot_id=None):
         check_policy(context, 'create_snapshot', volume)

         if volume['status'] == 'maintenance':
@@ -763,6 +810,7 @@ class API(base.Base):
             kwargs = {
                 'volume_id': volume['id'],
                 'cgsnapshot_id': cgsnapshot_id,
+                'group_snapshot_id': group_snapshot_id,
                 'user_id': context.user_id,
                 'project_id': context.project_id,
                 'status': fields.SnapshotStatus.CREATING,
@@ -793,10 +841,16 @@ class API(base.Base):

     def create_snapshots_in_db(self, context,
                                volume_list,
                                name, description,
-                               force, cgsnapshot_id):
+                               cgsnapshot_id,
+                               group_snapshot_id=None):
         snapshot_list = []
         for volume in volume_list:
-            self._create_snapshot_in_db_validate(context, volume, force)
+            self._create_snapshot_in_db_validate(context, volume, True)
+            if volume['status'] == 'error':
+                msg = _("The snapshot cannot be created when the volume is "
+                        "in error status.")
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)

         reservations = self._create_snapshots_in_db_reserve(
             context, volume_list)
@@ -804,7 +858,8 @@ class API(base.Base):
         options_list = []
         for volume in volume_list:
             options = self._create_snapshot_in_db_options(
-                context, volume, name, description, cgsnapshot_id)
+                context, volume, name, description, cgsnapshot_id,
+                group_snapshot_id)
             options_list.append(options)

         try:
@@ -877,9 +932,11 @@ class API(base.Base):

     def _create_snapshot_in_db_options(self, context, volume,
                                        name, description,
-                                       cgsnapshot_id):
+                                       cgsnapshot_id,
+                                       group_snapshot_id=None):
         options = {'volume_id': volume['id'],
                    'cgsnapshot_id': cgsnapshot_id,
+                   'group_snapshot_id': group_snapshot_id,
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': fields.SnapshotStatus.CREATING,
@@ -893,9 +950,11 @@ class API(base.Base):

     def create_snapshot(self, context,
                         volume, name, description,
-                        metadata=None, cgsnapshot_id=None):
+                        metadata=None, cgsnapshot_id=None,
+                        group_snapshot_id=None):
         result = self._create_snapshot(context, volume, name, description,
-                                       False, metadata, cgsnapshot_id)
+                                       False, metadata, cgsnapshot_id,
+                                       group_snapshot_id)
         LOG.info(_LI("Snapshot create request issued successfully."),
                  resource=result)
         return result
@@ -913,7 +972,8 @@ class API(base.Base):
     def delete_snapshot(self, context, snapshot, force=False,
                         unmanage_only=False):
         # Build required conditions for conditional update
-        expected = {'cgsnapshot_id': None}
+        expected = {'cgsnapshot_id': None,
+                    'group_snapshot_id': None}
         # If not force deleting we have status conditions
         if not force:
             expected['status'] = (fields.SnapshotStatus.AVAILABLE,
@@ -924,7 +984,7 @@ class API(base.Base):
         if not result:
             status = utils.build_or_str(expected.get('status'),
                                         _('status must be %s and'))
-            msg = (_('Snapshot %s must not be part of a consistency group.') %
+            msg = (_('Snapshot %s must not be part of a group.') %
                    status)
             LOG.error(msg)
             raise exception.InvalidSnapshot(reason=msg)
@@ -949,6 +1009,15 @@ class API(base.Base):
                  resource=volume)
         return dict(rv)

+    @wrap_check_policy
+    def create_volume_metadata(self, context, volume, metadata):
+        """Creates volume metadata."""
+        db_meta = self._update_volume_metadata(context, volume, metadata)
+
+        LOG.info(_LI("Create volume metadata completed successfully."),
+                 resource=volume)
+        return db_meta
+
     @wrap_check_policy
     def delete_volume_metadata(self, context, volume,
                                key, meta_type=common.METADATA_TYPES.user):
@@ -962,26 +1031,28 @@ class API(base.Base):
         LOG.info(_LI("Delete volume metadata completed successfully."),
                  resource=volume)

-    @wrap_check_policy
-    def update_volume_metadata(self, context, volume,
-                               metadata, delete=False,
-                               meta_type=common.METADATA_TYPES.user):
-        """Updates or creates volume metadata.
-
-        If delete is True, metadata items that are not specified in the
-        `metadata` argument will be deleted.
-
-        """
+    def _update_volume_metadata(self, context, volume, metadata, delete=False,
+                                meta_type=common.METADATA_TYPES.user):
         if volume['status'] in ('maintenance', 'uploading'):
             msg = _('Updating volume metadata is not allowed for volumes in '
                     '%s status.') % volume['status']
             LOG.info(msg, resource=volume)
             raise exception.InvalidVolume(reason=msg)
         utils.check_metadata_properties(metadata)
-        db_meta = self.db.volume_metadata_update(context, volume['id'],
-                                                 metadata,
-                                                 delete,
-                                                 meta_type)
+        return self.db.volume_metadata_update(context, volume['id'],
+                                              metadata, delete, meta_type)
+
+    @wrap_check_policy
+    def update_volume_metadata(self, context, volume, metadata, delete=False,
+                               meta_type=common.METADATA_TYPES.user):
+        """Updates volume metadata.
+
+        If delete is True, metadata items that are not specified in the
+        `metadata` argument will be deleted.
+ + """ + db_meta = self._update_volume_metadata(context, volume, metadata, + delete, meta_type) # TODO(jdg): Implement an RPC call for drivers that may use this info @@ -1251,7 +1322,7 @@ class API(base.Base): """Migrate the volume to the specified host.""" # Make sure the host is in the list of available hosts elevated = context.elevated() - topic = CONF.volume_topic + topic = constants.VOLUME_TOPIC services = objects.ServiceList.get_all_by_topic( elevated, topic, disabled=False) found = False @@ -1270,6 +1341,7 @@ class API(base.Base): 'migration_status': self.AVAILABLE_MIGRATION_STATUS, 'replication_status': (None, 'disabled'), 'consistencygroup_id': (None, ''), + 'group_id': (None, ''), 'host': db.Not(host)} filters = [~db.volume_has_snapshots_filter()] @@ -1293,8 +1365,8 @@ class API(base.Base): if not result: msg = _('Volume %s status must be available or in-use, must not ' 'be migrating, have snapshots, be replicated, be part of ' - 'a consistency group and destination host must be ' - 'different than the current host') % {'vol_id': volume.id} + 'a group and destination host must be different than the ' + 'current host') % {'vol_id': volume.id} LOG.error(msg) raise exception.InvalidVolume(reason=msg) @@ -1308,7 +1380,7 @@ class API(base.Base): 'volume_type': volume_type, 'volume_id': volume.id} self.scheduler_rpcapi.migrate_volume_to_host(context, - CONF.volume_topic, + constants.VOLUME_TOPIC, volume.id, host, force_host_copy, @@ -1427,24 +1499,24 @@ class API(base.Base): expected = {'status': ('available', 'in-use'), 'migration_status': self.AVAILABLE_MIGRATION_STATUS, 'consistencygroup_id': (None, ''), + 'group_id': (None, ''), 'volume_type_id': db.Not(vol_type_id)} # We don't support changing encryption requirements yet # We don't support changing QoS at the front-end yet for in-use volumes # TODO(avishay): Call Nova to change QoS setting (libvirt has support # - virDomainSetBlockIoTune() - Nova does not have support yet). 
- filters = [db.volume_has_same_encryption_type(vol_type_id), - db.volume_qos_allows_retype(vol_type_id)] + filters = [db.volume_qos_allows_retype(vol_type_id)] updates = {'status': 'retyping', 'previous_status': objects.Volume.model.status} if not volume.conditional_update(updates, expected, filters): msg = _('Retype needs volume to be in available or in-use state, ' - 'have same encryption requirements, not be part of an ' - 'active migration or a consistency group, requested type ' - 'has to be different that the one from the volume, and ' - 'for in-use volumes front-end qos specs cannot change.') + 'not be part of an active migration or a consistency ' + 'group, requested type has to be different than the ' + 'one from the volume, and for in-use volumes front-end ' + 'qos specs cannot change.') LOG.error(msg) QUOTAS.rollback(context, reservations + old_reservations, project_id=volume.project_id) @@ -1457,13 +1529,14 @@ class API(base.Base): 'quota_reservations': reservations, 'old_reservations': old_reservations} - self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume.id, + self.scheduler_rpcapi.retype(context, constants.VOLUME_TOPIC, + volume.id, request_spec=request_spec, filter_properties={}, volume=volume) LOG.info(_LI("Retype volume request issued successfully."), resource=volume) - def _get_service_by_host(self, context, host): + def _get_service_by_host(self, context, host, resource='volume'): elevated = context.elevated() try: svc_host = volume_utils.extract_host(host, 'backend') @@ -1473,11 +1546,16 @@ class API(base.Base): with excutils.save_and_reraise_exception(): LOG.error(_LE('Unable to find service: %(service)s for ' 'given host: %(host)s.'), - {'service': CONF.volume_topic, 'host': host}) + {'service': constants.VOLUME_BINARY, 'host': host}) if service.disabled: - LOG.error(_LE('Unable to manage_existing volume on a disabled ' - 'service.')) + LOG.error(_LE('Unable to manage existing %s on a disabled ' + 'service.'), resource) + raise exception.ServiceUnavailable() + + if not utils.service_is_up(service): + LOG.error(_LE('Unable to manage existing %s on a service that is ' + 'down.'), resource) raise exception.ServiceUnavailable() return service @@ -1537,33 +1615,18 @@ class API(base.Base): def manage_existing_snapshot(self, context, ref, volume, name=None, description=None, metadata=None): - host = volume_utils.extract_host(volume['host']) - try: - # NOTE(jdg): We don't use this, we just make sure it's valid - # and exists before sending off the call - service = objects.Service.get_by_args( - context.elevated(), host, 'cinder-volume') - except exception.ServiceNotFound: - with excutils.save_and_reraise_exception(): - LOG.error(_LE('Unable to find service: %(service)s for ' - 'given host: %(host)s.'), - {'service': CONF.volume_topic, 'host': host}) - if service.disabled: - LOG.error(_LE('Unable to manage_existing snapshot on a disabled ' - 'service.')) - raise exception.ServiceUnavailable() - + service = self._get_service_by_host(context, volume.host, 'snapshot') snapshot_object = self.create_snapshot_in_db(context, volume, name, description, False, metadata, None, commit_quota=False) self.volume_rpcapi.manage_existing_snapshot(context, snapshot_object, - ref, host) + ref, service.host) return snapshot_object def get_manageable_snapshots(self, context, host, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): - self._get_service_by_host(context, host) + self._get_service_by_host(context, host, resource='snapshot') return
self.volume_rpcapi.get_manageable_snapshots(context, host, marker, limit, offset, sort_keys, @@ -1581,7 +1644,7 @@ class API(base.Base): svc_host = volume_utils.extract_host(host, 'backend') service = objects.Service.get_by_args( - ctxt, svc_host, 'cinder-volume') + ctxt, svc_host, constants.VOLUME_BINARY) expected = {'replication_status': [fields.ReplicationStatus.ENABLED, fields.ReplicationStatus.FAILED_OVER]} result = service.conditional_update( @@ -1603,7 +1666,7 @@ class API(base.Base): svc_host = volume_utils.extract_host(host, 'backend') service = objects.Service.get_by_args( - ctxt, svc_host, 'cinder-volume') + ctxt, svc_host, constants.VOLUME_BINARY) expected = {'frozen': False} result = service.conditional_update( {'frozen': True}, expected) @@ -1624,7 +1687,7 @@ class API(base.Base): svc_host = volume_utils.extract_host(host, 'backend') service = objects.Service.get_by_args( - ctxt, svc_host, 'cinder-volume') + ctxt, svc_host, constants.VOLUME_BINARY) expected = {'frozen': True} result = service.conditional_update( {'frozen': False}, expected) @@ -1698,9 +1761,6 @@ class API(base.Base): class HostAPI(base.Base): - def __init__(self): - super(HostAPI, self).__init__() - """Sub-set of the Volume Manager API for managing host operations.""" def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new volumes.""" diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index 44383e78d..1a36e8462 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -83,7 +83,9 @@ volume_opts = [ help='Method used to wipe old volumes'), cfg.IntOpt('volume_clear_size', default=0, - help='Size in MiB to wipe at start of old volumes. 0 => all'), + max=1024, + help='Size in MiB to wipe at start of old volumes. 1024 MiB ' 'at max. 0 => all'), cfg.StrOpt('volume_clear_ionice', help='The flag to pass to ionice to alter the i/o priority ' 'of the process used to zero a volume after deletion, ' @@ -258,6 +260,18 @@ volume_opts = [ choices=['iscsi', 'fc'], help='Protocol for transferring data between host and ' 'storage back-end.'), + cfg.BoolOpt('backup_use_temp_snapshot', + default=False, + help='If this is set to True, the backup_use_temp_snapshot ' + 'path will be used during the backup. Otherwise, it ' + 'will use the backup_use_temp_volume path.'), + cfg.BoolOpt('enable_unsupported_driver', + default=False, + help="Set this to True when you want to allow an unsupported " + "driver to start. Drivers that haven't maintained a " + "working CI system and testing are marked as unsupported " + "until CI is working again. This also marks a driver as " + "deprecated and may be removed in the next release."), ] # for backward compatibility @@ -349,6 +363,12 @@ class BaseVD(object): # set True by manager after successful check_for_setup self._initialized = False + # If a driver hasn't maintained its CI system, this will get + # set to False, which prevents the driver from starting. + # Add enable_unsupported_driver = True in cinder.conf to get + # the unsupported driver started.
+ self._supported = True + def _driver_data_namespace(self): namespace = self.__class__.__name__ if self.configuration: @@ -473,6 +493,10 @@ class BaseVD(object): def initialized(self): return self._initialized + @property + def supported(self): + return self._supported + def set_throttle(self): bps_limit = ((self.configuration and self.configuration.safe_get('volume_copy_bps_limit')) or @@ -758,6 +782,18 @@ class BaseVD(object): self._stats = data def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch image from image_service and write to unencrypted volume.""" + self._copy_image_data_to_volume( + context, volume, image_service, image_id, False) + + def copy_image_to_encrypted_volume( + self, context, volume, image_service, image_id): + """Fetch image from image_service and write to encrypted volume.""" + self._copy_image_data_to_volume( + context, volume, image_service, image_id, True) + + def _copy_image_data_to_volume(self, context, volume, image_service, + image_id, encrypted=False): """Fetch the image from image_service and write it to the volume.""" LOG.debug('copy_image_to_volume %s.', volume['name']) @@ -766,14 +802,25 @@ class BaseVD(object): properties = utils.brick_get_connector_properties(use_multipath, enforce_multipath) attach_info, volume = self._attach_volume(context, volume, properties) - try: - image_utils.fetch_to_raw(context, - image_service, - image_id, - attach_info['device']['path'], - self.configuration.volume_dd_blocksize, - size=volume['size']) + if encrypted: + encryption = self.db.volume_encryption_metadata_get(context, + volume.id) + utils.brick_attach_volume_encryptor(context, + attach_info, + encryption) + try: + image_utils.fetch_to_raw( + context, + image_service, + image_id, + attach_info['device']['path'], + self.configuration.volume_dd_blocksize, + size=volume['size']) + finally: + if encrypted: + utils.brick_detach_volume_encryptor(attach_info, + encryption) finally: self._detach_volume(context, attach_info, volume, properties) @@ -1093,7 +1140,7 @@ class BaseVD(object): if snapshot: temp_vol_ref = self._create_temp_volume_from_snapshot( context, volume, snapshot) - backup.temp_volume_id = temp_vol_ref['id'] + backup.temp_volume_id = temp_vol_ref.id backup.save() device_to_backup = temp_vol_ref @@ -1106,7 +1153,7 @@ class BaseVD(object): if previous_status == "in-use": temp_vol_ref = self._create_temp_cloned_volume( context, volume) - backup.temp_volume_id = temp_vol_ref['id'] + backup.temp_volume_id = temp_vol_ref.id backup.save() device_to_backup = temp_vol_ref @@ -1119,7 +1166,7 @@ class BaseVD(object): Otherwise for in-use volume, create a temp snapshot and back it up. 
""" - volume = self.db.volume_get(context, backup.volume_id) + volume = objects.Volume.get_by_id(context, backup.volume_id) snapshot = None if backup.snapshot_id: snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) @@ -1195,7 +1242,7 @@ class BaseVD(object): if snapshot: temp_vol_ref = self._create_temp_volume_from_snapshot( context, volume, snapshot) - backup.temp_volume_id = temp_vol_ref['id'] + backup.temp_volume_id = temp_vol_ref.id backup.save() device_to_backup = temp_vol_ref @@ -1208,7 +1255,7 @@ class BaseVD(object): if previous_status == "in-use": temp_vol_ref = self._create_temp_cloned_volume( context, volume) - backup.temp_volume_id = temp_vol_ref['id'] + backup.temp_volume_id = temp_vol_ref.id backup.save() device_to_backup = temp_vol_ref @@ -1359,8 +1406,8 @@ class BaseVD(object): temp_snap_ref.save() return temp_snap_ref - def _create_temp_cloned_volume(self, context, volume): - temp_volume = { + def _create_temp_volume(self, context, volume): + kwargs = { 'size': volume['size'], 'display_name': 'backup-vol-%s' % volume['id'], 'host': volume['host'], @@ -1371,46 +1418,38 @@ class BaseVD(object): 'availability_zone': volume.availability_zone, 'volume_type_id': volume.volume_type_id, } - temp_vol_ref = self.db.volume_create(context, temp_volume) + temp_vol_ref = objects.Volume(context=context, **kwargs) + temp_vol_ref.create() + return temp_vol_ref + + def _create_temp_cloned_volume(self, context, volume): + temp_vol_ref = self._create_temp_volume(context, volume) try: - # Some drivers return None, because they do not need to update the - # model for the volume. For those cases we set the model_update to - # an empty dictionary. - model_update = self.create_cloned_volume(temp_vol_ref, - volume) or {} + model_update = self.create_cloned_volume(temp_vol_ref, volume) + if model_update: + temp_vol_ref.update(model_update) except Exception: with excutils.save_and_reraise_exception(): - self.db.volume_destroy(context.elevated(), - temp_vol_ref['id']) + temp_vol_ref.destroy() - model_update['status'] = 'available' - self.db.volume_update(context, temp_vol_ref['id'], model_update) - return self.db.volume_get(context, temp_vol_ref['id']) + temp_vol_ref.status = 'available' + temp_vol_ref.save() + return temp_vol_ref def _create_temp_volume_from_snapshot(self, context, volume, snapshot): - temp_volume = { - 'size': volume['size'], - 'display_name': 'backup-vol-%s' % volume['id'], - 'host': volume['host'], - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': 'creating', - 'attach_status': 'detached', - 'availability_zone': volume.availability_zone, - 'volume_type_id': volume.volume_type_id, - } - temp_vol_ref = self.db.volume_create(context, temp_volume) + temp_vol_ref = self._create_temp_volume(context, volume) try: model_update = self.create_volume_from_snapshot(temp_vol_ref, - snapshot) or {} + snapshot) + if model_update: + temp_vol_ref.update(model_update) except Exception: with excutils.save_and_reraise_exception(): - self.db.volume_destroy(context.elevated(), - temp_vol_ref['id']) + temp_vol_ref.destroy() - model_update['status'] = 'available' - self.db.volume_update(context, temp_vol_ref['id'], model_update) - return self.db.volume_get(context, temp_vol_ref['id']) + temp_vol_ref.status = 'available' + temp_vol_ref.save() + return temp_vol_ref def _delete_temp_snapshot(self, context, snapshot): self.delete_snapshot(snapshot) @@ -1664,6 +1703,216 @@ class BaseVD(object): """Old replication update method, deprecate.""" raise 
NotImplementedError() + def create_group(self, context, group): + """Creates a group. + + :param context: the context of the caller. + :param group: the dictionary of the group to be created. + :returns: model_update + + model_update will be in this format: {'status': xxx, ......}. + + If the status in model_update is 'error', the manager will throw + an exception and it will be caught in the try-except block in the + manager. If the driver throws an exception, the manager will also + catch it in the try-except block. The group status in the db will + be changed to 'error'. + + For a successful operation, the driver can either build the + model_update and return it or return None. The group status will + be set to 'available'. + """ + raise NotImplementedError() + + def delete_group(self, context, group, volumes): + """Deletes a group. + + :param context: the context of the caller. + :param group: the dictionary of the group to be deleted. + :param volumes: a list of volume dictionaries in the group. + :returns: model_update, volumes_model_update + + param volumes is retrieved directly from the db. It is a list of + cinder.db.sqlalchemy.models.Volume to be precise. It cannot be + assigned to volumes_model_update. volumes_model_update is a list of + dictionaries. It has to be built by the driver. An entry will be + in this format: {'id': xxx, 'status': xxx, ......}. model_update + will be in this format: {'status': xxx, ......}. + + The driver should populate volumes_model_update and model_update + and return them. + + The manager will check volumes_model_update and update db accordingly + for each volume. If the driver successfully deleted some volumes + but failed to delete others, it should set statuses of the volumes + accordingly so that the manager can update db correctly. + + If the status in any entry of volumes_model_update is 'error_deleting' + or 'error', the status in model_update will be set to the same if it + is not already 'error_deleting' or 'error'. + + If the status in model_update is 'error_deleting' or 'error', the + manager will raise an exception and the status of the group will be + set to 'error' in the db. If volumes_model_update is not returned by + the driver, the manager will set the status of every volume in the + group to 'error' in the except block. + + If the driver raises an exception during the operation, it will be + caught by the try-except block in the manager. The statuses of the + group and all volumes in it will be set to 'error'. + + For a successful operation, the driver can either build the + model_update and volumes_model_update and return them or + return None, None. The statuses of the group and all volumes + will be set to 'deleted' after the manager deletes them from db. + """ + raise NotImplementedError() + + def update_group(self, context, group, + add_volumes=None, remove_volumes=None): + """Updates a group. + + :param context: the context of the caller. + :param group: the dictionary of the group to be updated. + :param add_volumes: a list of volume dictionaries to be added. + :param remove_volumes: a list of volume dictionaries to be removed. + :returns: model_update, add_volumes_update, remove_volumes_update + + model_update is a dictionary that the driver wants the manager + to update upon a successful return. If None is returned, the manager + will set the status to 'available'. + + add_volumes_update and remove_volumes_update are lists of dictionaries + that the driver wants the manager to update upon a successful return. 
+ Note that each entry requires a {'id': xxx} so that the correct + volume entry can be updated. If None is returned, the volume will + retain its original status. Also note that you cannot directly + assign add_volumes to add_volumes_update as add_volumes is a list of + cinder.db.sqlalchemy.models.Volume objects and cannot be used for + db update directly. Same with remove_volumes. + + If the driver throws an exception, the status of the group as well as + those of the volumes to be added/removed will be set to 'error'. + """ + raise NotImplementedError() + + def create_group_from_src(self, context, group, volumes, + group_snapshot=None, snapshots=None, + source_group=None, source_vols=None): + """Creates a group from source. + + :param context: the context of the caller. + :param group: the Group object to be created. + :param volumes: a list of Volume objects in the group. + :param group_snapshot: the GroupSnapshot object as source. + :param snapshots: a list of snapshot objects in group_snapshot. + :param source_group: the Group object as source. + :param source_vols: a list of volume objects in the source_group. + :returns: model_update, volumes_model_update + + The source can be group_snapshot or a source_group. + + param volumes is retrieved directly from the db. It is a list of + cinder.db.sqlalchemy.models.Volume to be precise. It cannot be + assigned to volumes_model_update. volumes_model_update is a list of + dictionaries. It has to be built by the driver. An entry will be + in this format: {'id': xxx, 'status': xxx, ......}. model_update + will be in this format: {'status': xxx, ......}. + + To be consistent with other volume operations, the manager will + assume the operation is successful if no exception is thrown by + the driver. For a successful operation, the driver can either build + the model_update and volumes_model_update and return them or + return None, None. + """ + raise NotImplementedError() + + def create_group_snapshot(self, context, group_snapshot, snapshots): + """Creates a group_snapshot. + + :param context: the context of the caller. + :param group_snapshot: the GroupSnapshot object to be created. + :param snapshots: a list of Snapshot objects in the group_snapshot. + :returns: model_update, snapshots_model_update + + param snapshots is a list of Snapshot objects. It cannot be assigned + to snapshots_model_update. snapshots_model_update is a list of + dictionaries. It has to be built by the driver. An entry will be + in this format: {'id': xxx, 'status': xxx, ......}. model_update + will be in this format: {'status': xxx, ......}. + + The driver should populate snapshots_model_update and model_update + and return them. + + The manager will check snapshots_model_update and update db accordingly + for each snapshot. If the driver successfully created some snapshots + but failed to create others, it should set statuses of the snapshots + accordingly so that the manager can update db correctly. + + If the status in any entry of snapshots_model_update is 'error', the + status in model_update will be set to the same if it is not already + 'error'. + + If the status in model_update is 'error', the manager will raise an + exception and the status of group_snapshot will be set to 'error' in + the db. If snapshots_model_update is not returned by the driver, the + manager will set the status of every snapshot to 'error' in the except + block.
+ + If the driver raises an exception during the operation, it will be + caught by the try-except block in the manager and the statuses of + group_snapshot and all snapshots will be set to 'error'. + + For a successful operation, the driver can either build the + model_update and snapshots_model_update and return them or + return None, None. The statuses of group_snapshot and all snapshots + will be set to 'available' at the end of the manager function. + """ + raise NotImplementedError() + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + """Deletes a group_snapshot. + + :param context: the context of the caller. + :param group_snapshot: the GroupSnapshot object to be deleted. + :param snapshots: a list of snapshot objects in the group_snapshot. + :returns: model_update, snapshots_model_update + + param snapshots is a list of objects. It cannot be assigned to + snapshots_model_update. snapshots_model_update is a list of + dictionaries. It has to be built by the driver. An entry will be + in this format: {'id': xxx, 'status': xxx, ......}. model_update + will be in this format: {'status': xxx, ......}. + + The driver should populate snapshots_model_update and model_update + and return them. + + The manager will check snapshots_model_update and update db accordingly + for each snapshot. If the driver successfully deleted some snapshots + but failed to delete others, it should set statuses of the snapshots + accordingly so that the manager can update db correctly. + + If the status in any entry of snapshots_model_update is + 'error_deleting' or 'error', the status in model_update will be set to + the same if it is not already 'error_deleting' or 'error'. + + If the status in model_update is 'error_deleting' or 'error', the + manager will raise an exception and the status of group_snapshot will + be set to 'error' in the db. If snapshots_model_update is not returned + by the driver, the manager will set the status of every snapshot to + 'error' in the except block. + + If the driver raises an exception during the operation, it will be + caught by the try-except block in the manager and the statuses of + group_snapshot and all snapshots will be set to 'error'. + + For a successful operation, the driver can either build the + model_update and snapshots_model_update and return them or + return None, None. The statuses of group_snapshot and all snapshots + will be set to 'deleted' after the manager deletes them from db. + """ + raise NotImplementedError() + @six.add_metaclass(abc.ABCMeta) class LocalVD(object): @@ -2067,8 +2316,8 @@ class ReplicaVD(object): return -class VolumeDriver(ConsistencyGroupVD, TransferVD, ManageableVD, ExtendVD, - CloneableImageVD, ManageableSnapshotsVD, +class VolumeDriver(ConsistencyGroupVD, TransferVD, ManageableVD, + ExtendVD, CloneableImageVD, ManageableSnapshotsVD, SnapshotVD, ReplicaVD, LocalVD, MigrateVD, BaseVD): """This class will be deprecated soon.
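The new group-API docstrings in BaseVD above define a contract rather than an implementation, so a short sketch may help. The following is a hypothetical, minimal illustration (not part of this change) of how a driver could satisfy the create_group/delete_group contract; the _array_* helpers are assumed placeholders for real backend calls, and a real driver would subclass BaseVD rather than object.

# Hypothetical sketch of the create_group/delete_group contract above.
# _array_create_group/_array_delete_volume are assumed backend helpers.
class ExampleGroupDriver(object):

    def create_group(self, context, group):
        self._array_create_group(group.id)  # assumed backend call
        # Returning None is equally valid; the manager then sets the
        # group status to 'available' itself.
        return {'status': 'available'}

    def delete_group(self, context, group, volumes):
        model_update = {'status': 'deleted'}
        volumes_model_update = []
        for volume in volumes:
            try:
                self._array_delete_volume(volume['id'])  # assumed call
                volumes_model_update.append(
                    {'id': volume['id'], 'status': 'deleted'})
            except Exception:
                # Per the contract, a volume that fails to delete drags
                # the group status down to 'error_deleting' as well.
                volumes_model_update.append(
                    {'id': volume['id'], 'status': 'error_deleting'})
                model_update['status'] = 'error_deleting'
        return model_update, volumes_model_update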
diff --git a/cinder/volume/drivers/block_device.py b/cinder/volume/drivers/block_device.py index 03b809bc5..c41e3792f 100644 --- a/cinder/volume/drivers/block_device.py +++ b/cinder/volume/drivers/block_device.py @@ -46,7 +46,10 @@ CONF.register_opts(volume_opts) @interface.volumedriver class BlockDeviceDriver(driver.BaseVD, driver.LocalVD, driver.CloneableImageVD, driver.TransferVD): - VERSION = '2.2.0' + VERSION = '2.3.0' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Cinder_Jenkins" def __init__(self, *args, **kwargs): super(BlockDeviceDriver, self).__init__(*args, **kwargs) @@ -64,12 +67,13 @@ class BlockDeviceDriver(driver.BaseVD, driver.LocalVD, def check_for_setup_error(self): pass - def _update_provider_location(self, object, device): + def _update_provider_location(self, obj, device): # We update provider_location and host to mark device as used to # avoid race with other threads. # TODO(ynesenenko): need to remove DB access from driver - object.update({'provider_location': device, 'host': self.host}) - object.save() + host = '{host}#{pool}'.format(host=self.host, pool=self.get_pool(obj)) + obj.update({'provider_location': device, 'host': host}) + obj.save() @utils.synchronized('block_device', external=True) def create_volume(self, volume): @@ -155,18 +159,26 @@ class BlockDeviceDriver(driver.BaseVD, driver.LocalVD, total_size += size LOG.debug("Updating volume stats.") - backend_name = self.configuration.safe_get('volume_backend_name') - data = {'total_capacity_gb': total_size / units.Ki, - 'free_capacity_gb': free_size / units.Ki, - 'reserved_percentage': self.configuration.reserved_percentage, - 'QoS_support': False, - 'volume_backend_name': backend_name or self.__class__.__name__, - 'vendor_name': "Open Source", - 'driver_version': self.VERSION, - 'storage_protocol': 'unknown'} + data = { + 'volume_backend_name': self.backend_name, + 'vendor_name': "Open Source", + 'driver_version': self.VERSION, + 'storage_protocol': 'unknown', + 'pools': []} + single_pool = { + 'pool_name': data['volume_backend_name'], + 'total_capacity_gb': total_size / units.Ki, + 'free_capacity_gb': free_size / units.Ki, + 'reserved_percentage': self.configuration.reserved_percentage, + 'QoS_support': False} + + data['pools'].append(single_pool) self._stats = data + def get_pool(self, volume): + return self.backend_name + def _get_used_paths(self, lst): used_dev = set() for item in lst: diff --git a/cinder/volume/drivers/blockbridge.py b/cinder/volume/drivers/blockbridge.py index 08ff40d0f..3098a6f23 100644 --- a/cinder/volume/drivers/blockbridge.py +++ b/cinder/volume/drivers/blockbridge.py @@ -175,6 +175,9 @@ class BlockbridgeISCSIDriver(driver.ISCSIDriver): VERSION = '1.3.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Blockbridge_EPS_CI" + def __init__(self, *args, **kwargs): super(BlockbridgeISCSIDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/cloudbyte/cloudbyte.py b/cinder/volume/drivers/cloudbyte/cloudbyte.py index 93c0ce4d2..ac46bd377 100644 --- a/cinder/volume/drivers/cloudbyte/cloudbyte.py +++ b/cinder/volume/drivers/cloudbyte/cloudbyte.py @@ -48,6 +48,7 @@ class CloudByteISCSIDriver(san.SanISCSIDriver): """ VERSION = '1.2.0' + CI_WIKI_NAME = "CloudByte_CI" volume_stats = {} def __init__(self, *args, **kwargs): diff --git a/cinder/volume/drivers/coho.py b/cinder/volume/drivers/coho.py index 255c92bf1..0d25e1b01 100644 --- a/cinder/volume/drivers/coho.py +++ b/cinder/volume/drivers/coho.py @@ -23,11 +23,14 @@ from oslo_config import cfg from oslo_log import 
log as logging from random import randint +from cinder import context from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume.drivers import nfs +from cinder.volume import qos_specs +from cinder.volume import volume_types # # RPC Definition @@ -57,9 +60,12 @@ COHO_V1 = 1 COHO1_CREATE_SNAPSHOT = 1 COHO1_DELETE_SNAPSHOT = 2 COHO1_CREATE_VOLUME_FROM_SNAPSHOT = 3 +COHO1_SET_QOS_POLICY = 4 COHO_MAX_RETRIES = 5 +COHO_NO_QOS = {'maxIOPS': 0, 'maxMBS': 0} + # # Simple RPC Client # @@ -262,8 +268,8 @@ class CohoRPCClient(Client): def create_snapshot(self, src, dst, flags): self._call(COHO1_CREATE_SNAPSHOT, [(six.b(src), self.packer.pack_string), - (six.b(dst), self.packer.pack_string), - (flags, self.packer.pack_uint)]) + (six.b(dst), self.packer.pack_string), + (flags, self.packer.pack_uint)]) def delete_snapshot(self, name): self._call(COHO1_DELETE_SNAPSHOT, @@ -272,21 +278,30 @@ class CohoRPCClient(Client): def create_volume_from_snapshot(self, src, dst): self._call(COHO1_CREATE_VOLUME_FROM_SNAPSHOT, [(six.b(src), self.packer.pack_string), - (six.b(dst), self.packer.pack_string)]) + (six.b(dst), self.packer.pack_string)]) + + def set_qos_policy(self, src, qos): + self._call(COHO1_SET_QOS_POLICY, + [(six.b(src), self.packer.pack_string), + (six.b(qos.get('uuid', '')), self.packer.pack_string), + (0, self.packer.pack_uhyper), + (qos.get('maxIOPS', 0), self.packer.pack_uhyper), + (0, self.packer.pack_uhyper), + (qos.get('maxMBS', 0), self.packer.pack_uhyper)]) # # Coho Data Volume Driver # -VERSION = '1.0.0' +VERSION = '1.1.0' LOG = logging.getLogger(__name__) coho_opts = [ cfg.IntOpt('coho_rpc_port', default=2049, - help='RPC port to connect to Coha Data MicroArray') + help='RPC port to connect to Coho Data MicroArray') ] CONF = cfg.CONF @@ -300,6 +315,7 @@ class CohoDriver(nfs.NfsDriver): Creates file on NFS share for using it as block device on hypervisor. Version history: 1.0.0 - Initial driver + 1.1.0 - Added QoS support """ # We have to overload this attribute of RemoteFSDriver because @@ -309,6 +325,11 @@ class CohoDriver(nfs.NfsDriver): # We are more permissive. 
SHARE_FORMAT_REGEX = r'.+:/.*' + COHO_QOS_KEYS = ['maxIOPS', 'maxMBS'] + + # ThirdPartySystems wiki page name + CI_WIKI_NAME = "Coho_Data_CI" + def __init__(self, *args, **kwargs): super(CohoDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(coho_opts) @@ -355,6 +376,9 @@ class CohoDriver(nfs.NfsDriver): self._execute('cp', source_path, volume_path, run_as_root=self._execute_as_root) + qos = self._retrieve_qos_setting(volume) + self._do_set_qos_policy(volume, qos) + def _get_volume_location(self, volume_id): """Returns provider location for given volume.""" @@ -365,6 +389,47 @@ class CohoDriver(nfs.NfsDriver): addr, path = volume.provider_location.split(":") return addr, path + def _do_set_qos_policy(self, volume, qos): + if qos: + addr, path = volume['provider_location'].split(':') + volume_path = os.path.join(path, volume['name']) + + client = self._get_rpcclient(addr, + self.configuration.coho_rpc_port) + client.set_qos_policy(volume_path, qos) + + def _get_qos_by_volume_type(self, ctxt, type_id): + qos = {} + + # NOTE(bardia): we only honor qos_specs + if type_id: + volume_type = volume_types.get_volume_type(ctxt, type_id) + qos_specs_id = volume_type.get('qos_specs_id') + + if qos_specs_id is not None: + kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] + qos['uuid'] = qos_specs_id + else: + kvs = {} + + for key, value in kvs.items(): + if key in self.COHO_QOS_KEYS: + qos[key] = int(value) + return qos + + def _retrieve_qos_setting(self, volume): + ctxt = context.get_admin_context() + type_id = volume['volume_type_id'] + + return self._get_qos_by_volume_type(ctxt, type_id) + + def create_volume(self, volume): + resp = super(CohoDriver, self).create_volume(volume) + qos = self._retrieve_qos_setting(volume) + self._do_set_qos_policy(volume, qos) + + return resp + def create_snapshot(self, snapshot): """Create a volume snapshot.""" addr, path = self._get_volume_location(snapshot['volume_id']) @@ -376,7 +441,7 @@ class CohoDriver(nfs.NfsDriver): def delete_snapshot(self, snapshot): """Delete a volume snapshot.""" - addr, path = self._get_volume_location(snapshot['volume_id']) + addr, unused = self._get_volume_location(snapshot['volume_id']) snapshot_name = snapshot['name'] client = self._get_rpcclient(addr, self.configuration.coho_rpc_port) client.delete_snapshot(snapshot_name) @@ -387,9 +452,13 @@ class CohoDriver(nfs.NfsDriver): addr, path = volume['provider_location'].split(":") volume_path = os.path.join(path, volume['name']) snapshot_name = snapshot['name'] + client = self._get_rpcclient(addr, self.configuration.coho_rpc_port) client.create_volume_from_snapshot(snapshot_name, volume_path) + qos = self._retrieve_qos_setting(volume) + self._do_set_qos_policy(volume, qos) + return {'provider_location': volume['provider_location']} def _extend_file_sparse(self, path, size): @@ -408,7 +477,23 @@ class CohoDriver(nfs.NfsDriver): self._extend_file_sparse(volume_path, new_size) - def get_volume_stats(self, refresh): + def retype(self, ctxt, volume, new_type, diff, host): + """Convert the volume to be of the new type. + + Changes the volume's QoS policy if needed. 
+ """ + qos = self._get_qos_by_volume_type(ctxt, new_type['id']) + + # Reset the QoS policy on the volume in case the previous + # type had a QoS policy + if not qos: + qos = COHO_NO_QOS + + self._do_set_qos_policy(volume, qos) + + return True, None + + def get_volume_stats(self, refresh=False): """Pass in Coho Data information in volume stats.""" _stats = super(CohoDriver, self).get_volume_stats(refresh) _stats["vendor_name"] = 'Coho Data' @@ -418,5 +503,6 @@ class CohoDriver(nfs.NfsDriver): _stats["total_capacity_gb"] = 'unknown' _stats["free_capacity_gb"] = 'unknown' _stats["export_paths"] = self._mounted_shares + _stats["QoS_support"] = True return _stats diff --git a/cinder/volume/drivers/coprhd/common.py b/cinder/volume/drivers/coprhd/common.py index 6b53a045d..60793eff5 100644 --- a/cinder/volume/drivers/coprhd/common.py +++ b/cinder/volume/drivers/coprhd/common.py @@ -150,7 +150,7 @@ class EMCCoprHDDriverCommon(object): coprhd_utils.AUTH_TOKEN = None - # instantiate a few coprhd api objects for later use + # instantiate coprhd api objects for later use self.volume_obj = coprhd_vol.Volume( self.configuration.coprhd_hostname, self.configuration.coprhd_port) @@ -233,7 +233,6 @@ class EMCCoprHDDriverCommon(object): self.vpool = vpool['CoprHD:VPOOL'] try: - cgid = None coprhd_cgid = None try: cgid = vol['consistencygroup_id'] @@ -270,14 +269,14 @@ class EMCCoprHDDriverCommon(object): self.configuration.coprhd_project, self.configuration.coprhd_tenant) - cgUri = self.consistencygroup_obj.consistencygroup_query( + cg_uri = self.consistencygroup_obj.consistencygroup_query( name, self.configuration.coprhd_project, self.configuration.coprhd_tenant) self.set_tags_for_resource( coprhd_cg.ConsistencyGroup.URI_CONSISTENCY_GROUP_TAGS, - cgUri, group) + cg_uri, group) except coprhd_utils.CoprHdError as e: coprhd_err_msg = (_("Consistency Group %(name)s:" @@ -423,17 +422,17 @@ class EMCCoprHDDriverCommon(object): if not rslt: continue - volUri = rslt[0] + vol_uri = rslt[0] snapshots_of_volume = self.snapshot_obj.snapshot_list_uri( 'block', 'volumes', - volUri) + vol_uri) for snapUri in snapshots_of_volume: snapshot_obj = self.snapshot_obj.snapshot_show_uri( 'block', - volUri, + vol_uri, snapUri['id']) if not coprhd_utils.get_node_value(snapshot_obj, @@ -554,9 +553,9 @@ class EMCCoprHDDriverCommon(object): log_err_msg) @retry_wrapper - def set_volume_tags(self, vol, exemptTags=None, truncate_name=False): - if exemptTags is None: - exemptTags = [] + def set_volume_tags(self, vol, exempt_tags=None, truncate_name=False): + if exempt_tags is None: + exempt_tags = [] self.authenticate_user() name = self._get_resource_name(vol, truncate_name) @@ -568,19 +567,19 @@ class EMCCoprHDDriverCommon(object): name) self.set_tags_for_resource( - coprhd_vol.Volume.URI_TAG_VOLUME, vol_uri, vol, exemptTags) + coprhd_vol.Volume.URI_TAG_VOLUME, vol_uri, vol, exempt_tags) @retry_wrapper - def set_tags_for_resource(self, uri, resourceId, resource, - exemptTags=None): - if exemptTags is None: - exemptTags = [] + def set_tags_for_resource(self, uri, resource_id, resource, + exempt_tags=None): + if exempt_tags is None: + exempt_tags = [] self.authenticate_user() # first, get the current tags that start with the OPENSTACK_TAG # eyecatcher - formattedUri = uri.format(resourceId) + formattedUri = uri.format(resource_id) remove_tags = [] currentTags = self.tag_obj.list_tags(formattedUri) for cTag in currentTags: @@ -590,7 +589,7 @@ class EMCCoprHDDriverCommon(object): try: if remove_tags: self.tag_obj.tag_resource(uri, - 
resourceId, + resource_id, None, remove_tags) except coprhd_utils.CoprHdError as e: @@ -604,7 +603,7 @@ class EMCCoprHDDriverCommon(object): try: for prop, value in vars(resource).items(): try: - if prop in exemptTags: + if prop in exempt_tags: continue if prop.startswith("_"): @@ -627,7 +626,7 @@ class EMCCoprHDDriverCommon(object): try: self.tag_obj.tag_resource( uri, - resourceId, + resource_id, add_tags, None) except coprhd_utils.CoprHdError as e: @@ -653,12 +652,12 @@ class EMCCoprHDDriverCommon(object): except KeyError as e: pass try: - (storageresType, - storageresTypename) = self.volume_obj.get_storageAttributes( + (storageres_type, + storageres_typename) = self.volume_obj.get_storageAttributes( srcname, None, None) resource_id = self.volume_obj.storage_resource_query( - storageresType, + storageres_type, srcname, None, None, @@ -683,7 +682,7 @@ class EMCCoprHDDriverCommon(object): self.volume_obj.volume_clone_detach( "", full_project_name, name, True) - except IndexError as e: + except IndexError: LOG.exception(_LE("Volume clone detach returned empty task list")) except coprhd_utils.CoprHdError as e: @@ -751,6 +750,12 @@ class EMCCoprHDDriverCommon(object): self.create_cloned_volume(volume, snapshot, truncate_name) return + if snapshot.get('cgsnapshot_id'): + raise coprhd_utils.CoprHdError( + coprhd_utils.CoprHdError.SOS_FAILURE_ERR, + _("Volume cannot be created individually from a snapshot " + "that is part of a Consistency Group")) + src_snapshot_name = None src_vol_ref = snapshot['volume'] new_volume_name = self._get_resource_name(volume, truncate_name) @@ -761,12 +766,12 @@ class EMCCoprHDDriverCommon(object): src_snapshot_name = self._get_coprhd_snapshot_name( snapshot, coprhd_vol_info['volume_uri']) - (storageresType, - storageresTypename) = self.volume_obj.get_storageAttributes( + (storageres_type, + storageres_typename) = self.volume_obj.get_storageAttributes( coprhd_vol_info['volume_name'], None, src_snapshot_name) resource_id = self.volume_obj.storage_resource_query( - storageresType, + storageres_type, coprhd_vol_info['volume_name'], None, src_snapshot_name, @@ -861,10 +866,10 @@ class EMCCoprHDDriverCommon(object): volumename = self._get_coprhd_volume_name(vol) projectname = self.configuration.coprhd_project tenantname = self.configuration.coprhd_tenant - storageresType = 'block' - storageresTypename = 'volumes' - resourceUri = self.snapshot_obj.storage_resource_query( - storageresType, + storageres_type = 'block' + storageres_typename = 'volumes' + resource_uri = self.snapshot_obj.storage_resource_query( + storageres_type, volume_name=volumename, cg_name=None, project=projectname, @@ -872,22 +877,22 @@ class EMCCoprHDDriverCommon(object): inactive = False sync = True self.snapshot_obj.snapshot_create( - storageresType, - storageresTypename, - resourceUri, + storageres_type, + storageres_typename, + resource_uri, snapshotname, inactive, sync) - snapshotUri = self.snapshot_obj.snapshot_query( - storageresType, - storageresTypename, - resourceUri, + snapshot_uri = self.snapshot_obj.snapshot_query( + storageres_type, + storageres_typename, + resource_uri, snapshotname) self.set_tags_for_resource( coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG, - snapshotUri, snapshot, ['_volume']) + snapshot_uri, snapshot, ['_volume']) except coprhd_utils.CoprHdError as e: coprhd_err_msg = (_("Snapshot: %(snapshotname)s, create failed" @@ -922,27 +927,27 @@ class EMCCoprHDDriverCommon(object): volumename = self._get_coprhd_volume_name(vol) projectname = self.configuration.coprhd_project 
tenantname = self.configuration.coprhd_tenant - storageresType = 'block' - storageresTypename = 'volumes' - resourceUri = self.snapshot_obj.storage_resource_query( - storageresType, + storageres_type = 'block' + storageres_typename = 'volumes' + resource_uri = self.snapshot_obj.storage_resource_query( + storageres_type, volume_name=volumename, cg_name=None, project=projectname, tenant=tenantname) - if resourceUri is None: + if resource_uri is None: LOG.info(_LI( "Snapshot %s" " is not found; snapshot deletion" " is considered successful."), snapshotname) else: snapshotname = self._get_coprhd_snapshot_name( - snapshot, resourceUri) + snapshot, resource_uri) self.snapshot_obj.snapshot_delete( - storageresType, - storageresTypename, - resourceUri, + storageres_type, + storageres_typename, + resource_uri, snapshotname, sync=True) except coprhd_utils.CoprHdError as e: @@ -954,21 +959,21 @@ class EMCCoprHDDriverCommon(object): log_err_msg) @retry_wrapper - def initialize_connection(self, volume, protocol, initiatorPorts, + def initialize_connection(self, volume, protocol, initiator_ports, hostname): try: self.authenticate_user() volumename = self._get_coprhd_volume_name(volume) - foundgroupname = self._find_exportgroup(initiatorPorts) + foundgroupname = self._find_exportgroup(initiator_ports) foundhostname = None if foundgroupname is None: - for i in range(len(initiatorPorts)): + for i in range(len(initiator_ports)): # check if this initiator is contained in any CoprHD Host # object LOG.debug( - "checking for initiator port: %s", initiatorPorts[i]) - foundhostname = self._find_host(initiatorPorts[i]) + "checking for initiator port: %s", initiator_ports[i]) + foundhostname = self._find_host(initiator_ports[i]) if foundhostname: LOG.info(_LI("Found host %s"), foundhostname) @@ -1006,7 +1011,7 @@ class EMCCoprHDDriverCommon(object): None, None) - return self._find_device_info(volume, initiatorPorts) + return self._find_device_info(volume, initiator_ports) except coprhd_utils.CoprHdError as e: raise coprhd_utils.CoprHdError( @@ -1017,12 +1022,12 @@ class EMCCoprHDDriverCommon(object): {'name': self._get_coprhd_volume_name( volume), 'hostname': hostname, - 'initiatorport': initiatorPorts[0], + 'initiatorport': initiator_ports[0], 'err': six.text_type(e.msg)}) ) @retry_wrapper - def terminate_connection(self, volume, protocol, initiatorPorts, + def terminate_connection(self, volume, protocol, initiator_ports, hostname): try: self.authenticate_user() @@ -1038,7 +1043,7 @@ class EMCCoprHDDriverCommon(object): itls = exports['itl'] for itl in itls: itl_port = itl['initiator']['port'] - if itl_port in initiatorPorts: + if itl_port in initiator_ports: exportgroups.add(itl['export']['id']) for exportgroup in exportgroups: @@ -1159,11 +1164,11 @@ class EMCCoprHDDriverCommon(object): self.configuration.coprhd_port) if len(rslt) > 0: - rsltCg = self.consistencygroup_obj.show( + rslt_cg = self.consistencygroup_obj.show( rslt[0], self.configuration.coprhd_project, self.configuration.coprhd_tenant) - return rsltCg['id'] + return rslt_cg['id'] else: raise coprhd_utils.CoprHdError( coprhd_utils.CoprHdError.NOT_FOUND_ERR, @@ -1193,11 +1198,11 @@ class EMCCoprHDDriverCommon(object): if rslt is None or len(rslt) == 0: return snapshot['name'] else: - rsltSnap = self.snapshot_obj.snapshot_show_uri( + rslt_snap = self.snapshot_obj.snapshot_show_uri( 'block', resUri, rslt[0]) - return rsltSnap['name'] + return rslt_snap['name'] def _get_coprhd_volume_name(self, vol, verbose=False): tagname = self.OPENSTACK_TAG + ":id:" + 
vol['id'] @@ -1217,12 +1222,12 @@ class EMCCoprHDDriverCommon(object): self.configuration.coprhd_port) if len(rslt) > 0: - rsltVol = self.volume_obj.show_by_uri(rslt[0]) + rslt_vol = self.volume_obj.show_by_uri(rslt[0]) if verbose is True: - return {'volume_name': rsltVol['name'], 'volume_uri': rslt[0]} + return {'volume_name': rslt_vol['name'], 'volume_uri': rslt[0]} else: - return rsltVol['name'] + return rslt_vol['name'] else: raise coprhd_utils.CoprHdError( coprhd_utils.CoprHdError.NOT_FOUND_ERR, @@ -1337,22 +1342,6 @@ class EMCCoprHDDriverCommon(object): return foundhostname - @retry_wrapper - def _host_exists(self, host_name): - """Check if Host object with given hostname already exists in CoprHD. - - """ - hosts = self.host_obj.search_by_name(host_name) - - if len(hosts) > 0: - for host in hosts: - hostname = host['match'] - if host_name == hostname: - return hostname - return hostname - LOG.debug("no host found for: %s", host_name) - return None - @retry_wrapper def get_exports_count_by_initiators(self, initiator_ports): """Fetches ITL map for a given list of initiator ports.""" diff --git a/cinder/volume/drivers/coprhd/fc.py b/cinder/volume/drivers/coprhd/fc.py index f334245e8..bd16e0186 100644 --- a/cinder/volume/drivers/coprhd/fc.py +++ b/cinder/volume/drivers/coprhd/fc.py @@ -23,7 +23,7 @@ from oslo_log import log as logging from cinder import interface from cinder.volume import driver from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.zonemanager import utils +from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @@ -31,6 +31,10 @@ LOG = logging.getLogger(__name__) @interface.volumedriver class EMCCoprHDFCDriver(driver.FibreChannelDriver): """CoprHD FC Driver.""" + VERSION = "3.0.0.0" + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "EMC_CoprHD_CI" def __init__(self, *args, **kwargs): super(EMCCoprHDFCDriver, self).__init__(*args, **kwargs) @@ -92,8 +96,8 @@ class EMCCoprHDFCDriver(driver.FibreChannelDriver): """Creates a consistencygroup.""" return self.common.create_consistencygroup(context, group) - def update_consistencygroup(self, context, group, add_volumes, - remove_volumes): + def update_consistencygroup(self, context, group, add_volumes=None, + remove_volumes=None): """Updates volumes in consistency group.""" return self.common.update_consistencygroup(group, add_volumes, remove_volumes) @@ -114,7 +118,7 @@ class EMCCoprHDFCDriver(driver.FibreChannelDriver): """Make sure volume is exported.""" pass - @utils.AddFCZone + @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info.""" @@ -150,10 +154,10 @@ class EMCCoprHDFCDriver(driver.FibreChannelDriver): LOG.debug('FC properties: %s', properties) return { 'driver_volume_type': 'fibre_channel', - 'data': properties + 'data': properties, } - @utils.RemoveFCZone + @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to detach a volume from an instance.""" diff --git a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py b/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py index fb6c39eac..eaa8721b2 100644 --- a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py +++ b/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py @@ -21,9 +21,9 @@ except ImportError: import json import re import socket -import threading import oslo_serialization +from oslo_utils import timeutils from oslo_utils import units import requests from 
requests import exceptions @@ -31,20 +31,20 @@ import six from cinder import exception from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers.urihelper import ( - singletonURIHelperInstance) +from cinder.volume.drivers.coprhd.helpers import urihelper PROD_NAME = 'storageos' TIMEOUT_SEC = 20 # 20 SECONDS -IS_TASK_TIMEOUT = False global AUTH_TOKEN AUTH_TOKEN = None TASK_TIMEOUT = 300 +URI_TASKS_BY_OPID = '/vdc/tasks/{0}' + def _decode_list(data): rv = [] @@ -77,7 +77,6 @@ def _decode_dict(data): def json_decode(rsp): """Used to decode the JSON encoded response.""" - o = "" try: o = json.loads(rsp, object_hook=_decode_dict) except ValueError: @@ -144,34 +143,34 @@ def service_json_request(ip_addr, port, http_method, uri, body, error_msg = None if response.status_code == 500: - responseText = json_decode(response.text) - errorDetails = "" - if 'details' in responseText: - errorDetails = responseText['details'] + response_text = json_decode(response.text) + error_details = "" + if 'details' in response_text: + error_details = response_text['details'] error_msg = (_("CoprHD internal server error. Error details: %s"), - errorDetails) + error_details) elif response.status_code == 401: error_msg = _("Access forbidden: Authentication required") elif response.status_code == 403: error_msg = "" - errorDetails = "" - errorDescription = "" + error_details = "" + error_description = "" - responseText = json_decode(response.text) + response_text = json_decode(response.text) - if 'details' in responseText: - errorDetails = responseText['details'] + if 'details' in response_text: + error_details = response_text['details'] error_msg = (_("%(error_msg)s Error details:" - " %(errorDetails)s"), + " %(error_details)s"), {'error_msg': error_msg, - 'errorDetails': errorDetails + 'error_details': error_details }) - elif 'description' in responseText: - errorDescription = responseText['description'] + elif 'description' in response_text: + error_description = response_text['description'] error_msg = (_("%(error_msg)s Error description:" - " %(errorDescription)s"), + " %(error_description)s"), {'error_msg': error_msg, - 'errorDescription': errorDescription + 'error_description': error_description }) else: error_msg = _("Access forbidden: You don't have" @@ -184,21 +183,21 @@ def service_json_request(ip_addr, port, http_method, uri, body, error_msg = six.text_type(response.text) elif response.status_code == 503: error_msg = "" - errorDetails = "" - errorDescription = "" + error_details = "" + error_description = "" - responseText = json_decode(response.text) + response_text = json_decode(response.text) - if 'code' in responseText: - errorCode = responseText['code'] - error_msg = error_msg + "Error " + six.text_type(errorCode) + if 'code' in response_text: + errorCode = response_text['code'] + error_msg = "Error " + six.text_type(errorCode) - if 'details' in responseText: - errorDetails = responseText['details'] - error_msg = error_msg + ": " + errorDetails - elif 'description' in responseText: - errorDescription = responseText['description'] - error_msg = error_msg + ": " + errorDescription + if 'details' in response_text: + error_details = response_text['details'] + error_msg = error_msg + ": " + error_details + elif 'description' in response_text: + error_description = response_text['description'] + error_msg = error_msg + ": " + error_description else: error_msg = _("Service temporarily unavailable:" " The server is temporarily unable to" @@ -381,8 +380,8 @@ def search_by_tag(resource_search_uri, 
ipaddr, port): :param port: Port number """ # check if the URI passed has both project and name parameters - strUri = six.text_type(resource_search_uri) - if strUri.__contains__("search") and strUri.__contains__("?tag="): + str_uri = six.text_type(resource_search_uri) + if 'search' in str_uri and '?tag=' in str_uri: # Get the project URI (s, h) = service_json_request( @@ -404,47 +403,41 @@ def search_by_tag(resource_search_uri, ipaddr, port): " is not in the expected" " format, it should end" " with ?tag={0}") - % strUri)) - -# Timeout handler for synchronous operations - - -def timeout_handler(): - global IS_TASK_TIMEOUT - IS_TASK_TIMEOUT = True + % str_uri)) # Blocks the operation until the task is complete/error out/timeout def block_until_complete(component_type, resource_uri, task_id, - ipAddr, + ipaddr, port, synctimeout=0): - global IS_TASK_TIMEOUT - IS_TASK_TIMEOUT = False - if synctimeout: - t = threading.Timer(synctimeout, timeout_handler) - else: + + if not synctimeout: synctimeout = TASK_TIMEOUT - t = threading.Timer(synctimeout, timeout_handler) + t = timeutils.StopWatch(duration=synctimeout) t.start() - while True: - out = get_task_by_resourceuri_and_taskId( - component_type, resource_uri, task_id, ipAddr, port) + while not t.expired(): + if component_type == 'block': + out = show_task_opid(task_id, ipaddr, port) + else: + out = get_task_by_resourceuri_and_taskId( + component_type, resource_uri, task_id, ipaddr, port) if out: if out["state"] == "ready": - # cancel the timer and return - t.cancel() + # stop the timer and return + t.stop() break - # if the status of the task is 'error' then cancel the timer + # if the status of the task is 'error' then stop the timer # and raise exception if out["state"] == "error": - # cancel the timer - t.cancel() + # stop the timer + t.stop() + error_message = "Please see logs for more details" if ("service_error" in out and "details" in out["service_error"]): error_message = out["service_error"]["details"] @@ -456,24 +449,35 @@ def block_until_complete(component_type, 'error_message': error_message })) - if IS_TASK_TIMEOUT: - IS_TASK_TIMEOUT = False - raise CoprHdError(CoprHdError.TIME_OUT, - (_("Task did not complete in %d secs." - " Operation timed out. Task in CoprHD" - " will continue") % synctimeout)) + else: + raise CoprHdError(CoprHdError.TIME_OUT, + (_("Task did not complete in %d secs." + " Operation timed out. 
Task in CoprHD" + " will continue") % synctimeout)) return +def show_task_opid(taskid, ipaddr, port): + (s, h) = service_json_request( + ipaddr, port, + "GET", + URI_TASKS_BY_OPID.format(taskid), + None) + if (not s): + return None + o = json_decode(s) + return o + + def get_task_by_resourceuri_and_taskId(component_type, resource_uri, - task_id, ipAddr, port): + task_id, ipaddr, port): """Returns the single task details.""" - task_uri_constant = singletonURIHelperInstance.getUri( + task_uri_constant = urihelper.singletonURIHelperInstance.getUri( component_type, "task") (s, h) = service_json_request( - ipAddr, port, "GET", + ipaddr, port, "GET", task_uri_constant.format(resource_uri, task_id), None) if not s: return None diff --git a/cinder/volume/drivers/coprhd/helpers/host.py b/cinder/volume/drivers/coprhd/helpers/host.py index e728b3a65..8f6cb4b54 100644 --- a/cinder/volume/drivers/coprhd/helpers/host.py +++ b/cinder/volume/drivers/coprhd/helpers/host.py @@ -24,7 +24,6 @@ class Host(common.CoprHDResource): URI_HOST_DETAILS = "/compute/hosts/{0}" URI_HOST_LIST_INITIATORS = "/compute/hosts/{0}/initiators" URI_COMPUTE_HOST = "/compute/hosts" - URI_HOSTS_SEARCH_BY_NAME = "/compute/hosts/search?name={0}" def query_by_name(self, host_name, tenant_name=None): """Search host matching host_name and tenant if tenant_name provided. @@ -92,13 +91,3 @@ class Host(common.CoprHDResource): if inactive: return None return o - - def search_by_name(self, host_name): - """Search host by its name.""" - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - self.URI_HOSTS_SEARCH_BY_NAME.format(host_name), None) - o = common.json_decode(s) - if not o: - return [] - return common.get_node_value(o, "resource") diff --git a/cinder/volume/drivers/coprhd/helpers/snapshot.py b/cinder/volume/drivers/coprhd/helpers/snapshot.py index 1a31c2eaf..857b8babd 100644 --- a/cinder/volume/drivers/coprhd/helpers/snapshot.py +++ b/cinder/volume/drivers/coprhd/helpers/snapshot.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
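# [Editor's sketch] The block_until_complete() rewrite above trades the old
# threading.Timer-plus-global-flag timeout for an oslo.utils StopWatch. A
# minimal, self-contained illustration of that polling pattern follows;
# poll_task is a hypothetical stand-in for show_task_opid /
# get_task_by_resourceuri_and_taskId, not a real CoprHD helper:

import time

from oslo_utils import timeutils


def wait_until_done(poll_task, synctimeout=300):
    watch = timeutils.StopWatch(duration=synctimeout)
    watch.start()
    while not watch.expired():
        out = poll_task()  # e.g. {'state': 'ready'} or None
        if out and out['state'] in ('ready', 'error'):
            watch.stop()
            return out
        time.sleep(1)
    # mirrors the while/else timeout raise in the hunk above
    raise RuntimeError('Task did not complete in %d secs' % synctimeout)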
-import threading
-
 import oslo_serialization

 from cinder.i18n import _
@@ -43,7 +41,6 @@ class Snapshot(common.CoprHDResource):
     CG = 'consistency-groups'
     BLOCK = 'block'

-    is_timeout = False
     timeout = 300

     def snapshot_list_uri(self, otype, otypename, ouri):
@@ -108,60 +105,6 @@ class Snapshot(common.CoprHDResource):
                 (_("snapshot with the name: "
                    "%s Not Found") % snapshot_name))

-    def snapshot_show_task_opid(self, otype, snap, taskid):
-        (s, h) = common.service_json_request(
-            self.ipaddr, self.port,
-            "GET",
-            Snapshot.URI_SNAPSHOT_TASKS_BY_OPID.format(taskid),
-            None)
-        if (not s):
-            return None
-        o = common.json_decode(s)
-        return o
-
-    # Blocks the operation until the task is complete/error out/timeout
-    def block_until_complete(self, storageres_type, resuri,
-                             task_id, synctimeout=0):
-        if synctimeout:
-            t = threading.Timer(synctimeout, common.timeout_handler)
-        else:
-            synctimeout = self.timeout
-            t = threading.Timer(synctimeout, common.timeout_handler)
-        t.start()
-        while True:
-            out = self.snapshot_show_task_opid(
-                storageres_type, resuri, task_id)
-
-            if out:
-                if out["state"] == "ready":
-                    # cancel the timer and return
-                    t.cancel()
-                    break
-                # if the status of the task is 'error' then cancel the timer
-                # and raise exception
-                if out["state"] == "error":
-                    # cancel the timer
-                    t.cancel()
-                    error_message = "Please see logs for more details"
-                    if("service_error" in out and
-                       "details" in out["service_error"]):
-                        error_message = out["service_error"]["details"]
-                    raise common.CoprHdError(
-                        common.CoprHdError.VALUE_ERR,
-                        (_("Task: %(task_id)s is failed with error: "
-                           "%(error_message)s") %
-                         {'task_id': task_id,
-                          'error_message': error_message}))
-
-            if self.is_timeout:
-                self.is_timeout = False
-                raise common.CoprHdError(common.CoprHdError.TIME_OUT,
-                                         (_("Task did not complete in %d secs."
-                                            " Operation timed out. Task in"
-                                            " CoprHD will continue") %
-                                          synctimeout))
-        return
-
     def storage_resource_query(self,
                                storageres_type,
                                volume_name,
@@ -248,10 +191,10 @@ class Snapshot(common.CoprHDResource):

         if sync:
             return (
-                self.block_until_complete(
+                common.block_until_complete(
                     otype,
                     task['resource']['id'],
-                    task["id"], synctimeout)
+                    task["id"], self.ipaddr, self.port, synctimeout)
             )
         else:
             return o
@@ -291,10 +234,10 @@ class Snapshot(common.CoprHDResource):

         if sync:
             return (
-                self.block_until_complete(
+                common.block_until_complete(
                     otype,
                     task['resource']['id'],
-                    task["id"], synctimeout)
+                    task["id"], self.ipaddr, self.port, synctimeout)
             )
         else:
             return o
diff --git a/cinder/volume/drivers/coprhd/helpers/virtualpool.py b/cinder/volume/drivers/coprhd/helpers/virtualpool.py
index ae64b52ba..703134b63 100644
--- a/cinder/volume/drivers/coprhd/helpers/virtualpool.py
+++ b/cinder/volume/drivers/coprhd/helpers/virtualpool.py
@@ -49,7 +49,7 @@ class VirtualPool(common.CoprHDResource):
         """Makes REST API call to query the vpool by name and type.

         This function will take the VPOOL name and type of VPOOL
-        as input and get uri of the first occurance of given VPOOL.
+        as input and get the URI of the first occurrence of the given VPOOL.
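        A hypothetical usage sketch (the constructor arguments mirror the
        snapshot.Snapshot(ipaddr, port) pattern used elsewhere in this
        patch; 'vpool_gold' is an illustrative name, not a real default):

            vpool_uri = VirtualPool(ipaddr, port).query_by_name(
                'vpool_gold', 'block')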
:param name: Name of the VPOOL :param vpooltype: Type of the VPOOL {'block'} diff --git a/cinder/volume/drivers/coprhd/helpers/volume.py b/cinder/volume/drivers/coprhd/helpers/volume.py index 6f93ca7f4..97835ada3 100644 --- a/cinder/volume/drivers/coprhd/helpers/volume.py +++ b/cinder/volume/drivers/coprhd/helpers/volume.py @@ -275,8 +275,6 @@ class Volume(common.CoprHDResource): secs, an exception is thrown :returns: Created task details in JSON response payload """ - from cinder.volume.drivers.coprhd.helpers import snapshot - snap_obj = snapshot.Snapshot(self.ipaddr, self.port) is_snapshot_clone = False clone_full_uri = None @@ -311,10 +309,10 @@ class Volume(common.CoprHDResource): if is_snapshot_clone: return ( - snap_obj.block_until_complete( + common.block_until_complete( "block", task["resource"]["id"], - task["id"]) + task["id"], self.ipaddr, self.port) ) else: return self.check_for_sync(task, sync, synctimeout) diff --git a/cinder/volume/drivers/coprhd/iscsi.py b/cinder/volume/drivers/coprhd/iscsi.py index 31d64b6fa..0575b4da0 100644 --- a/cinder/volume/drivers/coprhd/iscsi.py +++ b/cinder/volume/drivers/coprhd/iscsi.py @@ -29,6 +29,10 @@ LOG = logging.getLogger(__name__) @interface.volumedriver class EMCCoprHDISCSIDriver(driver.ISCSIDriver): """CoprHD iSCSI Driver.""" + VERSION = "3.0.0.0" + + # ThirdPartySystems wiki page name + CI_WIKI_NAME = "EMC_CoprHD_CI" def __init__(self, *args, **kwargs): super(EMCCoprHDISCSIDriver, self).__init__(*args, **kwargs) @@ -95,7 +99,7 @@ class EMCCoprHDISCSIDriver(driver.ISCSIDriver): return self.common.delete_consistencygroup(context, group, volumes) def update_consistencygroup(self, context, group, - add_volumes, remove_volumes): + add_volumes=None, remove_volumes=None): """Updates volumes in consistency group.""" return self.common.update_consistencygroup(group, add_volumes, remove_volumes) diff --git a/cinder/volume/drivers/coprhd/scaleio.py b/cinder/volume/drivers/coprhd/scaleio.py index 841ad0020..20287c6c4 100644 --- a/cinder/volume/drivers/coprhd/scaleio.py +++ b/cinder/volume/drivers/coprhd/scaleio.py @@ -33,9 +33,9 @@ from cinder.volume.drivers.coprhd import common as coprhd_common LOG = logging.getLogger(__name__) scaleio_opts = [ - cfg.StrOpt('coprhd_scaleio_rest_gateway_ip', + cfg.StrOpt('coprhd_scaleio_rest_gateway_host', default='None', - help='Rest Gateway for Scaleio'), + help='Rest Gateway IP or FQDN for Scaleio'), cfg.PortOpt('coprhd_scaleio_rest_gateway_port', default=4984, help='Rest Gateway Port for Scaleio'), @@ -61,8 +61,12 @@ CONF.register_opts(scaleio_opts) @interface.volumedriver class EMCCoprHDScaleIODriver(driver.VolumeDriver): """CoprHD ScaleIO Driver.""" + VERSION = "3.0.0.0" server_token = None + # ThirdPartySystems wiki page + CI_WIKI_NAME = "EMC_CoprHD_CI" + def __init__(self, *args, **kwargs): super(EMCCoprHDScaleIODriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(scaleio_opts) @@ -141,7 +145,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): return self.common.create_consistencygroup(context, group, True) def update_consistencygroup(self, context, group, - add_volumes, remove_volumes): + add_volumes=None, remove_volumes=None): """Updates volumes in consistency group.""" return self.common.update_consistencygroup(group, add_volumes, remove_volumes) @@ -170,9 +174,10 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): properties = {} properties['scaleIO_volname'] = volname + properties['scaleIO_volume_id'] = volume['provider_id'] properties['hostIP'] = connector['ip'] 
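        # [Editor's note] The dict assembled here is returned below as the
        # 'data' member of the connection info. With the keys visible in this
        # hunk it ends up shaped roughly as follows (values illustrative):
        #
        #   {'driver_volume_type': 'scaleio',
        #    'data': {'scaleIO_volname': volname,
        #             'scaleIO_volume_id': volume['provider_id'],
        #             'hostIP': connector['ip'],
        #             'serverIP': <rest gateway host>,
        #             'serverPort': 4984,
        #             'serverUsername': ..., 'serverPassword': ...,
        #             'iopsLimit': None, 'bandwidthLimit': None,
        #             'serverToken': self.server_token}}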
properties[ - 'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_ip + 'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_host properties[ 'serverPort'] = self.configuration.coprhd_scaleio_rest_gateway_port properties[ @@ -185,23 +190,23 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): properties['bandwidthLimit'] = None properties['serverToken'] = self.server_token - initiatorPorts = [] - initiatorPort = self._get_client_id(properties['serverIP'], - properties['serverPort'], - properties['serverUsername'], - properties['serverPassword'], - properties['hostIP']) - initiatorPorts.append(initiatorPort) + initiator_ports = [] + initiator_port = self._get_client_id(properties['serverIP'], + properties['serverPort'], + properties['serverUsername'], + properties['serverPassword'], + properties['hostIP']) + initiator_ports.append(initiator_port) properties['serverToken'] = self.server_token self.common.initialize_connection(volume, 'scaleio', - initiatorPorts, + initiator_ports, connector['host']) dictobj = { 'driver_volume_type': 'scaleio', - 'data': properties + 'data': properties, } return dictobj @@ -212,9 +217,10 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): volname = volume['display_name'] properties = {} properties['scaleIO_volname'] = volname + properties['scaleIO_volume_id'] = volume['provider_id'] properties['hostIP'] = connector['ip'] properties[ - 'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_ip + 'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_host properties[ 'serverPort'] = self.configuration.coprhd_scaleio_rest_gateway_port properties[ @@ -225,16 +231,16 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): self.configuration.coprhd_scaleio_rest_server_password) properties['serverToken'] = self.server_token - initiatorPort = self._get_client_id(properties['serverIP'], - properties['serverPort'], - properties['serverUsername'], - properties['serverPassword'], - properties['hostIP']) - initPorts = [] - initPorts.append(initiatorPort) + initiator_port = self._get_client_id(properties['serverIP'], + properties['serverPort'], + properties['serverUsername'], + properties['serverPassword'], + properties['hostIP']) + init_ports = [] + init_ports.append(initiator_port) self.common.terminate_connection(volume, 'scaleio', - initPorts, + init_ports, connector['host']) def get_volume_stats(self, refresh=False): diff --git a/cinder/volume/drivers/datera.py b/cinder/volume/drivers/datera.py index dc3e26409..5fff4e4aa 100644 --- a/cinder/volume/drivers/datera.py +++ b/cinder/volume/drivers/datera.py @@ -15,9 +15,10 @@ import functools import json -import time +import re import uuid +import eventlet import ipaddress from oslo_config import cfg from oslo_log import log as logging @@ -33,12 +34,11 @@ from cinder import interface from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import qos_specs +from cinder.volume import utils as volutils from cinder.volume import volume_types LOG = logging.getLogger(__name__) -DATERA_SI_SLEEP = 4 - d_opts = [ cfg.StrOpt('datera_api_port', default='7717', @@ -48,6 +48,7 @@ d_opts = [ help='Datera API version.'), cfg.IntOpt('datera_num_replicas', default='3', + deprecated_for_removal=True, help='Number of replicas to create of an inode.'), cfg.IntOpt('datera_503_timeout', default='120', @@ -55,12 +56,18 @@ d_opts = [ cfg.IntOpt('datera_503_interval', default='5', help='Interval between 503 retries'), - cfg.BoolOpt('datera_acl_allow_all', - default=False, - help="True to 
set acl 'allow_all' on volumes created"), cfg.BoolOpt('datera_debug', default=False, - help="True to set function arg and return logging") + help="True to set function arg and return logging"), + cfg.BoolOpt('datera_acl_allow_all', + default=False, + deprecated_for_removal=True, + help="True to set acl 'allow_all' on volumes " + "created"), + cfg.BoolOpt('datera_debug_replica_count_override', + default=False, + help="ONLY FOR DEBUG/TESTING PURPOSES\n" + "True to set replica_count to 1") ] @@ -68,25 +75,41 @@ CONF = cfg.CONF CONF.import_opt('driver_use_ssl', 'cinder.volume.driver') CONF.register_opts(d_opts) -DEFAULT_STORAGE_NAME = 'storage-1' -DEFAULT_VOLUME_NAME = 'volume-1' +DEFAULT_SI_SLEEP = 10 +INITIATOR_GROUP_PREFIX = "IG-" +OS_PREFIX = "OS-" +UNMANAGE_PREFIX = "UNMANAGED-" + +# Taken from this SO post : +# http://stackoverflow.com/a/18516125 +# Using old-style string formatting because of the nature of the regex +# conflicting with new-style curly braces +UUID4_STR_RE = ("%s[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]" + "[a-f0-9]{3}-?[a-f0-9]{12}") +UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX) # Recursive dict to assemble basic url structure for the most common # API URL endpoints. Most others are constructed from these -# Don't use this object to get a url though -_URL_TEMPLATES_BASE = { +URL_TEMPLATES = { 'ai': lambda: 'app_instances', - 'ai_inst': lambda: (_URL_TEMPLATES_BASE['ai']() + '/{}'), - 'si': lambda: (_URL_TEMPLATES_BASE['ai_inst']() + '/storage_instances'), - 'si_inst': lambda: ((_URL_TEMPLATES_BASE['si']() + '/{}').format( - '{}', DEFAULT_STORAGE_NAME)), - 'vol': lambda: ((_URL_TEMPLATES_BASE['si_inst']() + '/volumes').format( - '{}', DEFAULT_STORAGE_NAME)), - 'vol_inst': lambda: ((_URL_TEMPLATES_BASE['vol']() + '/{}').format( - '{}', DEFAULT_VOLUME_NAME))} + 'ai_inst': lambda: (URL_TEMPLATES['ai']() + '/{}'), + 'si': lambda: (URL_TEMPLATES['ai_inst']() + '/storage_instances'), + 'si_inst': lambda storage_name: ( + (URL_TEMPLATES['si']() + '/{}').format( + '{}', storage_name)), + 'vol': lambda storage_name: ( + (URL_TEMPLATES['si_inst'](storage_name) + '/volumes')), + 'vol_inst': lambda storage_name, volume_name: ( + (URL_TEMPLATES['vol'](storage_name) + '/{}').format( + '{}', volume_name))} -# Use this one since I haven't found a way to inline call lambdas -URL_TEMPLATES = {k: v() for k, v in _URL_TEMPLATES_BASE.items()} + +def _get_name(name): + return "".join((OS_PREFIX, name)) + + +def _get_unmanaged(name): + return "".join((UNMANAGE_PREFIX, name)) def _authenticated(func): @@ -123,54 +146,30 @@ class DateraDriver(san.SanISCSIDriver): 1.1 - Look for lun-0 instead of lun-1. 
2.0 - Update For Datera API v2
        2.1 - Multipath, ACL and reorg
+        2.2 - Capabilities List, Extended Volume-Type Support
+              Naming convention change,
+              Volume Manage/Unmanage support
     """
-    VERSION = '2.1'
+    VERSION = '2.2'
+
+    CI_WIKI_NAME = "datera-ci"

     def __init__(self, *args, **kwargs):
         super(DateraDriver, self).__init__(*args, **kwargs)
         self.configuration.append_config_values(d_opts)
-        self.num_replicas = self.configuration.datera_num_replicas
         self.username = self.configuration.san_login
         self.password = self.configuration.san_password
-        self.auth_token = None
         self.cluster_stats = {}
         self.datera_api_token = None
-        self.retry_attempts = (int(self.configuration.datera_503_timeout /
-                               self.configuration.datera_503_interval))
         self.interval = self.configuration.datera_503_interval
-        self.allow_all = self.configuration.datera_acl_allow_all
+        self.retry_attempts = (self.configuration.datera_503_timeout /
+                               self.interval)
         self.driver_prefix = str(uuid.uuid4())[:4]
         self.datera_debug = self.configuration.datera_debug

         if self.datera_debug:
             utils.setup_tracing(['method'])

-    def _login(self):
-        """Use the san_login and san_password to set self.auth_token."""
-        body = {
-            'name': self.username,
-            'password': self.password
-        }
-
-        # Unset token now, otherwise potential expired token will be sent
-        # along to be used for authorization when trying to login.
-        self.auth_token = None
-
-        try:
-            LOG.debug('Getting Datera auth token.')
-            results = self._issue_api_request('login', 'put', body=body,
-                                              sensitive=True)
-            self.datera_api_token = results['key']
-        except exception.NotAuthorized:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Logging into the Datera cluster failed. Please '
-                              'check your username and password set in the '
-                              'cinder.conf and start the cinder-volume '
-                              'service again.'))
-
-    def _get_lunid(self):
-        return 0
-
     def do_setup(self, context):
         # If we can't authenticate through the old and new method, just fail
         # now.
@@ -184,64 +183,65 @@ class DateraDriver(san.SanISCSIDriver):
             self._login()

     @utils.retry(exception.VolumeDriverException, retries=3)
-    def _wait_for_resource(self, id, resource_type):
+    def _wait_for_resource(self, id, resource_type, policies):
         result = self._issue_api_request(resource_type, 'get', id)
-        if result['storage_instances'][DEFAULT_STORAGE_NAME]['volumes'][
-                DEFAULT_VOLUME_NAME]['op_state'] == 'available':
+        if result['storage_instances'][
+                policies['default_storage_name']]['volumes'][
+                policies['default_volume_name']]['op_state'] == 'available':
             return
         else:
             raise exception.VolumeDriverException(
                 message=_('Resource not ready.'))

     def _create_resource(self, resource, resource_type, body):
-        type_id = resource.get('volume_type_id', None)
         result = None
         try:
             result = self._issue_api_request(resource_type, 'post', body=body)
         except exception.Invalid:
+            type_id = resource.get('volume_type_id', None)
             if resource_type == 'volumes' and type_id:
                 LOG.error(_LE("Creation request failed. Please verify the "
                               "extra-specs set for your volume types are "
                               "entered correctly."))
             raise
         else:
+            policies = self._get_policies_for_resource(resource)
             # Handle updating QOS Policies
-            if resource_type == URL_TEMPLATES['ai']:
-                url = URL_TEMPLATES['vol_inst'] + '/performance_policy'
-                url = url.format(resource['id'])
-                if type_id is not None:
-                    # Filter for just QOS policies in result. 
All of their keys - # should end with "max" - policies = {k: int(v) for k, v in - self._get_policies_by_volume_type( - type_id).items() if k.endswith("max")} - if policies: - self._issue_api_request(url, 'post', body=policies) - if result['storage_instances'][DEFAULT_STORAGE_NAME]['volumes'][ - DEFAULT_VOLUME_NAME]['op_state'] == 'available': + if resource_type == URL_TEMPLATES['ai'](): + self._update_qos(resource, policies) + if result['storage_instances'][policies['default_storage_name']][ + 'volumes'][policies['default_volume_name']][ + 'op_state'] == 'available': return - self._wait_for_resource(resource['id'], resource_type) + self._wait_for_resource(_get_name(resource['id']), + resource_type, + policies) def create_volume(self, volume): """Create a logical volume.""" # Generate App Instance, Storage Instance and Volume # Volume ID will be used as the App Instance Name # Storage Instance and Volumes will have standard names + policies = self._get_policies_for_resource(volume) + num_replicas = int(policies['replica_count']) + storage_name = policies['default_storage_name'] + volume_name = policies['default_volume_name'] + app_params = ( { 'create_mode': "openstack", 'uuid': str(volume['id']), - 'name': str(volume['id']), + 'name': _get_name(volume['id']), 'access_control_mode': 'deny_all', 'storage_instances': { - DEFAULT_STORAGE_NAME: { - 'name': DEFAULT_STORAGE_NAME, + storage_name: { + 'name': storage_name, 'volumes': { - DEFAULT_VOLUME_NAME: { - 'name': DEFAULT_VOLUME_NAME, + volume_name: { + 'name': volume_name, 'size': volume['size'], - 'replica_count': self.num_replicas, + 'replica_count': num_replicas, 'snapshot_policies': { } } @@ -249,23 +249,26 @@ class DateraDriver(san.SanISCSIDriver): } } }) - self._create_resource(volume, URL_TEMPLATES['ai'], body=app_params) + self._create_resource(volume, URL_TEMPLATES['ai'](), body=app_params) def extend_volume(self, volume, new_size): # Offline App Instance, if necessary reonline = False app_inst = self._issue_api_request( - URL_TEMPLATES['ai_inst'].format(volume['id'])) + URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))) if app_inst['admin_state'] == 'online': reonline = True self.detach_volume(None, volume, delete_initiator=False) # Change Volume Size - app_inst = volume['id'] + app_inst = _get_name(volume['id']) data = { 'size': new_size } + policies = self._get_policies_for_resource(volume) self._issue_api_request( - URL_TEMPLATES['vol_inst'].format(app_inst), + URL_TEMPLATES['vol_inst']( + policies['default_storage_name'], + policies['default_volume_name']).format(app_inst), method='put', body=data) # Online Volume, if it was online before @@ -273,29 +276,32 @@ class DateraDriver(san.SanISCSIDriver): self.create_export(None, volume, None) def create_cloned_volume(self, volume, src_vref): - src = "/" + URL_TEMPLATES['vol_inst'].format(src_vref['id']) + policies = self._get_policies_for_resource(volume) + src = "/" + URL_TEMPLATES['vol_inst']( + policies['default_storage_name'], + policies['default_volume_name']).format(_get_name(src_vref['id'])) data = { 'create_mode': 'openstack', - 'name': str(volume['id']), + 'name': _get_name(volume['id']), 'uuid': str(volume['id']), 'clone_src': src, - # 'access_control_mode': 'allow_all' } - self._issue_api_request(URL_TEMPLATES['ai'], 'post', body=data) + self._issue_api_request(URL_TEMPLATES['ai'](), 'post', body=data) if volume['size'] > src_vref['size']: self.extend_volume(volume, volume['size']) def delete_volume(self, volume): self.detach_volume(None, volume) - app_inst = 
volume['id'] + app_inst = _get_name(volume['id']) try: - self._issue_api_request(URL_TEMPLATES['ai_inst'].format(app_inst), - method='delete') + self._issue_api_request(URL_TEMPLATES['ai_inst']().format( + app_inst), + method='delete') except exception.NotFound: msg = _LI("Tried to delete volume %s, but it was not found in the " "Datera cluster. Continuing with delete.") - LOG.info(msg, volume['id']) + LOG.info(msg, _get_name(volume['id'])) def ensure_export(self, context, volume, connector): """Gets the associated account, retrieves CHAP info and updates.""" @@ -304,7 +310,7 @@ class DateraDriver(san.SanISCSIDriver): def initialize_connection(self, volume, connector): # Now online the app_instance (which will online all storage_instances) multipath = connector.get('multipath', False) - url = URL_TEMPLATES['ai_inst'].format(volume['id']) + url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id'])) data = { 'admin_state': 'online' } @@ -347,20 +353,23 @@ class DateraDriver(san.SanISCSIDriver): def create_export(self, context, volume, connector): # Online volume in case it hasn't been already - url = URL_TEMPLATES['ai_inst'].format(volume['id']) + url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id'])) data = { 'admin_state': 'online' } self._issue_api_request(url, method='put', body=data) # Check if we've already setup everything for this volume - url = (URL_TEMPLATES['si'].format(volume['id'])) + url = (URL_TEMPLATES['si']().format(_get_name(volume['id']))) storage_instances = self._issue_api_request(url) # Handle adding initiator to product if necessary # Then add initiator to ACL - if connector and connector.get('initiator') and not self.allow_all: + policies = self._get_policies_for_resource(volume) + if (connector and + connector.get('initiator') and + not policies['acl_allow_all']): initiator_name = "OpenStack_{}_{}".format( self.driver_prefix, str(uuid.uuid4())[:4]) - initiator_group = 'IG-' + volume['id'] + initiator_group = INITIATOR_GROUP_PREFIX + volume['id'] found = False initiator = connector['initiator'] current_initiators = self._issue_api_request('initiators') @@ -388,24 +397,31 @@ class DateraDriver(san.SanISCSIDriver): conflict_ok=True) # Create ACL with initiator group as reference for each # storage_instance in app_instance - # TODO(_alastor_) We need to avoid changing the ACLs if the + # TODO(_alastor_): We need to avoid changing the ACLs if the # template already specifies an ACL policy. 
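            # [Editor's note] URL_TEMPLATES['si']() expands to
            # 'app_instances/{}/storage_instances', so for a hypothetical
            # volume id 'abc' and storage instance 'storage-1' the loop
            # below issues roughly:
            #
            #   PUT app_instances/OS-abc/storage_instances/storage-1/acl_policy
            #   body: {'initiator_groups': [<initiator group path for IG-abc>]}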
for si_name in storage_instances.keys(): - acl_url = (URL_TEMPLATES['si'] + "/{}/acl_policy").format( - volume['id'], si_name) + acl_url = (URL_TEMPLATES['si']() + "/{}/acl_policy").format( + _get_name(volume['id']), si_name) data = {'initiator_groups': [initiator_group_path]} self._issue_api_request(acl_url, method="put", body=data) if connector and connector.get('ip'): - # Determine IP Pool from IP and update storage_instance try: - initiator_ip_pool_path = self._get_ip_pool_for_string_ip( - connector['ip']) + # Case where volume_type has non default IP Pool info + if policies['ip_pool'] != 'default': + initiator_ip_pool_path = self._issue_api_request( + "access_network_ip_pools/{}".format( + policies['ip_pool']))['path'] + # Fallback to trying reasonable IP based guess + else: + initiator_ip_pool_path = self._get_ip_pool_for_string_ip( + connector['ip']) - ip_pool_url = URL_TEMPLATES['si_inst'].format( - volume['id']) + ip_pool_url = URL_TEMPLATES['si_inst']( + policies['default_storage_name']).format( + _get_name(volume['id'])) ip_pool_data = {'ip_pool': initiator_ip_pool_path} self._issue_api_request(ip_pool_url, method="put", @@ -413,13 +429,12 @@ class DateraDriver(san.SanISCSIDriver): except exception.DateraAPIException: # Datera product 1.0 support pass - # Some versions of Datera software require more time to make the - # ISCSI lun available, but don't report that it's unavailable. We - # can remove this when we deprecate those versions - time.sleep(DATERA_SI_SLEEP) + + # Check to ensure we're ready for go-time + self._si_poll(volume, policies) def detach_volume(self, context, volume, attachment=None): - url = URL_TEMPLATES['ai_inst'].format(volume['id']) + url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id'])) data = { 'admin_state': 'offline', 'force': True @@ -430,7 +445,7 @@ class DateraDriver(san.SanISCSIDriver): msg = _LI("Tried to detach volume %s, but it was not found in the " "Datera cluster. 
Continuing with detach.") LOG.info(msg, volume['id']) - # TODO(_alastor_) Make acl cleaning multi-attach aware + # TODO(_alastor_): Make acl cleaning multi-attach aware self._clean_acl(volume) def _check_for_acl(self, initiator_path): @@ -447,8 +462,10 @@ class DateraDriver(san.SanISCSIDriver): return False def _clean_acl(self, volume): - acl_url = (URL_TEMPLATES["si_inst"] + "/acl_policy").format( - volume['id']) + policies = self._get_policies_for_resource(volume) + acl_url = (URL_TEMPLATES["si_inst"]( + policies['default_storage_name']) + "/acl_policy").format( + _get_name(volume['id'])) try: initiator_group = self._issue_api_request(acl_url)[ 'initiator_groups'][0] @@ -468,8 +485,11 @@ class DateraDriver(san.SanISCSIDriver): volume) def create_snapshot(self, snapshot): - url_template = URL_TEMPLATES['vol_inst'] + '/snapshots' - url = url_template.format(snapshot['volume_id']) + policies = self._get_policies_for_resource(snapshot) + url_template = URL_TEMPLATES['vol_inst']( + policies['default_storage_name'], + policies['default_volume_name']) + '/snapshots' + url = url_template.format(_get_name(snapshot['volume_id'])) snap_params = { 'uuid': snapshot['id'], @@ -477,8 +497,11 @@ class DateraDriver(san.SanISCSIDriver): self._issue_api_request(url, method='post', body=snap_params) def delete_snapshot(self, snapshot): - snap_temp = URL_TEMPLATES['vol_inst'] + '/snapshots' - snapu = snap_temp.format(snapshot['volume_id']) + policies = self._get_policies_for_resource(snapshot) + snap_temp = URL_TEMPLATES['vol_inst']( + policies['default_storage_name'], + policies['default_volume_name']) + '/snapshots' + snapu = snap_temp.format(_get_name(snapshot['volume_id'])) snapshots = self._issue_api_request(snapu, method='get') try: @@ -493,11 +516,14 @@ class DateraDriver(san.SanISCSIDriver): except exception.NotFound: msg = _LI("Tried to delete snapshot %s, but was not found in " "Datera cluster. Continuing with delete.") - LOG.info(msg, snapshot['id']) + LOG.info(msg, _get_name(snapshot['id'])) def create_volume_from_snapshot(self, volume, snapshot): - snap_temp = URL_TEMPLATES['vol_inst'] + '/snapshots' - snapu = snap_temp.format(snapshot['volume_id']) + policies = self._get_policies_for_resource(snapshot) + snap_temp = URL_TEMPLATES['vol_inst']( + policies['default_storage_name'], + policies['default_volume_name']) + '/snapshots' + snapu = snap_temp.format(_get_name(snapshot['volume_id'])) snapshots = self._issue_api_request(snapu, method='get') for ts, snap in snapshots.items(): if snap['uuid'] == snapshot['id']: @@ -506,19 +532,183 @@ class DateraDriver(san.SanISCSIDriver): else: raise exception.NotFound - src = "/" + (snap_temp + '/{}').format(snapshot['volume_id'], found_ts) + src = "/" + (snap_temp + '/{}').format( + _get_name(snapshot['volume_id']), found_ts) app_params = ( { 'create_mode': 'openstack', 'uuid': str(volume['id']), - 'name': str(volume['id']), + 'name': _get_name(volume['id']), 'clone_src': src, }) self._issue_api_request( - URL_TEMPLATES['ai'], + URL_TEMPLATES['ai'](), method='post', body=app_params) + def manage_existing(self, volume, existing_ref): + """Manage an existing volume on the Datera backend + + The existing_ref must be either the current name or Datera UUID of + an app_instance on the Datera backend in a colon separated list with + the storage instance name and volume name. This means only + single storage instances and single volumes are supported for + managing by cinder. + + Eg. 
+
+            existing_ref['source-name'] == app_inst_name:storage_inst_name:vol_name
+
+        :param volume: Cinder volume to manage
+        :param existing_ref: Driver-specific information used to identify a
+                             volume
+        """
+        existing_ref = existing_ref['source-name']
+        if existing_ref.count(":") != 2:
+            raise exception.ManageExistingInvalidReference(
+                _("existing_ref argument must be of this format: "
+                  "app_inst_name:storage_inst_name:vol_name"))
+        app_inst_name = existing_ref.split(":")[0]
+        LOG.debug("Managing existing Datera volume %(existing)s. "
+                  "Changing name to %(volume)s",
+                  {'existing': existing_ref,
+                   'volume': _get_name(volume['id'])})
+        data = {'name': _get_name(volume['id'])}
+        self._issue_api_request(URL_TEMPLATES['ai_inst']().format(
+            app_inst_name), method='put', body=data)
+
+    def manage_existing_get_size(self, volume, existing_ref):
+        """Get the size of an unmanaged volume on the Datera backend
+
+        The existing_ref must be either the current name or Datera UUID of
+        an app_instance on the Datera backend in a colon separated list with
+        the storage instance name and volume name. This means only
+        single storage instances and single volumes are supported for
+        managing by cinder.
+
+        Eg.
+
+        existing_ref == app_inst_name:storage_inst_name:vol_name
+
+        :param volume: Cinder volume to manage
+        :param existing_ref: Driver-specific information used to identify a
+                             volume on the Datera backend
+        """
+        existing_ref = existing_ref['source-name']
+        if existing_ref.count(":") != 2:
+            raise exception.ManageExistingInvalidReference(
+                _("existing_ref argument must be of this format: "
+                  "app_inst_name:storage_inst_name:vol_name"))
+        app_inst_name, si_name, vol_name = existing_ref.split(":")
+        app_inst = self._issue_api_request(
+            URL_TEMPLATES['ai_inst']().format(app_inst_name))
+        return self._get_size(volume, app_inst, si_name, vol_name)
+
+    def _get_size(self, volume, app_inst=None, si_name=None, vol_name=None):
+        """Helper method for getting the size of a backend object
+
+        If app_inst is provided, we'll just parse the dict to get
+        the size instead of making a separate http request
+        """
+        policies = self._get_policies_for_resource(volume)
+        si_name = si_name if si_name else policies['default_storage_name']
+        vol_name = vol_name if vol_name else policies['default_volume_name']
+        if not app_inst:
+            vol_url = URL_TEMPLATES['ai_inst']().format(
+                _get_name(volume['id']))
+            app_inst = self._issue_api_request(vol_url)
+        size = app_inst[
+            'storage_instances'][si_name]['volumes'][vol_name]['size']
+        return size
+
+    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
+                               sort_keys, sort_dirs):
+        """List volumes on the backend available for management by Cinder.
+
+        Returns a list of dictionaries, each specifying a volume in the host,
+        with the following keys:
+        - reference (dictionary): The reference for a volume, which can be
+          passed to "manage_existing".
+        - size (int): The size of the volume according to the storage
+          backend, rounded up to the nearest GB.
+        - safe_to_manage (boolean): Whether or not this volume is safe to
+          manage according to the storage backend. For example, is the volume
+          in use or invalid for any reason.
+        - reason_not_safe (string): If safe_to_manage is False, the reason why.
+        - cinder_id (string): If already managed, provide the Cinder ID.
+        - extra_info (string): Any extra information to return to the user
+
+        :param cinder_volumes: A list of volumes in this host that Cinder
+                               currently manages, used to determine if
+                               a volume is manageable or not. 
+        :param marker: The last item of the previous page; we return the
+                       next results after this value (after sorting)
+        :param limit: Maximum number of items to return
+        :param offset: Number of items to skip after marker
+        :param sort_keys: List of keys to sort results by (valid keys are
+                          'identifier' and 'size')
+        :param sort_dirs: List of directions to sort by, corresponding to
+                          sort_keys (valid directions are 'asc' and 'desc')
+        """
+        LOG.debug("Listing manageable Datera volumes")
+        app_instances = self._issue_api_request(URL_TEMPLATES['ai']()).values()
+
+        results = []
+
+        cinder_volume_ids = [vol['id'] for vol in cinder_volumes]
+
+        for ai in app_instances:
+            ai_name = ai['name']
+            reference = None
+            size = None
+            safe_to_manage = False
+            reason_not_safe = None
+            cinder_id = None
+            extra_info = None
+            # str.lstrip() strips a character set rather than a prefix,
+            # so slice the OS- prefix off explicitly
+            potential_id = (ai_name[len(OS_PREFIX):]
+                            if ai_name.startswith(OS_PREFIX) else ai_name)
+            if re.match(UUID4_RE, ai_name):
+                cinder_id = potential_id
+            if (not cinder_id and
+                    potential_id not in cinder_volume_ids):
+                safe_to_manage = self._is_manageable(ai)
+            if safe_to_manage:
+                si = list(ai['storage_instances'].values())[0]
+                si_name = si['name']
+                vol = list(si['volumes'].values())[0]
+                vol_name = vol['name']
+                size = vol['size']
+                reference = {"source-name": "{}:{}:{}".format(
+                    ai_name, si_name, vol_name)}
+
+            results.append({
+                'reference': reference,
+                'size': size,
+                'safe_to_manage': safe_to_manage,
+                'reason_not_safe': reason_not_safe,
+                'cinder_id': cinder_id,
+                'extra_info': extra_info})
+
+        page_results = volutils.paginate_entries_list(
+            results, marker, limit, offset, sort_keys, sort_dirs)
+
+        return page_results
+
+    def _is_manageable(self, app_inst):
+        if len(app_inst['storage_instances']) == 1:
+            si = list(app_inst['storage_instances'].values())[0]
+            if len(si['volumes']) == 1:
+                return True
+        return False
+
+    def unmanage(self, volume):
+        """Unmanage a currently managed volume in Cinder
+
+        :param volume: Cinder volume to unmanage
+        """
+        LOG.debug("Unmanaging Cinder volume %s. Changing name to %s",
+                  volume['id'], _get_unmanaged(volume['id']))
+        data = {'name': _get_unmanaged(volume['id'])}
+        self._issue_api_request(URL_TEMPLATES['ai_inst']().format(
+            _get_name(volume['id'])), method='put', body=data)
+
     def get_volume_stats(self, refresh=False):
         """Get volume stats.
@@ -556,30 +746,258 @@ class DateraDriver(san.SanISCSIDriver):

         self.cluster_stats = stats

-    def _get_policies_by_volume_type(self, type_id):
+    def _login(self):
+        """Use the san_login and san_password to set token."""
+        body = {
+            'name': self.username,
+            'password': self.password
+        }
+
+        # The API token is refreshed below on a successful login, so a
+        # potentially expired token is never reused for authorization.
+
+        try:
+            LOG.debug('Getting Datera auth token.')
+            results = self._issue_api_request('login', 'put', body=body,
+                                              sensitive=True)
+            self.datera_api_token = results['key']
+        except exception.NotAuthorized:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE('Logging into the Datera cluster failed. Please '
+                              'check your username and password set in the '
+                              'cinder.conf and start the cinder-volume '
+                              'service again.'))
+
+    def _get_lunid(self):
+        return 0
+
+    def _init_vendor_properties(self):
+        """Create a dictionary of vendor unique properties.
+
+        This method creates a dictionary of vendor unique properties
+        and returns both created dictionary and vendor name.
+        Returned vendor name is used to check for name of vendor
+        unique properties. 
+ + - Vendor name shouldn't include colon(:) because of the separator + and it is automatically replaced by underscore(_). + ex. abc:d -> abc_d + - Vendor prefix is equal to vendor name. + ex. abcd + - Vendor unique properties must start with vendor prefix + ':'. + ex. abcd:maxIOPS + + Each backend driver needs to override this method to expose + its own properties using _set_property() like this: + + self._set_property( + properties, + "vendorPrefix:specific_property", + "Title of property", + _("Description of property"), + "type") + + : return dictionary of vendor unique properties + : return vendor name + + prefix: DF --> Datera Fabric + """ + + properties = {} + + if self.configuration.get('datera_debug_replica_count_override'): + replica_count = 1 + else: + replica_count = 3 + self._set_property( + properties, + "DF:replica_count", + "Datera Volume Replica Count", + _("Specifies number of replicas for each volume. Can only be " + "increased once volume is created"), + "integer", + minimum=1, + default=replica_count) + + self._set_property( + properties, + "DF:acl_allow_all", + "Datera ACL Allow All", + _("True to set acl 'allow_all' on volumes created. Cannot be " + "changed on volume once set"), + "boolean", + default=False) + + self._set_property( + properties, + "DF:ip_pool", + "Datera IP Pool", + _("Specifies IP pool to use for volume"), + "string", + default="default") + + # ###### QoS Settings ###### # + self._set_property( + properties, + "DF:read_bandwidth_max", + "Datera QoS Max Bandwidth Read", + _("Max read bandwidth setting for volume qos, " + "use 0 for unlimited"), + "integer", + minimum=0, + default=0) + + self._set_property( + properties, + "DF:default_storage_name", + "Datera Default Storage Instance Name", + _("The name to use for storage instances created"), + "string", + default="storage-1") + + self._set_property( + properties, + "DF:default_volume_name", + "Datera Default Volume Name", + _("The name to use for volumes created"), + "string", + default="volume-1") + + self._set_property( + properties, + "DF:write_bandwidth_max", + "Datera QoS Max Bandwidth Write", + _("Max write bandwidth setting for volume qos, " + "use 0 for unlimited"), + "integer", + minimum=0, + default=0) + + self._set_property( + properties, + "DF:total_bandwidth_max", + "Datera QoS Max Bandwidth Total", + _("Max total bandwidth setting for volume qos, " + "use 0 for unlimited"), + "integer", + minimum=0, + default=0) + + self._set_property( + properties, + "DF:read_iops_max", + "Datera QoS Max iops Read", + _("Max read iops setting for volume qos, " + "use 0 for unlimited"), + "integer", + minimum=0, + default=0) + + self._set_property( + properties, + "DF:write_iops_max", + "Datera QoS Max IOPS Write", + _("Max write iops setting for volume qos, " + "use 0 for unlimited"), + "integer", + minimum=0, + default=0) + + self._set_property( + properties, + "DF:total_iops_max", + "Datera QoS Max IOPS Total", + _("Max total iops setting for volume qos, " + "use 0 for unlimited"), + "integer", + minimum=0, + default=0) + # ###### End QoS Settings ###### # + + return properties, 'DF' + + def _get_policies_for_resource(self, resource): """Get extra_specs and qos_specs of a volume_type. This fetches the scoped keys from the volume type. Anything set from qos_specs will override key/values set from extra_specs. 
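        A worked example under the vendor defaults defined above (values
        hypothetical): a volume type carrying extra_specs
        {'DF:replica_count': '2', 'DF:acl_allow_all': 'True'} yields
        policies['replica_count'] == 2 (via the int cast below) and
        policies['acl_allow_all'] is True (via the string-boolean
        handling below), with every other key keeping its default.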
""" - ctxt = context.get_admin_context() - volume_type = volume_types.get_volume_type(ctxt, type_id) - specs = volume_type.get('extra_specs') + type_id = resource.get('volume_type_id', None) + # Handle case of volume with no type. We still want the + # specified defaults from above + if type_id: + ctxt = context.get_admin_context() + volume_type = volume_types.get_volume_type(ctxt, type_id) + specs = volume_type.get('extra_specs') + else: + volume_type = None + specs = {} - policies = {} - for key, value in specs.items(): - if ':' in key: - fields = key.split(':') - key = fields[1] - policies[key] = value + # Set defaults: + policies = {k.lstrip('DF:'): str(v['default']) for (k, v) + in self._init_vendor_properties()[0].items()} - qos_specs_id = volume_type.get('qos_specs_id') - if qos_specs_id is not None: - qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - if qos_kvs: - policies.update(qos_kvs) + if volume_type: + # Populate updated value + for key, value in specs.items(): + if ':' in key: + fields = key.split(':') + key = fields[1] + policies[key] = value + + qos_specs_id = volume_type.get('qos_specs_id') + if qos_specs_id is not None: + qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] + if qos_kvs: + policies.update(qos_kvs) + # Cast everything except booleans int that can be cast + for k, v in policies.items(): + # Handle String Boolean case + if v == 'True' or v == 'False': + policies[k] = policies[k] == 'True' + continue + # Int cast + try: + policies[k] = int(v) + except ValueError: + pass return policies + def _si_poll(self, volume, policies): + # Initial 4 second sleep required for some Datera versions + eventlet.sleep(DEFAULT_SI_SLEEP) + TIMEOUT = 10 + retry = 0 + check_url = URL_TEMPLATES['si_inst']( + policies['default_storage_name']).format(_get_name(volume['id'])) + poll = True + while poll and not retry >= TIMEOUT: + retry += 1 + si = self._issue_api_request(check_url) + if si['op_state'] == 'available': + poll = False + else: + eventlet.sleep(1) + if retry >= TIMEOUT: + raise exception.VolumeDriverException( + message=_('Resource not ready.')) + + def _update_qos(self, resource, policies): + url = URL_TEMPLATES['vol_inst']( + policies['default_storage_name'], + policies['default_volume_name']) + '/performance_policy' + url = url.format(_get_name(resource['id'])) + type_id = resource.get('volume_type_id', None) + if type_id is not None: + # Filter for just QOS policies in result. 
All of their keys + # should end with "max" + fpolicies = {k: int(v) for k, v in + policies.items() if k.endswith("max")} + # Filter all 0 values from being passed + fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items())) + if fpolicies: + self._issue_api_request(url, 'post', body=fpolicies) + def _get_ip_pool_for_string_ip(self, ip): """Takes a string ipaddress and return the ip_pool API object dict """ pool = 'default' @@ -646,7 +1064,7 @@ class DateraDriver(san.SanISCSIDriver): current_retry = 0 while current_retry <= self.retry_attempts: LOG.debug("Datera 503 response, trying request again") - time.sleep(self.interval) + eventlet.sleep(self.interval) resp = self._request(connection_string, method, payload, @@ -660,16 +1078,13 @@ class DateraDriver(san.SanISCSIDriver): self._raise_response(response) @_authenticated - def _issue_api_request(self, resource_type, method='get', resource=None, - body=None, action=None, sensitive=False, - conflict_ok=False): + def _issue_api_request(self, resource_url, method='get', body=None, + sensitive=False, conflict_ok=False): """All API requests to Datera cluster go through this method. - :param resource_type: the type of the resource + :param resource_url: the url of the resource :param method: the request verb - :param resource: the identifier of the resource :param body: a dict with options for the action_type - :param action: the action to perform :returns: a dict of the response from the Datera cluster """ host = self.configuration.san_ip @@ -680,7 +1095,8 @@ class DateraDriver(san.SanISCSIDriver): payload = json.dumps(body, ensure_ascii=False) payload.encode('utf-8') - header = {'Content-Type': 'application/json; charset=utf-8'} + header = {'Content-Type': 'application/json; charset=utf-8', + 'Datera-Driver': 'OpenStack-Cinder-{}'.format(self.VERSION)} protocol = 'http' if self.configuration.driver_use_ssl: @@ -698,12 +1114,7 @@ class DateraDriver(san.SanISCSIDriver): cert_data = (client_cert, client_cert_key) connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port, - api_version, resource_type) - - if resource is not None: - connection_string += '/%s' % resource - if action is not None: - connection_string += '/%s' % action + api_version, resource_url) response = self._request(connection_string, method, diff --git a/cinder/volume/drivers/dell/dell_storagecenter_api.py b/cinder/volume/drivers/dell/dell_storagecenter_api.py index 18309c372..76a102c66 100644 --- a/cinder/volume/drivers/dell/dell_storagecenter_api.py +++ b/cinder/volume/drivers/dell/dell_storagecenter_api.py @@ -18,6 +18,7 @@ import os.path import eventlet from oslo_log import log as logging +from oslo_utils import excutils import requests from simplejson import scanner import six @@ -119,7 +120,7 @@ class HttpClient(object): def _get_header(self, async): if async: header = self.header.copy() - header['async'] = True + header['async'] = 'True' return header return self.header @@ -157,7 +158,11 @@ class HttpClient(object): # Object returned switches to one without objectType or with # a different objectType. if not StorageCenterApi._check_result(r): - LOG.debug('Async error: status_code: %s', r.status_code) + LOG.debug('Async error:\n' + '\tstatus_code: %(code)s\n' + '\ttext: %(text)s\n', + {'code': r.status_code, + 'text': r.text}) else: # In theory we have a good run. 
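                # [Editor's note] Earlier in this hunk the async request
                # header switches from the boolean True to the string 'True':
                # HTTP header values must be text (requests expects
                # str/bytes and may reject a bool). A sketch of the patched
                # helper, with async_flag standing in for the original
                # 'async' parameter name:
                #
                #   def _get_header(self, async_flag):
                #       if async_flag:
                #           header = self.header.copy()
                #           header['async'] = 'True'  # str, not bool
                #           return header
                #       return self.header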
if r.content: @@ -199,11 +204,12 @@ class HttpClient(object): @utils.retry(exceptions=(requests.ConnectionError, exception.DellDriverRetryableException)) - def get(self, url, async=False): + def get(self, url): LOG.debug('get: %(url)s', {'url': url}) - rest_response = self._rest_ret(self.session.get( - self.__formatUrl(url), headers=self._get_header(async), - verify=self.verify), async) + rest_response = self.session.get(self.__formatUrl(url), + headers=self.header, + verify=self.verify) + if rest_response and rest_response.status_code == 400 and ( 'Unhandled Exception' in rest_response.text): raise exception.DellDriverRetryableException() @@ -265,8 +271,71 @@ class StorageCenterApiHelper(object): self.active_backend_id = active_backend_id self.primaryssn = self.config.dell_sc_ssn self.storage_protocol = storage_protocol + self.san_ip = self.config.san_ip + self.san_login = self.config.san_login + self.san_password = self.config.san_password + self.san_port = self.config.dell_sc_api_port self.apiversion = '2.0' + def _swap_credentials(self): + """Change out to our secondary credentials + + Or back to our primary creds. + :return: True if swapped. False if no alt credentials supplied. + """ + if self.san_ip == self.config.san_ip: + # Do we have a secondary IP and credentials? + if (self.config.secondary_san_ip and + self.config.secondary_san_login and + self.config.secondary_san_password): + self.san_ip = self.config.secondary_san_ip + self.san_login = self.config.secondary_san_login + self.san_password = self.config.secondary_san_password + else: + # Cannot swap. + return False + # Odds on this hasn't changed so no need to make setting this a + # requirement. + if self.config.secondary_sc_api_port: + self.san_port = self.config.secondary_sc_api_port + else: + # These have to be set. + self.san_ip = self.config.san_ip + self.san_login = self.config.san_login + self.san_password = self.config.san_password + self.san_port = self.config.dell_sc_api_port + return True + + def _setup_connection(self): + """Attempts to open a connection to the storage center.""" + connection = StorageCenterApi(self.san_ip, + self.san_port, + self.san_login, + self.san_password, + self.config.dell_sc_verify_cert, + self.apiversion) + # This instance is for a single backend. That backend has a + # few items of information we should save rather than passing them + # about. + connection.vfname = self.config.dell_sc_volume_folder + connection.sfname = self.config.dell_sc_server_folder + connection.excluded_domain_ips = self.config.excluded_domain_ip + if not connection.excluded_domain_ips: + connection.excluded_domain_ips = [] + # Our primary SSN doesn't change + connection.primaryssn = self.primaryssn + if self.storage_protocol == 'FC': + connection.protocol = 'FibreChannel' + # Set appropriate ssn and failover state. + if self.active_backend_id: + # active_backend_id is a string. Convert to int. + connection.ssn = int(self.active_backend_id) + else: + connection.ssn = self.primaryssn + # Make the actual connection to the DSM. + connection.open_connection() + return connection + def open_connection(self): """Creates the StorageCenterApi object. 
@@ -278,30 +347,17 @@ class StorageCenterApiHelper(object): {'ssn': self.primaryssn, 'ip': self.config.san_ip}) if self.primaryssn: - """Open connection to REST API.""" - connection = StorageCenterApi(self.config.san_ip, - self.config.dell_sc_api_port, - self.config.san_login, - self.config.san_password, - self.config.dell_sc_verify_cert, - self.apiversion) - # This instance is for a single backend. That backend has a - # few items of information we should save rather than passing them - # about. - connection.vfname = self.config.dell_sc_volume_folder - connection.sfname = self.config.dell_sc_server_folder - # Our primary SSN doesn't change - connection.primaryssn = self.primaryssn - if self.storage_protocol == 'FC': - connection.protocol = 'FibreChannel' - # Set appropriate ssn and failover state. - if self.active_backend_id: - # active_backend_id is a string. Convert to int. - connection.ssn = int(self.active_backend_id) - else: - connection.ssn = self.primaryssn - # Open connection. - connection.open_connection() + try: + """Open connection to REST API.""" + connection = self._setup_connection() + except Exception: + # If we have credentials to swap to we try it here. + if self._swap_credentials(): + connection = self._setup_connection() + else: + with excutils.save_and_reraise_exception(): + LOG.error(_LE('Failed to connect to the API. ' + 'No backup DSM provided.')) # Save our api version for next time. if self.apiversion != connection.apiversion: LOG.info(_LI('open_connection: Updating API version to %s'), @@ -333,10 +389,12 @@ class StorageCenterApi(object): 2.4.1 - Updated Replication support to V2.1. 2.5.0 - ManageableSnapshotsVD implemented. 3.0.0 - ProviderID utilized. - 3.1.0 - Failback Supported. + 3.1.0 - Failback supported. + 3.2.0 - Live Volume support. + 3.3.0 - Support for a secondary DSM. """ - APIDRIVERVERSION = '3.1.0' + APIDRIVERVERSION = '3.3.0' def __init__(self, host, port, user, password, verify, apiversion): """This creates a connection to Dell SC or EM. @@ -358,6 +416,7 @@ class StorageCenterApi(object): self.failed_over = False self.vfname = 'openstack' self.sfname = 'openstack' + self.excluded_domain_ips = [] self.legacypayloadfilters = False self.consisgroups = True self.protocol = 'Iscsi' @@ -381,7 +440,7 @@ class StorageCenterApi(object): :param rest_response: The result from a REST API call. :returns: ``True`` if success, ``False`` otherwise. """ - if rest_response: + if rest_response is not None: if 200 <= rest_response.status_code < 300: # API call was a normal success return True @@ -615,8 +674,7 @@ class StorageCenterApi(object): """ # We might be looking for another ssn. If not then # look for our default. - if ssn == -1: - ssn = self.ssn + ssn = self._vet_ssn(ssn) r = self.client.get('StorageCenter/StorageCenter') result = self._get_result(r, 'scSerialNumber', ssn) @@ -631,7 +689,7 @@ class StorageCenterApi(object): # Folder functions - def _create_folder(self, url, parent, folder): + def _create_folder(self, url, parent, folder, ssn=-1): """Creates folder under parent. This can create both to server and volume folders. The REST url @@ -644,10 +702,12 @@ class StorageCenterApi(object): :param folder: The folder name to be created. This is one level deep. :returns: The REST folder object. 
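        A hypothetical call sketch (the ssn value is illustrative):

            _create_folder('StorageCenter/ScVolumeFolder',
                           parent_instance_id, 'openstack', ssn=64702)

        builds the payload {'Name': 'openstack', 'StorageCenter': 64702,
        'Parent': parent_instance_id, 'Notes': self.notes} and returns
        the resulting REST folder object, or None on failure.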
""" + ssn = self._vet_ssn(ssn) + scfolder = None payload = {} payload['Name'] = folder - payload['StorageCenter'] = self.ssn + payload['StorageCenter'] = ssn if parent != '': payload['Parent'] = parent payload['Notes'] = self.notes @@ -657,7 +717,7 @@ class StorageCenterApi(object): scfolder = self._first_result(r) return scfolder - def _create_folder_path(self, url, foldername): + def _create_folder_path(self, url, foldername, ssn=-1): """Creates a folder path from a fully qualified name. The REST url sent in defines the folder type being created on the Dell @@ -669,6 +729,8 @@ class StorageCenterApi(object): :param foldername: The full folder name with path. :returns: The REST folder object. """ + ssn = self._vet_ssn(ssn) + path = self._path_to_array(foldername) folderpath = '' instanceId = '' @@ -680,12 +742,12 @@ class StorageCenterApi(object): # If the last was found see if this part of the path exists too if found: listurl = url + '/GetList' - scfolder = self._find_folder(listurl, folderpath) + scfolder = self._find_folder(listurl, folderpath, ssn) if scfolder is None: found = False # We didn't find it so create it if found is False: - scfolder = self._create_folder(url, instanceId, folder) + scfolder = self._create_folder(url, instanceId, folder, ssn) # If we haven't found a folder or created it then leave if scfolder is None: LOG.error(_LE('Unable to create folder path %s'), folderpath) @@ -695,7 +757,7 @@ class StorageCenterApi(object): folderpath = folderpath + '/' return scfolder - def _find_folder(self, url, foldername): + def _find_folder(self, url, foldername, ssn=-1): """Find a folder on the SC using the specified url. Most of the time the folder will already have been created so @@ -712,8 +774,10 @@ class StorageCenterApi(object): :param foldername: Full path to the folder we are looking for. :returns: Dell folder object. """ + ssn = self._vet_ssn(ssn) + pf = self._get_payload_filter() - pf.append('scSerialNumber', self.ssn) + pf.append('scSerialNumber', ssn) basename = os.path.basename(foldername) pf.append('Name', basename) # If we have any kind of path we throw it into the filters. @@ -728,7 +792,7 @@ class StorageCenterApi(object): folder = self._get_result(r, 'folderPath', folderpath) return folder - def _find_volume_folder(self, create=False): + def _find_volume_folder(self, create=False, ssn=-1): """Looks for the volume folder where backend volumes will be created. Volume folder is specified in the cindef.conf. See __init. @@ -737,11 +801,11 @@ class StorageCenterApi(object): :returns: Folder object. """ folder = self._find_folder('StorageCenter/ScVolumeFolder/GetList', - self.vfname) + self.vfname, ssn) # Doesn't exist? make it if folder is None and create is True: folder = self._create_folder_path('StorageCenter/ScVolumeFolder', - self.vfname) + self.vfname, ssn) return folder def _init_volume(self, scvolume): @@ -1002,8 +1066,7 @@ class StorageCenterApi(object): :param ssn: SSN to search on. :return: Returns the scvolume list or None. """ - if ssn == -1: - ssn = self.ssn + ssn = self._vet_ssn(ssn) result = None # We need a name or a device ID to find a volume. if name or deviceid: @@ -1025,7 +1088,28 @@ class StorageCenterApi(object): # succeeded. It might be an empty list. return result - def find_volume(self, name, provider_id): + def _autofailback(self, lv): + # if we have a working replication state. 
+ ret = False + if (lv['ReplicationState'] == 'Up' and + lv['failoverState'] == 'AutoFailedOver'): + ret = self.swap_roles_live_volume(lv) + return ret + + def _find_volume_primary(self, provider_id): + # if there is no live volume then we return our provider_id. + primary_id = provider_id + lv, swapped = self.get_live_volume(provider_id) + # if we swapped see if we can autofailback. Unless the admin + # failed us over, that is. + if swapped and not self.failed_over: + if self._autofailback(lv): + ls, swapped = self.get_live_volume(provider_id) + if lv: + primary_id = lv['primaryVolume']['instanceId'] + return primary_id + + def find_volume(self, name, provider_id, islivevol=False): """Find the volume by name or instanceId. We check if we can use provider_id before using it. If so then @@ -1036,12 +1120,17 @@ class StorageCenterApi(object): :param name: Volume name. :param provider_id: instanceId of the volume if known. + :param islivevol: Is this a live volume. :return: sc volume object or None. :raises VolumeBackendAPIException: if unable to import. """ scvolume = None - # If we have a provided_id just go get it. - if self._use_provider_id(provider_id): + if islivevol: + # Just get the primary from the sc live vol. + primary_id = self._find_volume_primary(provider_id) + scvolume = self.get_volume(primary_id) + elif self._use_provider_id(provider_id): + # just get our volume scvolume = self.get_volume(provider_id) # if we are failed over we need to check if we # need to import the failed over volume. @@ -1049,7 +1138,8 @@ class StorageCenterApi(object): if scvolume['name'] == self._repl_name(name): scvolume = self._import_one(scvolume, name) if not scvolume: - msg = _('Unable to complete failover of %s.') % name + msg = (_('Unable to complete failover of %s.') + % name) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI('Imported %(fail)s to %(guid)s.'), {'fail': self._repl_name(name), @@ -1145,7 +1235,7 @@ class StorageCenterApi(object): 'provider_id: %s'), provider_id) return True - def _find_server_folder(self, create=False): + def _find_server_folder(self, create=False, ssn=-1): """Looks for the server folder on the Dell Storage Center. This is the folder where a server objects for mapping volumes will be @@ -1154,11 +1244,13 @@ class StorageCenterApi(object): :param create: If True will create the folder if not found. :return: Folder object. """ + ssn = self._vet_ssn(ssn) + folder = self._find_folder('StorageCenter/ScServerFolder/GetList', - self.sfname) + self.sfname, ssn) if folder is None and create is True: folder = self._create_folder_path('StorageCenter/ScServerFolder', - self.sfname) + self.sfname, ssn) return folder def _add_hba(self, scserver, wwnoriscsiname): @@ -1186,7 +1278,7 @@ class StorageCenterApi(object): return False return True - def _find_serveros(self, osname='Red Hat Linux 6.x'): + def _find_serveros(self, osname='Red Hat Linux 6.x', ssn=-1): """Returns the serveros instance id of the specified osname. Required to create a Dell server object. @@ -1197,8 +1289,9 @@ class StorageCenterApi(object): :param osname: The name of the OS to look for. :returns: InstanceId of the ScServerOperatingSystem object. 
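        Usage sketch: calling _find_serveros() with the default osname
        mirrors the hard-coded 'Red Hat Linux 6.x' lookup in
        _create_server below; it returns the matching instanceId, or
        None when the Storage Center has no OS entry by that name.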
""" + ssn = self._vet_ssn(ssn) pf = self._get_payload_filter() - pf.append('scSerialNumber', self.ssn) + pf.append('scSerialNumber', ssn) r = self.client.post('StorageCenter/ScServerOperatingSystem/GetList', pf.payload) if self._check_result(r): @@ -1213,55 +1306,47 @@ class StorageCenterApi(object): return None - def create_server_multiple_hbas(self, wwns): + def create_server(self, wwnlist, ssn=-1): """Creates a server with multiple WWNS associated with it. Same as create_server except it can take a list of HBAs. - :param wwns: A list of FC WWNs or iSCSI IQNs associated with this - server. + :param wwnlist: A list of FC WWNs or iSCSI IQNs associated with this + server. :returns: Dell server object. """ - scserver = None - # Our instance names - for wwn in wwns: - if scserver is None: - # Use the fist wwn to create the server. - scserver = self.create_server(wwn) - else: - # Add the wwn to our server - self._add_hba(scserver, wwn) + # Find our folder or make it + folder = self._find_server_folder(True, ssn) + # Create our server. + scserver = self._create_server('Server_' + wwnlist[0], folder, ssn) + if not scserver: + return None + # Add our HBAs. + if scserver: + for wwn in wwnlist: + if not self._add_hba(scserver, wwn): + # We failed so log it. Delete our server and return None. + LOG.error(_LE('Error adding HBA %s to server'), wwn) + self._delete_server(scserver) + return None return scserver - def create_server(self, wwnoriscsiname): - """Creates a Dell server object on the the Storage Center. + def _create_server(self, servername, folder, ssn): + ssn = self._vet_ssn(ssn) - Adds the first HBA identified by wwnoriscsiname to it. - - :param wwnoriscsiname: FC WWN or iSCSI IQN associated with - this Dell server object. - :returns: Dell server object. - """ - - LOG.info(_LI('Creating server %s'), wwnoriscsiname) - - scserver = None + LOG.info(_LI('Creating server %s'), servername) payload = {} - payload['Name'] = 'Server_' + wwnoriscsiname - payload['StorageCenter'] = self.ssn + payload['Name'] = servername + payload['StorageCenter'] = ssn payload['Notes'] = self.notes # We pick Red Hat Linux 6.x because it supports multipath and # will attach luns to paths as they are found. - scserveros = self._find_serveros('Red Hat Linux 6.x') + scserveros = self._find_serveros('Red Hat Linux 6.x', ssn) if scserveros is not None: payload['OperatingSystem'] = scserveros - # Find our folder or make it - folder = self._find_server_folder(True) - - # At this point it doesn't matter if the folder was created or not. - # We just attempt to create the server. Let it be in the root if - # the folder creation fails. + # At this point it doesn't matter if we have a folder or not. + # Let it be in the root if the folder creation fails. if folder is not None: payload['ServerFolder'] = self._get_id(folder) @@ -1271,19 +1356,24 @@ class StorageCenterApi(object): # Server was created scserver = self._first_result(r) LOG.info(_LI('SC server created %s'), scserver) + return scserver + LOG.error(_LE('Unable to create SC server %s'), servername) + return None - # Add hba to our server - if scserver is not None: - if not self._add_hba(scserver, wwnoriscsiname): - LOG.error(_LE('Error adding HBA to server')) - # Can't have a server without an HBA - self._delete_server(scserver) - scserver = None + def _vet_ssn(self, ssn): + """Returns the default if a ssn was not set. 
- # Success or failure is determined by the caller - return scserver + Added to support live volume as we aren't always on the primary ssn + anymore - def find_server(self, instance_name): + :param ssn: ssn to check. + :return: Current ssn or the ssn sent down. + """ + if ssn == -1: + return self.ssn + return ssn + + def find_server(self, instance_name, ssn=-1): """Hunts for a server on the Dell backend by instance_name. The instance_name is the same as the server's HBA. This is the IQN or @@ -1293,17 +1383,20 @@ class StorageCenterApi(object): :param instance_name: instance_name is a FC WWN or iSCSI IQN from the connector. In cinder a server is identified by its HBA. + :param ssn: Storage center to search. :returns: Dell server object or None. """ + ssn = self._vet_ssn(ssn) + scserver = None # We search for our server by first finding our HBA - hba = self._find_serverhba(instance_name) + hba = self._find_serverhba(instance_name, ssn) # Once created hbas stay in the system. So it isn't enough # that we found one it actually has to be attached to a # server. if hba is not None and hba.get('server') is not None: pf = self._get_payload_filter() - pf.append('scSerialNumber', self.ssn) + pf.append('scSerialNumber', ssn) pf.append('instanceId', self._get_id(hba['server'])) r = self.client.post('StorageCenter/ScServer/GetList', pf.payload) if self._check_result(r): @@ -1313,7 +1406,7 @@ class StorageCenterApi(object): LOG.debug('Server (%s) not found.', instance_name) return scserver - def _find_serverhba(self, instance_name): + def _find_serverhba(self, instance_name, ssn): """Hunts for a server HBA on the Dell backend by instance_name. Instance_name is the same as the IQN or WWN specified in the @@ -1321,12 +1414,13 @@ class StorageCenterApi(object): :param instance_name: Instance_name is a FC WWN or iSCSI IQN from the connector. + :param ssn: Storage center to search. :returns: Dell server HBA object. """ scserverhba = None # We search for our server by first finding our HBA pf = self._get_payload_filter() - pf.append('scSerialNumber', self.ssn) + pf.append('scSerialNumber', ssn) pf.append('instanceName', instance_name) r = self.client.post('StorageCenter/ScServerHba/GetList', pf.payload) if self._check_result(r): @@ -1400,6 +1494,7 @@ class StorageCenterApi(object): LOG.info(_LI('Volume mappings for %(name)s: %(mappings)s'), {'name': scvolume.get('name'), 'mappings': mappings}) + return mappings def _find_mapping_profiles(self, scvolume): @@ -1550,23 +1645,19 @@ class StorageCenterApi(object): 'Error finding configuration: %s'), cportid) return controllerport - def find_iscsi_properties(self, scvolume, ip=None, port=None): + def find_iscsi_properties(self, scvolume): """Finds target information for a given Dell scvolume object mapping. The data coming back is both the preferred path and all the paths. :param scvolume: The dell sc volume object. - :param ip: The preferred target portal ip. - :param port: The preferred target portal port. :returns: iSCSI property dictionary. :raises: VolumeBackendAPIException """ LOG.debug('find_iscsi_properties: scvolume: %s', scvolume) # Our mutable process object. pdata = {'active': -1, - 'up': -1, - 'ip': ip, - 'port': port} + 'up': -1} # Our output lists. portals = [] luns = [] @@ -1587,16 +1678,10 @@ class StorageCenterApi(object): controller or not. :return: Nothing """ - portals.append(address + ':' + six.text_type(port)) - iqns.append(iqn) - luns.append(lun) - - # We've all the information. We need to find - # the best single portal to return. 
-            # So check
-            # this one if it is on the right IP, port and
-            # if the access and status are correct.
-            if ((pdata['ip'] is None or pdata['ip'] == address) and
-                    (pdata['port'] is None or pdata['port'] == port)):
+            if self.excluded_domain_ips.count(address) == 0:
+                portals.append(address + ':' + six.text_type(port))
+                iqns.append(iqn)
+                luns.append(lun)
 
                 # We need to point to the best link.
                 # So state active and status up is preferred
@@ -1623,6 +1708,11 @@
         isvpmode = self._is_virtualport_mode()
         # Trundle through our mappings.
         for mapping in mappings:
+            # Don't return remote sc links.
+            msrv = mapping.get('server')
+            if msrv and msrv.get('objectType') == 'ScRemoteStorageCenter':
+                continue
+
             # The lun, ro mode and status are in the mapping.
             LOG.debug('find_iscsi_properties: mapping: %s', mapping)
             lun = mapping.get('lun')
@@ -2556,8 +2646,7 @@
         :param ssn: SSN to search on.
         :return: scqos node object.
         """
-        if ssn == -1:
-            ssn = self.ssn
+        ssn = self._vet_ssn(ssn)
         pf = self._get_payload_filter()
         pf.append('scSerialNumber', ssn)
         pf.append('name', qosnode)
@@ -2779,6 +2868,9 @@
             return self._check_result(r)
         return False
 
+    def find_replication_dest(self, instance_id, destssn):
+        pass
+
     def break_replication(self, volumename, instance_id, destssn):
         """This just breaks the replication.
 
@@ -2793,10 +2885,12 @@
         """
         replinstanceid = None
         scvolume = self.find_volume(volumename, instance_id)
-        screplication = self.get_screplication(scvolume, destssn)
-        # if we got our replication volume we can do this nicely.
-        if screplication:
-            replinstanceid = screplication['destinationVolume']['instanceId']
+        if scvolume:
+            screplication = self.get_screplication(scvolume, destssn)
+            # If we got our replication volume we can do this nicely.
+            if screplication:
+                replinstanceid = (
+                    screplication['destinationVolume']['instanceId'])
         screplvol = self.find_repl_volume(self._repl_name(volumename),
                                           destssn, replinstanceid)
         # delete_replication fails to delete replication without also
@@ -2953,3 +3047,138 @@
                           ' progress information returned: %s'),
                      progress)
         return None, None
+
+    def get_live_volume(self, primaryid):
+        """Gets the live ScLiveVolume object for the volume with primaryid.
+
+        :param primaryid: InstanceId of the primary volume.
+        :return: ScLiveVolume object or None, swapped True/False.
+        """
+        if primaryid:
+            r = self.client.get('StorageCenter/ScLiveVolume')
+            if self._check_result(r):
+                lvs = self._get_json(r)
+                for lv in lvs:
+                    if (lv.get('primaryVolume') and
+                            lv['primaryVolume']['instanceId'] == primaryid):
+                        return lv, False
+                    if (lv.get('secondaryVolume') and
+                            lv['secondaryVolume']['instanceId'] == primaryid):
+                        return lv, True
+        return None, False
+
+    def _get_hbas(self, serverid):
+        # Helper to get the HBAs of a given server.
+        r = self.client.get('StorageCenter/ScServer/%s/HbaList' % serverid)
+        if self._check_result(r):
+            return self._get_json(r)
+        return None
+
+    def map_secondary_volume(self, sclivevol, scdestsrv):
+        """Maps the secondary volume of a LiveVolume to destsrv.
+
+        :param sclivevol: ScLiveVolume object.
+        :param scdestsrv: ScServer object for the destination.
+        :return: ScMappingProfile object or None on failure.
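+
+        Note: the mapping is created with 'MapToDownServerHbas' enabled so
+        it succeeds even when the secondary server's HBAs are down.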
+ """ + payload = {} + payload['Server'] = self._get_id(scdestsrv) + payload['Advanced'] = {'MapToDownServerHbas': True} + r = self.client.post('StorageCenter/ScLiveVolume/%s/MapSecondaryVolume' + % self._get_id(sclivevol), payload, True) + if self._check_result(r): + return self._get_json(r) + return None + + def create_live_volume(self, scvolume, remotessn, active=False, sync=False, + autofailover=False, primaryqos='CinderQOS', + secondaryqos='CinderQOS'): + """This create's a live volume instead of a replication. + + Servers are not created at this point so we cannot map up a remote + server immediately. + + :param scvolume: Source SC Volume + :param remotessn: Destination SSN. + :param active: Replicate the active replay boolean. + :param sync: Sync replication boolean. + :param autofailover: enable autofailover and failback boolean. + :param primaryqos: QOS node name for the primary side. + :param secondaryqos: QOS node name for the remote side. + :return: ScLiveVolume object or None on failure. + """ + destssn = self.find_sc(int(remotessn)) + pscqos = self._find_qos(primaryqos) + sscqos = self._find_qos(secondaryqos, destssn) + if not destssn: + LOG.error(_LE('create_live_volume: Unable to find remote %s'), + remotessn) + elif not pscqos: + LOG.error(_LE('create_live_volume: Unable to find or create ' + 'qos node %s'), primaryqos) + elif not sscqos: + LOG.error(_LE('create_live_volume: Unable to find or create remote' + ' qos node %(qos)s on %(ssn)s'), + {'qos': secondaryqos, 'ssn': destssn}) + else: + payload = {} + payload['PrimaryVolume'] = self._get_id(scvolume) + payload['PrimaryQosNode'] = self._get_id(pscqos) + payload['SecondaryQosNode'] = self._get_id(sscqos) + payload['SecondaryStorageCenter'] = destssn + payload['StorageCenter'] = self.ssn + # payload['Dedup'] = False + payload['FailoverAutomaticallyEnabled'] = autofailover + payload['RestoreAutomaticallyEnabled'] = autofailover + payload['SwapRolesAutomaticallyEnabled'] = False + payload['ReplicateActiveReplay'] = (active or autofailover) + if sync or autofailover: + payload['Type'] = 'Synchronous' + payload['SyncMode'] = 'HighAvailability' + else: + payload['Type'] = 'Asynchronous' + secondaryvolumeattributes = {} + secondaryvolumeattributes['CreateSourceVolumeFolderPath'] = True + secondaryvolumeattributes['Notes'] = self.notes + secondaryvolumeattributes['Name'] = scvolume['name'] + payload[ + 'SecondaryVolumeAttributes'] = secondaryvolumeattributes + + r = self.client.post('StorageCenter/ScLiveVolume', payload, True) + if self._check_result(r): + LOG.info(_LI('create_live_volume: Live Volume created from' + '%(svol)s to %(ssn)s'), + {'svol': self._get_id(scvolume), 'ssn': remotessn}) + return self._get_json(r) + LOG.error(_LE('create_live_volume: Failed to create Live Volume from' + '%(svol)s to %(ssn)s'), + {'svol': self._get_id(scvolume), 'ssn': remotessn}) + return None + + def delete_live_volume(self, sclivevolume, deletesecondaryvolume): + """Deletes the live volume. + + :param sclivevolume: ScLiveVolume object to be whacked. + :return: Boolean on success/fail. + """ + payload = {} + payload['ConvertToReplication'] = False + payload['DeleteSecondaryVolume'] = deletesecondaryvolume + payload['RecycleSecondaryVolume'] = deletesecondaryvolume + r = self.client.delete('StorageCenter/ScLiveVolume/%s' % + self._get_id(sclivevolume), payload, True) + if self._check_result(r): + return True + return False + + def swap_roles_live_volume(self, sclivevolume): + """Swap live volume roles. 
+
+        :param sclivevolume: Dell SC live volume object.
+        :return: True/False on success/failure.
+        """
+        r = self.client.post('StorageCenter/ScLiveVolume/%s/SwapRoles' %
+                             self._get_id(sclivevolume), {}, True)
+        if self._check_result(r):
+            return True
+        return False
diff --git a/cinder/volume/drivers/dell/dell_storagecenter_common.py b/cinder/volume/drivers/dell/dell_storagecenter_common.py
index a1dac3fe5..b333614b3 100644
--- a/cinder/volume/drivers/dell/dell_storagecenter_common.py
+++ b/cinder/volume/drivers/dell/dell_storagecenter_common.py
@@ -14,6 +14,7 @@
 
 import eventlet
 from oslo_config import cfg
+from oslo_config import types
 from oslo_log import log as logging
 from oslo_utils import excutils
 import six
@@ -42,7 +43,24 @@ common_opts = [
              help='Name of the volume folder to use on the Storage Center'),
     cfg.BoolOpt('dell_sc_verify_cert',
                 default=False,
-                help='Enable HTTPS SC certificate verification.')
+                help='Enable HTTPS SC certificate verification'),
+    cfg.StrOpt('secondary_san_ip',
+               default='',
+               help='IP address of secondary DSM controller'),
+    cfg.StrOpt('secondary_san_login',
+               default='Admin',
+               help='Secondary DSM user name'),
+    cfg.StrOpt('secondary_san_password',
+               default='',
+               help='Secondary DSM user password',
+               secret=True),
+    cfg.PortOpt('secondary_sc_api_port',
+                default=3033,
+                help='Secondary Dell API port'),
+    cfg.MultiOpt('excluded_domain_ip',
+                 item_type=types.IPAddress(),
+                 default=None,
+                 help='Domain IP to be excluded from iSCSI returns.')
 ]
 
 LOG = logging.getLogger(__name__)
@@ -122,9 +140,9 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
                        'replication_device %s not found') % replssn
                 raise exception.InvalidHost(reason=msg)
 
-    def _get_volume_extra_specs(self, volume):
-        """Gets extra specs for the given volume."""
-        type_id = volume.get('volume_type_id')
+    def _get_volume_extra_specs(self, obj):
+        """Gets extra specs for the given object."""
+        type_id = obj.get('volume_type_id')
         if type_id:
             return volume_types.get_volume_type_extra_specs(type_id)
 
@@ -144,24 +162,54 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
             if profile:
                 api.update_cg_volumes(profile, [volume])
 
-    def _do_repl(self, api, volume):
+    def _get_replication_specs(self, obj):
         """Checks if we can do replication.
 
         Need the extra spec set and we have to be talking to EM.
 
-        :param api: Dell REST API object.
-        :param volume: Cinder Volume object.
-        :return: Boolean (True if replication enabled), Boolean (True if
-                 replication type is sync.
+        :param obj: Cinder Volume or snapshot object.
+        :return: rinfo dict of replication settings.
         """
-        do_repl = False
-        sync = False
+        rinfo = {'enabled': False, 'sync': False,
+                 'live': False, 'active': False,
+                 'autofailover': False}
         # Repl does not work with direct connect.
-        if not self.failed_over and not self.is_direct_connect:
-            specs = self._get_volume_extra_specs(volume)
-            do_repl = specs.get('replication_enabled') == ' True'
-            sync = specs.get('replication_type') == ' sync'
-        return do_repl, sync
+        if not self.is_direct_connect:
+            specs = self._get_volume_extra_specs(obj)
+            if (not self.failed_over and
+                    specs.get('replication_enabled') == ' True'):
+                rinfo['enabled'] = True
+            if specs.get('replication_type') == ' sync':
+                rinfo['sync'] = True
+            if specs.get('replication:livevolume') == ' True':
+                rinfo['live'] = True
+            if specs.get('replication:livevolume:autofailover') == ' True':
+                rinfo['autofailover'] = True
+            if specs.get('replication:activereplay') == ' True':
+                rinfo['active'] = True
+
+            # Some quick checks.
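+            # Replication requires at least one replication_device;
+            # a live volume requires exactly one.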
+            if rinfo['enabled']:
+                replication_target_count = len(self.backends)
+                msg = None
+                if replication_target_count == 0:
+                    msg = _(
+                        'Replication setup failure: replication has been '
+                        'enabled but no replication target has been specified '
+                        'for this backend.')
+                if rinfo['live'] and replication_target_count != 1:
+                    msg = _('Replication setup failure: replication:livevolume'
+                            ' has been enabled but more than one replication '
+                            'target has been specified for this backend.')
+                if msg:
+                    LOG.debug(msg)
+                    raise exception.ReplicationError(message=msg)
+        # Got this far. Life is good. Return our data.
+        return rinfo
+
+    def _is_live_vol(self, obj):
+        rspecs = self._get_replication_specs(obj)
+        return rspecs['enabled'] and rspecs['live']
 
     def _create_replications(self, api, volume, scvolume):
         """Creates any appropriate replications for a given volume.
@@ -175,23 +223,33 @@
         # for now we assume we have an array named backends.
         replication_driver_data = None
         # Replicate if we are supposed to.
-        do_repl, sync = self._do_repl(api, volume)
-        if do_repl:
+        rspecs = self._get_replication_specs(volume)
+        if rspecs['enabled']:
             for backend in self.backends:
-                # Check if we are to replicate the active replay or not.
-                specs = self._get_volume_extra_specs(volume)
-                replact = specs.get('replication:activereplay') == ' True'
-                if not api.create_replication(scvolume,
-                                              backend['target_device_id'],
-                                              backend.get('qosnode',
-                                                          'cinderqos'),
-                                              sync,
-                                              backend.get('diskfolder', None),
-                                              replact):
+                targetdeviceid = backend['target_device_id']
+                primaryqos = backend.get('qosnode', 'cinderqos')
+                secondaryqos = backend.get('remoteqos', 'cinderqos')
+                diskfolder = backend.get('diskfolder', None)
+                obj = None
+                if rspecs['live']:
+                    # We are rolling with a live volume.
+                    obj = api.create_live_volume(scvolume, targetdeviceid,
+                                                 rspecs['active'],
+                                                 rspecs['sync'],
+                                                 rspecs['autofailover'],
+                                                 primaryqos, secondaryqos)
+                else:
+                    # Else a regular replication.
+                    obj = api.create_replication(scvolume, targetdeviceid,
+                                                 primaryqos, rspecs['sync'],
+                                                 diskfolder, rspecs['active'])
+                # This is either a ScReplication object or a ScLiveVolume
+                # object. So long as it isn't None we are fine.
+                if not obj:
                     # Create replication will have printed a better error.
                     msg = _('Replication %(name)s to %(ssn)s failed.') % {
                         'name': volume['id'],
-                        'ssn': backend['target_device_id']}
+                        'ssn': targetdeviceid}
                     raise exception.VolumeBackendAPIException(data=msg)
                 if not replication_driver_data:
                     replication_driver_data = backend['target_device_id']
@@ -201,8 +259,9 @@
         # If we did something return model update.
         model_update = {}
         if replication_driver_data:
-            model_update = {'replication_status': 'enabled',
-                            'replication_driver_data': replication_driver_data}
+            model_update = {
+                'replication_status': fields.ReplicationStatus.ENABLED,
+                'replication_driver_data': replication_driver_data}
         return model_update
 
     @staticmethod
@@ -282,6 +341,41 @@
                 ssnstrings.append(ssnstring)
         return ssnstrings
 
+    def _delete_live_volume(self, api, volume):
+        """Delete live volume associated with volume.
+
+        :param api: Dell REST API object.
+        :param volume: Cinder Volume object.
+        :return: True if we actually deleted something. False for everything
+                 else.
+        """
+        # Live Volume was added after provider_id support. So just assume it is
+        # there.
+        replication_driver_data = volume.get('replication_driver_data')
+        # Do we have any replication driver data?
+        if replication_driver_data:
+            # Valid replication data?
+            ssnstrings = self._split_driver_data(replication_driver_data)
+            if ssnstrings:
+                ssn = int(ssnstrings[0])
+                sclivevolume, swapped = api.get_live_volume(
+                    volume.get('provider_id'))
+                # Have we found the live volume?
+                if (sclivevolume and
+                        sclivevolume.get('secondaryScSerialNumber') == ssn and
+                        api.delete_live_volume(sclivevolume, True)):
+                    LOG.info(_LI('%(vname)s\'s replication live volume has '
+                                 'been deleted from Storage Center %(sc)s.'),
+                             {'vname': volume.get('id'),
+                              'sc': ssn})
+                    return True
+        # If we are here either we do not have a live volume, we do not have
+        # one on our configured SC or we were not able to delete it.
+        # Either way, warn and leave.
+        LOG.warning(_LW('Unable to delete the live volume for %s.'),
+                    volume.get('id'))
+        return False
+
     def _delete_replications(self, api, volume):
         """Delete replications associated with a given volume.
 
@@ -291,26 +385,24 @@
 
         :param api: Dell REST API object.
         :param volume: Cinder Volume object
-        :return:
+        :return: None
         """
-        do_repl, sync = self._do_repl(api, volume)
-        if do_repl:
-            replication_driver_data = volume.get('replication_driver_data')
-            if replication_driver_data:
-                ssnstrings = self._split_driver_data(replication_driver_data)
-                volume_name = volume.get('id')
-                provider_id = volume.get('provider_id')
-                scvol = api.find_volume(volume_name, provider_id)
-                # This is just a string of ssns separated by commas.
-                # Trundle through these and delete them all.
-                for ssnstring in ssnstrings:
-                    ssn = int(ssnstring)
-                    if not api.delete_replication(scvol, ssn):
-                        LOG.warning(_LW('Unable to delete replication of '
-                                        'Volume %(vname)s to Storage Center '
-                                        '%(sc)s.'),
-                                    {'vname': volume_name,
-                                     'sc': ssnstring})
+        replication_driver_data = volume.get('replication_driver_data')
+        if replication_driver_data:
+            ssnstrings = self._split_driver_data(replication_driver_data)
+            volume_name = volume.get('id')
+            provider_id = volume.get('provider_id')
+            scvol = api.find_volume(volume_name, provider_id)
+            # This is just a string of ssns separated by commas.
+            # Trundle through these and delete them all.
+            for ssnstring in ssnstrings:
+                ssn = int(ssnstring)
+                if not api.delete_replication(scvol, ssn):
+                    LOG.warning(_LW('Unable to delete replication of Volume '
+                                    '%(vname)s to Storage Center %(sc)s.'),
+                                {'vname': volume_name,
+                                 'sc': ssnstring})
         # If none of that worked or there was nothing to do doesn't matter.
         # Just move on.
 
@@ -322,7 +414,12 @@
         LOG.debug('Deleting volume %s', volume_name)
         with self._client.open_connection() as api:
             try:
-                self._delete_replications(api, volume)
+                rspecs = self._get_replication_specs(volume)
+                if rspecs['enabled']:
+                    if rspecs['live']:
+                        self._delete_live_volume(api, volume)
+                    else:
+                        self._delete_replications(api, volume)
                 deleted = api.delete_volume(volume_name, provider_id)
             except Exception:
                 with excutils.save_and_reraise_exception():
@@ -339,25 +436,24 @@
         """Create snapshot"""
         # our volume name is the volume id
         volume_name = snapshot.get('volume_id')
-        # TODO(tswanson): Is there any reason to think this will be set
-        # before I create the snapshot? Doesn't hurt to try to get it.
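+        # provider_id may not be set yet; find_volume falls back to
+        # searching by volume name when it cannot use the provider_id.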
provider_id = snapshot.get('provider_id') snapshot_id = snapshot.get('id') LOG.debug('Creating snapshot %(snap)s on volume %(vol)s', {'snap': snapshot_id, 'vol': volume_name}) with self._client.open_connection() as api: - scvolume = api.find_volume(volume_name, provider_id) + scvolume = api.find_volume(volume_name, provider_id, + self._is_live_vol(snapshot)) if scvolume is not None: replay = api.create_replay(scvolume, snapshot_id, 0) if replay: - return {'status': 'available', + return {'status': fields.SnapshotStatus.AVAILABLE, 'provider_id': scvolume['instanceId']} else: LOG.warning(_LW('Unable to locate volume:%s'), volume_name) - snapshot['status'] = 'error_creating' + snapshot['status'] = fields.SnapshotStatus.ERROR msg = _('Failed to create snapshot %s') % snapshot_id raise exception.VolumeBackendAPIException(data=msg) @@ -511,7 +607,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, if scvolume and api.delete_replay(scvolume, snapshot_id): return # if we are here things went poorly. - snapshot['status'] = 'error_deleting' + snapshot['status'] = fields.SnapshotStatus.ERROR_DELETING msg = _('Failed to delete snapshot %s') % snapshot_id raise exception.VolumeBackendAPIException(data=msg) @@ -535,7 +631,8 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, LOG.debug('Checking existence of volume %s', volume_name) with self._client.open_connection() as api: try: - scvolume = api.find_volume(volume_name, provider_id) + scvolume = api.find_volume(volume_name, provider_id, + self._is_live_vol(volume)) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to ensure export of volume %s'), @@ -594,6 +691,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, data['storage_protocol'] = self.storage_protocol data['reserved_percentage'] = 0 data['consistencygroup_support'] = True + data['thin_provisioning_support'] = True totalcapacity = storageusage.get('availableSpace') totalcapacitygb = self._bytes_to_gb(totalcapacity) data['total_capacity_gb'] = totalcapacitygb @@ -762,7 +860,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, 'status': fields.SnapshotStatus.AVAILABLE }) - model_update = {'status': 'available'} + model_update = {'status': fields.SnapshotStatus.AVAILABLE} return model_update, snapshot_updates @@ -802,7 +900,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.DELETED - model_update = {'status': 'deleted'} + model_update = {'status': fields.SnapshotStatus.DELETED} return model_update, snapshots @@ -993,7 +1091,8 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, scvolume) elif current == ' True': self._delete_replications(api, volume) - model_update = {'replication_status': 'disabled', + model_update = {'replication_status': + fields.ReplicationStatus.DISABLED, 'replication_driver_data': ''} # Active Replay @@ -1219,14 +1318,98 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, # be good. Else error/error. 
             if item['status'] == 'available':
                 model_update['status'] = 'available'
-                model_update['replication_status'] = 'enabled'
+                model_update['replication_status'] = (
+                    fields.ReplicationStatus.ENABLED)
             else:
                 model_update['status'] = 'error'
-                model_update['replication_status'] = 'error'
+                model_update['replication_status'] = (
+                    fields.ReplicationStatus.ERROR)
             volume_updates.append({'volume_id': item['volume']['id'],
                                    'updates': model_update})
         return volume_updates
 
+    def _failback_replication(self, api, volume, qosnode):
+        """Sets up the replication failback.
+
+        :param api: Dell SC API.
+        :param volume: Cinder Volume object.
+        :param qosnode: Dell QOS node object.
+        :return: replitem dict.
+        """
+        LOG.info(_LI('failback_volumes: replicated volume'))
+        # Get our current volume.
+        cvol = api.find_volume(volume['id'], volume['provider_id'])
+        # Original volume on the primary.
+        ovol = api.find_repl_volume(volume['id'], api.primaryssn,
+                                    None, True, False)
+        # Delete our current mappings.
+        api.remove_mappings(cvol)
+        # If there is a replication to delete do so.
+        api.delete_replication(ovol, api.ssn, False)
+        # Replicate to a common replay.
+        screpl = api.replicate_to_common(cvol, ovol, 'tempqos')
+        # We made it this far. Update our status.
+        screplid = None
+        status = ''
+        if screpl:
+            screplid = screpl['instanceId']
+            nvolid = screpl['destinationVolume']['instanceId']
+            status = 'inprogress'
+        else:
+            LOG.error(_LE('Unable to restore %s'), volume['id'])
+            screplid = None
+            nvolid = None
+            status = 'error'
+
+        # Save some information for the next step.
+        # nvol is the new volume created by replicate_to_common.
+        # We also grab our extra specs here.
+        replitem = {
+            'volume': volume,
+            'specs': self._parse_extraspecs(volume),
+            'qosnode': qosnode,
+            'screpl': screplid,
+            'cvol': cvol['instanceId'],
+            'ovol': ovol['instanceId'],
+            'nvol': nvolid,
+            'rdd': six.text_type(api.ssn),
+            'status': status}
+
+        return replitem
+
+    def _failback_live_volume(self, api, id, provider_id):
+        """Fails the live volume back to its original primary.
+
+        :param api: Dell SC API.
+        :param id: Volume ID.
+        :param provider_id: Dell Instance ID.
+        :return: model_update dict.
+        """
+        model_update = {}
+        sclivevolume, swapped = api.get_live_volume(provider_id)
+        if sclivevolume and api.swap_roles_live_volume(sclivevolume):
+            LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
+            model_update = {
+                'status': 'available',
+                'replication_status': fields.ReplicationStatus.ENABLED,
+                'provider_id':
+                    sclivevolume['secondaryVolume']['instanceId']}
+        else:
+            LOG.info(_LI('Failure swapping roles %s'), id)
+            model_update = {'status': 'error'}
+
+        return model_update
+
+    def _finish_failback(self, api, replitems):
+        # Wait for replication to complete.
+        # This will also flip replication.
+        self._wait_for_replication(api, replitems)
+        # Replications are done. Attach to any additional replication
+        # backends.
+        self._reattach_remaining_replications(api, replitems)
+        self._fixup_types(api, replitems)
+        return self._volume_updates(replitems)
+
     def failback_volumes(self, volumes):
         """This is a generic volume failback.
 
@@ -1244,72 +1427,83 @@
         volume_updates = []
         replitems = []
-        screplid = None
-        status = ''
+
 
         # Trundle through the volumes. Update non replicated to alive again
         # and reverse the replications for the remaining volumes.
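+        # Live volumes are failed back by swapping roles
+        # (_failback_live_volume); everything else is reversed through
+        # _failback_replication in batches of five.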
for volume in volumes: LOG.info(_LI('failback_volumes: starting volume: %s'), volume) model_update = {} if volume.get('replication_driver_data'): - LOG.info(_LI('failback_volumes: replicated volume')) - # Get our current volume. - cvol = api.find_volume(volume['id'], volume['provider_id']) - # Original volume on the primary. - ovol = api.find_repl_volume(volume['id'], api.primaryssn, - None, True, False) - # Delete our current mappings. - api.remove_mappings(cvol) - # If there is a replication to delete do so. - api.delete_replication(ovol, api.ssn, False) - # Replicate to a common replay. - screpl = api.replicate_to_common(cvol, ovol, 'tempqos') - # We made it this far. Update our status. - if screpl: - screplid = screpl['instanceId'] - nvolid = screpl['destinationVolume']['instanceId'] - status = 'inprogress' + rspecs = self._get_replication_specs(volume) + if rspecs['live']: + model_update = self._failback_live_volume( + api, volume['id'], volume['provider_id']) else: - LOG.error(_LE('Unable to restore %s'), volume['id']) - screplid = None - nvolid = None - status = 'error' + replitem = self._failback_replication(api, volume, + qosnode) - # Save some information for the next step. - # nvol is the new volume created by replicate_to_common. - # We also grab our extra specs here. - replitems.append( - {'volume': volume, - 'specs': self._parse_extraspecs(volume), - 'qosnode': qosnode, - 'screpl': screplid, - 'cvol': cvol['instanceId'], - 'ovol': ovol['instanceId'], - 'nvol': nvolid, - 'rdd': six.text_type(api.ssn), - 'status': status}) + # Save some information for the next step. + # nvol is the new volume created by + # replicate_to_common. We also grab our + # extra specs here. + replitems.append(replitem) else: # Not replicated. Just set it to available. model_update = {'status': 'available'} - # Either we are failed over or our status is now error. + + # Save our update + if model_update: volume_updates.append({'volume_id': volume['id'], 'updates': model_update}) + # Let's do up to 5 replications at once. + if len(replitems) == 5: + volume_updates += self._finish_failback(api, replitems) + replitems = [] + # Finish any leftover items if replitems: - # Wait for replication to complete. - # This will also flip replication. - self._wait_for_replication(api, replitems) - # Replications are done. Attach to any additional replication - # backends. - self._reattach_remaining_replications(api, replitems) - self._fixup_types(api, replitems) - volume_updates += self._volume_updates(replitems) + volume_updates += self._finish_failback(api, replitems) # Set us back to a happy state. # The only way this doesn't happen is if the primary is down. self._update_backend(None) return volume_updates + def _failover_replication(self, api, id, provider_id, destssn): + rvol = api.break_replication(id, provider_id, destssn) + model_update = {} + if rvol: + LOG.info(_LI('Success failing over volume %s'), id) + model_update = {'replication_status': + fields.ReplicationStatus.FAILED_OVER, + 'provider_id': rvol['instanceId']} + else: + LOG.info(_LI('Failed failing over volume %s'), id) + model_update = {'status': 'error'} + + return model_update + + def _failover_live_volume(self, api, id, provider_id): + model_update = {} + sclivevolume, swapped = api.get_live_volume(provider_id) + if sclivevolume: + # If we aren't swapped try it. If fail error out. 
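+            # A swapped live volume means the secondary has already taken
+            # over, so we skip the swap and just report the new primary.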
+ if not swapped and not api.swap_roles_live_volume(sclivevolume): + LOG.info(_LI('Failure swapping roles %s'), id) + model_update = {'status': 'error'} + return model_update + + LOG.info(_LI('Success swapping sclivevolume roles %s'), id) + sclivevolume, swapped = api.get_live_volume(provider_id) + model_update = { + 'replication_status': + fields.ReplicationStatus.FAILED_OVER, + 'provider_id': + sclivevolume['primaryVolume']['instanceId']} + + # Error and leave. + return model_update + def failover_host(self, context, volumes, secondary_id=None): """Failover to secondary. @@ -1327,7 +1521,6 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, 'replication_status': 'failed-over', 'replication_extended_status': 'whatever',...}},] """ - LOG.debug('failover-host') LOG.debug(self.failed_over) LOG.debug(self.active_backend_id) @@ -1352,21 +1545,15 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, for volume in volumes: model_update = {} if volume.get('replication_driver_data'): - rvol = api.break_replication( - volume['id'], volume.get('provider_id'), - destssn) - if rvol: - LOG.info(_LI('Success failing over volume %s'), - volume['id']) + rspecs = self._get_replication_specs(volume) + if rspecs['live']: + model_update = self._failover_live_volume( + api, volume['id'], + volume.get('provider_id')) else: - LOG.info(_LI('Failed failing over volume %s'), - volume['id']) - - # We should note that we are now failed over - # and that we have a new instanceId. - model_update = { - 'replication_status': 'failed-over', - 'provider_id': rvol['instanceId']} + model_update = self._failover_replication( + api, volume['id'], + volume.get('provider_id'), destssn) else: # Not a replicated volume. Try to unmap it. scvolume = api.find_volume( diff --git a/cinder/volume/drivers/dell/dell_storagecenter_fc.py b/cinder/volume/drivers/dell/dell_storagecenter_fc.py index 9fe94392c..b21501411 100644 --- a/cinder/volume/drivers/dell/dell_storagecenter_fc.py +++ b/cinder/volume/drivers/dell/dell_storagecenter_fc.py @@ -18,7 +18,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _, _LE, _LW from cinder import interface from cinder.volume import driver from cinder.volume.drivers.dell import dell_storagecenter_common @@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__) class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver, driver.FibreChannelDriver): - """Implements commands for Dell EqualLogic SAN ISCSI management. + """Implements commands for Dell Storage Center FC management. To enable the driver add the following line to the cinder configuration: volume_driver=cinder.volume.drivers.dell.DellStorageCenterFCDriver @@ -53,11 +53,14 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver, 2.4.1 - Updated Replication support to V2.1. 2.5.0 - ManageableSnapshotsVD implemented. 3.0.0 - ProviderID utilized. - 3.1.0 - Failback Supported. + 3.1.0 - Failback supported. + 3.2.0 - Live Volume support. """ - VERSION = '3.1.0' + VERSION = '3.2.0' + + CI_WIKI_NAME = "Dell_Storage_CI" def __init__(self, *args, **kwargs): super(DellStorageCenterFCDriver, self).__init__(*args, **kwargs) @@ -81,22 +84,19 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver, # known unique name. 
         volume_name = volume.get('id')
         provider_id = volume.get('provider_id')
+        islivevol = self._is_live_vol(volume)
         LOG.debug('Initialize connection: %s', volume_name)
         with self._client.open_connection() as api:
             try:
-                # Find our server.
-                scserver = None
                 wwpns = connector.get('wwpns')
-                for wwn in wwpns:
-                    scserver = api.find_server(wwn)
-                    if scserver is not None:
-                        break
+                # Find our server.
+                scserver = self._find_server(api, wwpns)
 
                 # No? Create it.
                 if scserver is None:
-                    scserver = api.create_server_multiple_hbas(wwpns)
+                    scserver = api.create_server(wwpns)
                 # Find the volume on the storage center.
-                scvolume = api.find_volume(volume_name, provider_id)
+                scvolume = api.find_volume(volume_name, provider_id, islivevol)
                 if scserver is not None and scvolume is not None:
                     mapping = api.map_volume(scvolume, scserver)
                     if mapping is not None:
@@ -105,6 +105,24 @@
                         scvolume = api.get_volume(scvolume['instanceId'])
                         lun, targets, init_targ_map = api.find_wwns(scvolume,
                                                                     scserver)
+
+                        # Do we have extra live volume work?
+                        if islivevol:
+                            # Get our volume and our swap state.
+                            sclivevolume, swapped = api.get_live_volume(
+                                provider_id)
+                            # Do not map to a failed over volume.
+                            if sclivevolume and not swapped:
+                                # Now map our secondary.
+                                lvlun, lvtargets, lvinit_targ_map = (
+                                    self.initialize_secondary(api,
+                                                              sclivevolume,
+                                                              wwpns))
+                                # Add the secondary's targets to our return.
+                                targets += lvtargets
+                                init_targ_map.update(lvinit_targ_map)
+
+                        # Roll up our return data.
                         if lun is not None and len(targets) > 0:
                             data = {'driver_volume_type': 'fibre_channel',
                                     'data': {'target_lun': lun,
@@ -124,25 +142,73 @@
         # We get here because our mapping is none so blow up.
         raise exception.VolumeBackendAPIException(_('Unable to map volume.'))
 
+    def _find_server(self, api, wwns, ssn=-1):
+        for wwn in wwns:
+            scserver = api.find_server(wwn, ssn)
+            if scserver is not None:
+                return scserver
+        return None
+
+    def initialize_secondary(self, api, sclivevolume, wwns):
+        """Initialize the secondary connection of a live volume pair.
+
+        :param api: Dell SC api object.
+        :param sclivevolume: Dell SC live volume object.
+        :param wwns: Cinder list of wwns from the connector.
+        :return: lun, targets and initiator target map.
+        """
+        # Find our server.
+        secondary = self._find_server(
+            api, wwns, sclivevolume['secondaryScSerialNumber'])
+
+        # No? Create it.
+        if secondary is None:
+            secondary = api.create_server(
+                wwns, sclivevolume['secondaryScSerialNumber'])
+        if secondary:
+            if api.map_secondary_volume(sclivevolume, secondary):
+                # Get mappings.
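+                # Look up the newly mapped secondary volume and return
+                # its wwns and initiator target map.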
+                secondaryvol = api.get_volume(
+                    sclivevolume['secondaryVolume']['instanceId'])
+                if secondaryvol:
+                    return api.find_wwns(secondaryvol, secondary)
+        LOG.warning(_LW('Unable to map live volume secondary volume'
+                        ' %(vol)s to secondary server wwns: %(wwns)r'),
+                    {'vol': sclivevolume['secondaryVolume']['instanceName'],
+                     'wwns': wwns})
+        return None, [], {}
+
     @fczm_utils.RemoveFCZone
     def terminate_connection(self, volume, connector, force=False, **kwargs):
         # Get our volume name
         volume_name = volume.get('id')
         provider_id = volume.get('provider_id')
+        islivevol = self._is_live_vol(volume)
         LOG.debug('Terminate connection: %s', volume_name)
         with self._client.open_connection() as api:
             try:
-                scserver = None
                 wwpns = connector.get('wwpns')
-                for wwn in wwpns:
-                    scserver = api.find_server(wwn)
-                    if scserver is not None:
-                        break
+                scserver = self._find_server(api, wwpns)
 
                 # Find the volume on the storage center.
-                scvolume = api.find_volume(volume_name, provider_id)
+                scvolume = api.find_volume(volume_name, provider_id, islivevol)
                 # Get our target map so we can return it to free up a zone.
                 lun, targets, init_targ_map = api.find_wwns(scvolume,
                                                             scserver)
+
+                # Do we have extra live volume work?
+                if islivevol:
+                    # Get our volume and our swap state.
+                    sclivevolume, swapped = api.get_live_volume(
+                        provider_id)
+                    # Do not map to a failed over volume.
+                    if sclivevolume and not swapped:
+                        lvlun, lvtargets, lvinit_targ_map = (
+                            self.terminate_secondary(api, sclivevolume, wwpns))
+                        # Add to our return.
+                        if lvlun:
+                            targets += lvtargets
+                            init_targ_map.update(lvinit_targ_map)
+
                 # If we have a server and a volume lets unmap them.
                 if (scserver is not None and
                         scvolume is not None and
@@ -168,3 +234,24 @@
                 LOG.error(_LE('Failed to terminate connection'))
         raise exception.VolumeBackendAPIException(
             _('Terminate connection unable to connect to backend.'))
+
+    def terminate_secondary(self, api, sclivevolume, wwns):
+        # Find our server.
+        secondary = self._find_server(
+            api, wwns, sclivevolume['secondaryScSerialNumber'])
+        secondaryvol = api.get_volume(
+            sclivevolume['secondaryVolume']['instanceId'])
+        if secondary and secondaryvol:
+            # Get our map.
+            lun, targets, init_targ_map = api.find_wwns(secondaryvol,
+                                                        secondary)
+            # If we have a server and a volume lets unmap them.
+            ret = api.unmap_volume(secondaryvol, secondary)
+            LOG.debug('terminate_secondary: secondary volume %(name)s unmap '
+                      'to secondary server %(server)s result: %(result)r',
+                      {'name': secondaryvol['name'],
+                       'server': secondary['name'],
+                       'result': ret})
+            # Return the mapping info so the caller can clean up the zone.
+            return lun, targets, init_targ_map
+        return None, [], {}
diff --git a/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py b/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py
index 846b353d2..454223a27 100644
--- a/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py
+++ b/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py
@@ -18,7 +18,7 @@
 from oslo_log import log as logging
 from oslo_utils import excutils
 
 from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder import interface
 from cinder.volume import driver
 from cinder.volume.drivers.dell import dell_storagecenter_common
@@ -29,7 +29,7 @@
 
 class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
                                    driver.ISCSIDriver):
 
-    """Implements commands for Dell StorageCenter ISCSI management.
+ """Implements commands for Dell Storage Center ISCSI management. To enable the driver add the following line to the cinder configuration: volume_driver=cinder.volume.drivers.dell.DellStorageCenterISCSIDriver @@ -53,10 +53,12 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver, 2.5.0 - ManageableSnapshotsVD implemented. 3.0.0 - ProviderID utilized. 3.1.0 - Failback Supported. + 3.2.0 - Live Volume support. """ - VERSION = '3.1.0' + VERSION = '3.2.0' + CI_WIKI_NAME = "Dell_Storage_CI" def __init__(self, *args, **kwargs): super(DellStorageCenterISCSIDriver, self).__init__(*args, **kwargs) @@ -81,41 +83,35 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver, # known unique name. volume_name = volume.get('id') provider_id = volume.get('provider_id') + islivevol = self._is_live_vol(volume) initiator_name = connector.get('initiator') multipath = connector.get('multipath', False) - LOG.info(_LI('initialize_ connection: %(vol)s:%(initiator)s'), + LOG.info(_LI('initialize_ connection: %(vol)s:%(pid)s:' + '%(intr)s. Multipath is %(mp)r'), {'vol': volume_name, - 'initiator': initiator_name}) + 'pid': provider_id, + 'intr': initiator_name, + 'mp': multipath}) with self._client.open_connection() as api: try: # Find our server. - server = api.find_server(initiator_name) + scserver = api.find_server(initiator_name) # No? Create it. - if server is None: - server = api.create_server(initiator_name) + if scserver is None: + scserver = api.create_server([initiator_name]) # Find the volume on the storage center. scvolume = api.find_volume(volume_name, provider_id) # if we have a server and a volume lets bring them together. - if server is not None and scvolume is not None: - mapping = api.map_volume(scvolume, - server) + if scserver is not None and scvolume is not None: + mapping = api.map_volume(scvolume, scserver) if mapping is not None: # Since we just mapped our volume we had best update # our sc volume object. scvolume = api.get_volume(provider_id) # Our return. iscsiprops = {} - ip = None - port = None - if not multipath: - # We want to make sure we point to the specified - # ip address for our target_portal return. This - # isn't an issue with multipath since it should - # try all the alternate portal. - ip = self.configuration.iscsi_ip_address - port = self.configuration.iscsi_port # Three cases that should all be satisfied with the # same return of Target_Portal and Target_Portals. @@ -128,9 +124,24 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver, # 3. OS brick is calling us in single path mode so # we want to return Target_Portal and # Target_Portals as alternates. - iscsiprops = (api.find_iscsi_properties(scvolume, - ip, - port)) + iscsiprops = api.find_iscsi_properties(scvolume) + + # If this is a live volume we need to map up our + # secondary volume. + if islivevol: + sclivevolume, swapped = api.get_live_volume( + provider_id) + # Only map if we are not swapped. + if sclivevolume and not swapped: + secondaryprops = self.initialize_secondary( + api, sclivevolume, initiator_name) + # Combine with iscsiprops + iscsiprops['target_iqns'] += ( + secondaryprops['target_iqns']) + iscsiprops['target_portals'] += ( + secondaryprops['target_portals']) + iscsiprops['target_luns'] += ( + secondaryprops['target_luns']) # Return our iscsi properties. 
                         iscsiprops['discard'] = True
@@ -151,11 +162,50 @@
             raise exception.VolumeBackendAPIException(
                 _('Unable to map volume'))
 
+    def initialize_secondary(self, api, sclivevolume, initiatorname):
+        """Initialize the secondary connection of a live volume pair.
+
+        :param api: Dell SC api.
+        :param sclivevolume: Dell SC live volume object.
+        :param initiatorname: Cinder iscsi initiator from the connector.
+        :return: ISCSI properties.
+        """
+
+        # Find our server.
+        secondary = api.find_server(initiatorname,
+                                    sclivevolume['secondaryScSerialNumber'])
+        # No? Create it.
+        if secondary is None:
+            secondary = api.create_server(
+                [initiatorname], sclivevolume['secondaryScSerialNumber'])
+        if secondary:
+            if api.map_secondary_volume(sclivevolume, secondary):
+                # Get our volume and get our properties.
+                secondaryvol = api.get_volume(
+                    sclivevolume['secondaryVolume']['instanceId'])
+                if secondaryvol:
+                    return api.find_iscsi_properties(secondaryvol)
+        # Dummy return on failure.
+        data = {'target_discovered': False,
+                'target_iqn': None,
+                'target_iqns': [],
+                'target_portal': None,
+                'target_portals': [],
+                'target_lun': None,
+                'target_luns': [],
+                }
+        LOG.warning(_LW('Unable to map live volume secondary volume'
+                        ' %(vol)s to secondary server initiator: %(init)r'),
+                    {'vol': sclivevolume['secondaryVolume']['instanceName'],
+                     'init': initiatorname})
+        return data
+
     def terminate_connection(self, volume, connector, force=False, **kwargs):
         # Grab some initial info.
         initiator_name = connector.get('initiator')
         volume_name = volume.get('id')
         provider_id = volume.get('provider_id')
+        islivevol = self._is_live_vol(volume)
         LOG.debug('Terminate connection: %(vol)s:%(initiator)s',
                   {'vol': volume_name,
                    'initiator': initiator_name})
@@ -165,6 +215,13 @@
                 # Find the volume on the storage center.
                 scvolume = api.find_volume(volume_name, provider_id)
 
+                # Unmap our secondary if it isn't swapped.
+                if islivevol:
+                    sclivevolume, swapped = api.get_live_volume(provider_id)
+                    if sclivevolume and not swapped:
+                        self.terminate_secondary(api, sclivevolume,
+                                                 initiator_name)
+
                 # If we have a server and a volume lets pull them apart.
                 if (scserver is not None and
                         scvolume is not None and
@@ -179,3 +236,11 @@
                          'vol': volume_name})
         raise exception.VolumeBackendAPIException(
             _('Terminate connection failed'))
+
+    def terminate_secondary(self, api, sclivevolume, initiatorname):
+        # Find our server.
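+        # on the secondary SC so we can unmap the secondary volume from it.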
+ secondary = api.find_server(initiatorname, + sclivevolume['secondaryScSerialNumber']) + secondaryvol = api.get_volume( + sclivevolume['secondaryVolume']['instanceId']) + return api.unmap_volume(secondaryvol, secondary) diff --git a/cinder/volume/drivers/disco/disco.py b/cinder/volume/drivers/disco/disco.py index 6788b90d3..a9d352a77 100644 --- a/cinder/volume/drivers/disco/disco.py +++ b/cinder/volume/drivers/disco/disco.py @@ -88,6 +88,7 @@ class DiscoDriver(driver.VolumeDriver): """Execute commands related to DISCO Volumes.""" VERSION = "1.0" + CI_WIKI_NAME = "ITRI_DISCO_CI" def __init__(self, *args, **kwargs): """Init Disco driver : get configuration, create client.""" diff --git a/cinder/volume/drivers/dothill/dothill_client.py b/cinder/volume/drivers/dothill/dothill_client.py index 5eb0a7d08..dbcd5bdc9 100644 --- a/cinder/volume/drivers/dothill/dothill_client.py +++ b/cinder/volume/drivers/dothill/dothill_client.py @@ -14,12 +14,13 @@ # under the License. # -from hashlib import md5 +import hashlib import math import time from lxml import etree from oslo_log import log as logging +from oslo_utils import units import requests import six @@ -49,7 +50,7 @@ class DotHillClient(object): hash_ = "%s_%s" % (self._login, self._password) if six.PY3: hash_ = hash_.encode('utf-8') - hash_ = md5(hash_) + hash_ = hashlib.md5(hash_) digest = hash_.hexdigest() url = self._base_url + "/login/" + digest @@ -161,7 +162,7 @@ class DotHillClient(object): return False def _get_size(self, size): - return int(math.ceil(float(size) * 512 / (10 ** 9))) + return int(math.ceil(float(size) * 512 / (units.G))) def backend_stats(self, backend_name, backend_type): stats = {'free_capacity_gb': 0, diff --git a/cinder/volume/drivers/dothill/dothill_fc.py b/cinder/volume/drivers/dothill/dothill_fc.py index bc5da70d1..0b368b14a 100644 --- a/cinder/volume/drivers/dothill/dothill_fc.py +++ b/cinder/volume/drivers/dothill/dothill_fc.py @@ -41,6 +41,9 @@ class DotHillFCDriver(cinder.volume.driver.FibreChannelDriver): VERSION = "1.0" + # ThirdPartySystems CI wiki + CI_WIKI_NAME = "Vedams_DotHillDriver_CI" + def __init__(self, *args, **kwargs): super(DotHillFCDriver, self).__init__(*args, **kwargs) self.common = None diff --git a/cinder/volume/drivers/dothill/dothill_iscsi.py b/cinder/volume/drivers/dothill/dothill_iscsi.py index b92a71b01..bfd25542d 100644 --- a/cinder/volume/drivers/dothill/dothill_iscsi.py +++ b/cinder/volume/drivers/dothill/dothill_iscsi.py @@ -50,6 +50,9 @@ class DotHillISCSIDriver(cinder.volume.driver.ISCSIDriver): VERSION = "1.0" + # ThirdPartySystems CI wiki + CI_WIKI_NAME = "Vedams_DotHillDriver_CI" + def __init__(self, *args, **kwargs): super(DotHillISCSIDriver, self).__init__(*args, **kwargs) self.common = None diff --git a/cinder/volume/drivers/drbdmanagedrv.py b/cinder/volume/drivers/drbdmanagedrv.py index 01d0b2403..fc9e251ab 100644 --- a/cinder/volume/drivers/drbdmanagedrv.py +++ b/cinder/volume/drivers/drbdmanagedrv.py @@ -63,9 +63,15 @@ drbd_opts = [ cfg.StrOpt('drbdmanage_resource_policy', default='{"ratio": "0.51", "timeout": "60"}', help='Resource deployment completion wait policy.'), + cfg.StrOpt('drbdmanage_disk_options', + default='{"c-min-rate": "4M"}', + help='Disk options to set on new resources. 
' + 'See http://www.drbd.org/en/doc/users-guide-90/re-drbdconf' + ' for all the details.'), cfg.StrOpt('drbdmanage_net_options', default='{"connect-int": "4", "allow-two-primaries": "yes", ' - '"ko-count": "30"}', + '"ko-count": "30", "max-buffers": "20000", ' + '"ping-timeout": "100"}', help='Net options to set on new resources. ' 'See http://www.drbd.org/en/doc/users-guide-90/re-drbdconf' ' for all the details.'), @@ -124,6 +130,9 @@ class DrbdManageBaseDriver(driver.VolumeDriver): drbdmanage_dbus_name = 'org.drbd.drbdmanaged' drbdmanage_dbus_interface = '/interface' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Cinder_Jenkins" + def __init__(self, *args, **kwargs): self.empty_list = dbus.Array([], signature="a(s)") self.empty_dict = dbus.Array([], signature="a(ss)") @@ -158,6 +167,8 @@ class DrbdManageBaseDriver(driver.VolumeDriver): self.configuration.safe_get('drbdmanage_resource_options')) self.net_options = js_decoder.decode( self.configuration.safe_get('drbdmanage_net_options')) + self.disk_options = js_decoder.decode( + self.configuration.safe_get('drbdmanage_disk_options')) self.plugin_resource = self.configuration.safe_get( 'drbdmanage_resource_plugin') @@ -467,6 +478,28 @@ class DrbdManageBaseDriver(driver.VolumeDriver): message = _('Got bad path information from DRBDmanage! (%s)') % data raise exception.VolumeBackendAPIException(data=message) + def _push_drbd_options(self, d_res_name): + res_opt = {'resource': d_res_name, + 'target': 'resource', + 'type': 'reso'} + res_opt.update(self.resource_options) + res = self.call_or_reconnect(self.odm.set_drbdsetup_props, res_opt) + self._check_result(res) + + res_opt = {'resource': d_res_name, + 'target': 'resource', + 'type': 'neto'} + res_opt.update(self.net_options) + res = self.call_or_reconnect(self.odm.set_drbdsetup_props, res_opt) + self._check_result(res) + + res_opt = {'resource': d_res_name, + 'target': 'resource', + 'type': 'disko'} + res_opt.update(self.disk_options) + res = self.call_or_reconnect(self.odm.set_drbdsetup_props, res_opt) + self._check_result(res) + def create_volume(self, volume): """Creates a DRBD resource. @@ -482,19 +515,7 @@ class DrbdManageBaseDriver(driver.VolumeDriver): self.empty_dict) self._check_result(res, ignore=[dm_exc.DM_EEXIST], ret=None) - res_opt = {'resource': d_res_name, - 'target': 'resource', - 'type': 'reso'} - res_opt.update(self.resource_options) - res = self.call_or_reconnect(self.odm.set_drbdsetup_props, res_opt) - self._check_result(res) - - res_opt = {'resource': d_res_name, - 'target': 'resource', - 'type': 'neto'} - res_opt.update(self.net_options) - res = self.call_or_reconnect(self.odm.set_drbdsetup_props, res_opt) - self._check_result(res) + self._push_drbd_options(d_res_name) # If we get DM_EEXIST, then the volume already exists, eg. because # deploy gave an error on a previous try (like ENOSPC). 
@@ -604,6 +625,8 @@ class DrbdManageBaseDriver(driver.VolumeDriver): v_props) self._check_result(res, ignore=[dm_exc.DM_ENOENT]) + self._push_drbd_options(d_res_name) + # TODO(PM): CG okay = self._call_policy_plugin(self.plugin_resource, self.policy_resource, diff --git a/cinder/volume/drivers/emc/emc_vmax_common.py b/cinder/volume/drivers/emc/emc_vmax_common.py index e24e14fad..a7bd8c886 100644 --- a/cinder/volume/drivers/emc/emc_vmax_common.py +++ b/cinder/volume/drivers/emc/emc_vmax_common.py @@ -102,7 +102,10 @@ class EMCVMAXCommon(object): pool_info = {'backend_name': None, 'config_file': None, - 'arrays_info': {}} + 'arrays_info': {}, + 'max_over_subscription_ratio': None, + 'reserved_percentage': None + } def __init__(self, prtcl, version, configuration=None): @@ -137,6 +140,10 @@ class EMCVMAXCommon(object): self.pool_info['backend_name'] = ( self.configuration.safe_get('volume_backend_name')) + self.pool_info['max_over_subscription_ratio'] = ( + self.configuration.safe_get('max_over_subscription_ratio')) + self.pool_info['reserved_percentage'] = ( + self.configuration.safe_get('reserved_percentage')) LOG.debug( "Updating volume stats on file %(emcConfigFileName)s on " "backend %(backendName)s.", @@ -378,6 +385,7 @@ class EMCVMAXCommon(object): """ portGroupName = None extraSpecs = self._initial_setup(volume) + is_multipath = connector.get('multipath', False) volumeName = volume['name'] LOG.info(_LI("Initialize connection: %(volume)s."), @@ -414,11 +422,13 @@ class EMCVMAXCommon(object): volume, connector, extraSpecs, maskingViewDict)) if self.protocol.lower() == 'iscsi': - return self._find_ip_protocol_endpoints( - self.conn, deviceInfoDict['storagesystem'], - portGroupName) - else: - return deviceInfoDict + deviceInfoDict['iscsi_ip_addresses'] = ( + self._find_ip_protocol_endpoints( + self.conn, deviceInfoDict['storagesystem'], + portGroupName)) + deviceInfoDict['is_multipath'] = is_multipath + + return deviceInfoDict def _attach_volume(self, volume, connector, extraSpecs, maskingViewDict, isLiveMigration=False): @@ -626,20 +636,27 @@ class EMCVMAXCommon(object): """Retrieve stats info.""" pools = [] backendName = self.pool_info['backend_name'] + max_oversubscription_ratio = ( + self.pool_info['max_over_subscription_ratio']) + reservedPercentage = self.pool_info['reserved_percentage'] + array_max_over_subscription = None + array_reserve_percent = None for arrayInfo in self.pool_info['arrays_info']: self._set_ecom_credentials(arrayInfo) # Check what type of array it is isV3 = self.utils.isArrayV3(self.conn, arrayInfo['SerialNumber']) if isV3: - location_info, total_capacity_gb, free_capacity_gb = ( - self._update_srp_stats(arrayInfo)) + (location_info, total_capacity_gb, free_capacity_gb, + provisioned_capacity_gb, + array_reserve_percent) = self._update_srp_stats(arrayInfo) poolName = ("%(slo)s+%(poolName)s+%(array)s" % {'slo': arrayInfo['SLO'], 'poolName': arrayInfo['PoolName'], 'array': arrayInfo['SerialNumber']}) else: # This is V2 - location_info, total_capacity_gb, free_capacity_gb = ( + (location_info, total_capacity_gb, free_capacity_gb, + provisioned_capacity_gb, array_max_over_subscription) = ( self._update_pool_stats(backendName, arrayInfo)) poolName = ("%(poolName)s+%(array)s" % {'poolName': arrayInfo['PoolName'], @@ -648,10 +665,25 @@ class EMCVMAXCommon(object): pool = {'pool_name': poolName, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, - 'reserved_percentage': 0, + 'provisioned_capacity_gb': provisioned_capacity_gb, 'QoS_support': 
False, 'location_info': location_info, - 'consistencygroup_support': True} + 'consistencygroup_support': True, + 'thin_provisioning_support': True, + 'thick_provisioning_support': False, + 'max_over_subscription_ratio': max_oversubscription_ratio + } + if array_max_over_subscription: + pool['max_over_subscription_ratio'] = ( + self.utils.override_ratio( + max_oversubscription_ratio, + array_max_over_subscription)) + + if array_reserve_percent and ( + array_reserve_percent > reservedPercentage): + pool['reserved_percentage'] = array_reserve_percent + else: + pool['reserved_percentage'] = reservedPercentage pools.append(pool) data = {'vendor_name': "EMC", @@ -662,6 +694,7 @@ class EMCVMAXCommon(object): # Use zero capacities here so we always use a pool. 'total_capacity_gb': 0, 'free_capacity_gb': 0, + 'provisioned_capacity_gb': 0, 'reserved_percentage': 0, 'pools': pools} @@ -674,20 +707,24 @@ class EMCVMAXCommon(object): :returns: location_info :returns: totalManagedSpaceGbs :returns: remainingManagedSpaceGbs + :returns: provisionedManagedSpaceGbs + :returns: array_reserve_percent """ - totalManagedSpaceGbs, remainingManagedSpaceGbs = ( - self.provisionv3.get_srp_pool_stats(self.conn, - arrayInfo)) + (totalManagedSpaceGbs, remainingManagedSpaceGbs, + provisionedManagedSpaceGbs, array_reserve_percent) = ( + self.provisionv3.get_srp_pool_stats(self.conn, arrayInfo)) LOG.info(_LI( "Capacity stats for SRP pool %(poolName)s on array " "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " - "free_capacity_gb=%(free_capacity_gb)lu"), + "free_capacity_gb=%(free_capacity_gb)lu, " + "provisioned_capacity_gb=%(provisioned_capacity_gb)lu"), {'poolName': arrayInfo['PoolName'], 'arrayName': arrayInfo['SerialNumber'], 'total_capacity_gb': totalManagedSpaceGbs, - 'free_capacity_gb': remainingManagedSpaceGbs}) + 'free_capacity_gb': remainingManagedSpaceGbs, + 'provisioned_capacity_gb': provisionedManagedSpaceGbs}) location_info = ("%(arrayName)s#%(poolName)s#%(slo)s#%(workload)s" % {'arrayName': arrayInfo['SerialNumber'], @@ -695,7 +732,9 @@ class EMCVMAXCommon(object): 'slo': arrayInfo['SLO'], 'workload': arrayInfo['Workload']}) - return location_info, totalManagedSpaceGbs, remainingManagedSpaceGbs + return (location_info, totalManagedSpaceGbs, + remainingManagedSpaceGbs, provisionedManagedSpaceGbs, + array_reserve_percent) def retype(self, ctxt, volume, new_type, diff, host): """Migrate volume to another host using retype. @@ -1270,6 +1309,7 @@ class EMCVMAXCommon(object): :returns: string -- configuration file """ extraSpecs = self.utils.get_volumetype_extraspecs(volume, volumeTypeId) + qosSpecs = self.utils.get_volumetype_qosspecs(volume, volumeTypeId) configGroup = None # If there are no extra specs then the default case is assumed. @@ -1277,8 +1317,7 @@ class EMCVMAXCommon(object): configGroup = self.configuration.config_group configurationFile = self._register_config_file_from_config_group( configGroup) - - return extraSpecs, configurationFile + return extraSpecs, configurationFile, qosSpecs def _get_ecom_connection(self): """Get the ecom connection. 
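The stats hunk above lets array-reported values adjust the operator-configured pool policy: the oversubscription ratio is passed through override_ratio() (added to emc_vmax_utils.py later in this patch, which keeps the larger of the two ratios), and the reserved percentage is raised to the array reserve percent when that is higher. A self-contained sketch of that selection logic, with illustrative numbers:

    def effective_pool_policy(configured_ratio, configured_reserved,
                              array_ratio, array_reserve_percent):
        # Mirrors the pool dict population above: the larger oversubscription
        # ratio wins (see override_ratio), as does the larger reserve percent.
        ratio = configured_ratio
        if array_ratio:
            ratio = max(float(configured_ratio), float(array_ratio))
        reserved = configured_reserved
        if array_reserve_percent and array_reserve_percent > configured_reserved:
            reserved = array_reserve_percent
        return ratio, reserved

    # EMCMaxSubscriptionPercent=150 yields an array ratio of 1.5;
    # the operator configured 20.0 and no reserved percentage.
    print(effective_pool_policy(20.0, 0, 1.5, 10))   # -> (20.0, 10)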
@@ -1715,7 +1754,7 @@ class EMCVMAXCommon(object): :raises: VolumeBackendAPIException """ try: - extraSpecs, configurationFile = ( + extraSpecs, configurationFile, qosSpecs = ( self._set_config_file_and_get_extra_specs( volume, volumeTypeId)) @@ -1741,6 +1780,9 @@ class EMCVMAXCommon(object): else: # V2 extra specs extraSpecs = self._set_v2_extra_specs(extraSpecs, poolRecord) + if (qosSpecs.get('qos_specs') + and qosSpecs['qos_specs']['consumer'] != "front-end"): + extraSpecs['qos'] = qosSpecs['qos_specs']['specs'] except Exception: import sys exceptionMessage = (_( @@ -2068,7 +2110,7 @@ class EMCVMAXCommon(object): self.utils.find_replication_service_capabilities(self.conn, storageSystem)) is_clone_license = self.utils.is_clone_licensed( - self.conn, repServCapabilityInstanceName) + self.conn, repServCapabilityInstanceName, extraSpecs[ISV3]) if is_clone_license is False: exceptionMessage = (_( @@ -2857,6 +2899,10 @@ class EMCVMAXCommon(object): LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) + # If qos exists, update storage group to reflect qos parameters + if 'qos' in extraSpecs: + self.utils.update_storagegroup_qos( + self.conn, defaultStorageGroupInstanceName, extraSpecs) self._add_volume_to_default_storage_group_on_create( volumeDict, volumeName, storageConfigService, @@ -2945,6 +2991,10 @@ class EMCVMAXCommon(object): sgInstanceName = self.provisionv3.create_storage_group_v3( self.conn, controllerConfigService, storageGroupName, poolName, slo, workload, extraSpecs) + # If qos exists, update storage group to reflect qos parameters + if 'qos' in extraSpecs: + self.utils.update_storagegroup_qos( + self.conn, sgInstanceName, extraSpecs) return sgInstanceName @@ -3193,7 +3243,8 @@ class EMCVMAXCommon(object): :param backendName: the backend name :param arrayInfo: the arrayInfo - :returns: location_info, total_capacity_gb, free_capacity_gb + :returns: location_info, total_capacity_gb, free_capacity_gb, + provisioned_capacity_gb, array_max_over_subscription """ if arrayInfo['FastPolicy']: @@ -3216,7 +3267,8 @@ class EMCVMAXCommon(object): if (arrayInfo['FastPolicy'] is not None and isTieringPolicySupported is True): # FAST enabled - total_capacity_gb, free_capacity_gb = ( + (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, + array_max_over_subscription) = ( self.fast.get_capacities_associated_to_policy( self.conn, arrayInfo['SerialNumber'], arrayInfo['FastPolicy'])) @@ -3229,7 +3281,8 @@ class EMCVMAXCommon(object): 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb}) else: # NON-FAST - total_capacity_gb, free_capacity_gb = ( + (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, + array_max_over_subscription) = ( self.utils.get_pool_capacities(self.conn, arrayInfo['PoolName'], arrayInfo['SerialNumber'])) @@ -3247,7 +3300,8 @@ class EMCVMAXCommon(object): 'poolName': arrayInfo['PoolName'], 'policyName': arrayInfo['FastPolicy']}) - return location_info, total_capacity_gb, free_capacity_gb + return (location_info, total_capacity_gb, free_capacity_gb, + provisioned_capacity_gb, array_max_over_subscription) def _set_v2_extra_specs(self, extraSpecs, poolRecord): """Set the VMAX V2 extra specs. 
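The QoS hunks above thread volume-type QoS specs from _initial_setup() down to update_storagegroup_qos(). A minimal sketch of the filtering step, assuming volume_types.get_volume_type_qos_specs() returns a dict of the form {'qos_specs': {'consumer': ..., 'specs': {...}}} as the code above expects; the spec values are illustrative:

    # Illustrative result of volume_types.get_volume_type_qos_specs()
    qos_specs = {'qos_specs': {'consumer': 'back-end',
                               'specs': {'maxIOPS': '4000',
                                         'maxMBPS': '4000',
                                         'DistributionType': 'Always'}}}
    extra_specs = {}
    if (qos_specs.get('qos_specs')
            and qos_specs['qos_specs']['consumer'] != 'front-end'):
        # front-end QoS is enforced by the hypervisor; only back-end (or
        # both) consumers are pushed down to the VMAX storage group
        extra_specs['qos'] = qos_specs['qos_specs']['specs']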
@@ -4402,7 +4456,8 @@ class EMCVMAXCommon(object): ipaddress = ( self.utils.get_iscsi_ip_address( conn, ipendpointinstancename)) - foundipaddresses.append(ipaddress) + if ipaddress: + foundipaddresses.append(ipaddress) return foundipaddresses def _extend_v3_volume(self, volumeInstance, volumeName, newSize, diff --git a/cinder/volume/drivers/emc/emc_vmax_fast.py b/cinder/volume/drivers/emc/emc_vmax_fast.py index 5fc9b8ff0..3f88a5371 100644 --- a/cinder/volume/drivers/emc/emc_vmax_fast.py +++ b/cinder/volume/drivers/emc/emc_vmax_fast.py @@ -697,14 +697,19 @@ class EMCVMAXFast(object): :param policyName: the name of policy rule, a string value :returns: int -- total capacity in GB of all pools associated with the policy - :returns: int -- (total capacity-EMCSubscribedCapacity) in GB of all - pools associated with the policy + :returns: int -- real physical capacity in GB of all pools + available to be used + :returns: int -- provisioned capacity in GB (EMCSubscribedCapacity), + the capacity that has been provisioned + :returns: int -- the maximum oversubscription ratio """ policyInstanceName = self.get_tier_policy_by_name( conn, arrayName, policyName) total_capacity_gb = 0 - allocated_capacity_gb = 0 + provisioned_capacity_gb = 0 + free_capacity_gb = 0 + array_max_over_subscription = None tierInstanceNames = self.get_associated_tier_from_tier_policy( conn, policyInstanceName) @@ -726,17 +731,25 @@ class EMCVMAXFast(object): break total_capacity_gb += self.utils.convert_bits_to_gbs( storagePoolInstance['TotalManagedSpace']) - allocated_capacity_gb += self.utils.convert_bits_to_gbs( + provisioned_capacity_gb += self.utils.convert_bits_to_gbs( storagePoolInstance['EMCSubscribedCapacity']) + free_capacity_gb += self.utils.convert_bits_to_gbs( + storagePoolInstance['RemainingManagedSpace']) + try: + array_max_over_subscription = ( + self.utils.get_ratio_from_max_sub_per( + storagePoolInstance['EMCMaxSubscriptionPercent'])) + except KeyError: + array_max_over_subscription = 65534 LOG.debug( "PolicyName:%(policyName)s, pool: %(poolInstanceName)s, " - "allocated_capacity_gb = %(allocated_capacity_gb)lu.", + "provisioned_capacity_gb = %(provisioned_capacity_gb)lu.", {'policyName': policyName, 'poolInstanceName': poolInstanceName, - 'allocated_capacity_gb': allocated_capacity_gb}) + 'provisioned_capacity_gb': provisioned_capacity_gb}) - free_capacity_gb = total_capacity_gb - allocated_capacity_gb - return (total_capacity_gb, free_capacity_gb) + return (total_capacity_gb, free_capacity_gb, + provisioned_capacity_gb, array_max_over_subscription) def get_or_create_default_storage_group( self, conn, controllerConfigService, fastPolicyName, diff --git a/cinder/volume/drivers/emc/emc_vmax_fc.py b/cinder/volume/drivers/emc/emc_vmax_fc.py index dd2559a55..cefcfe9ed 100644 --- a/cinder/volume/drivers/emc/emc_vmax_fc.py +++ b/cinder/volume/drivers/emc/emc_vmax_fc.py @@ -67,10 +67,16 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver): - Replacement of EMCGetTargetEndpoints api (bug #1512791) - VMAX3 snapvx improvements (bug #1522821) - Operations and timeout issues (bug #1538214) - + 2.4.0 - EMC VMAX - locking SG for concurrent threads (bug #1554634) + - SnapVX licensing checks for VMAX3 (bug #1587017) + - VMAX oversubscription Support (blueprint vmax-oversubscription) + - QoS support (blueprint vmax-qos) """ - VERSION = "2.3.0" + VERSION = "2.4.0" + + # ThirdPartySystems wiki + CI_WIKI_NAME = "EMC_VMAX_CI" def __init__(self, *args, **kwargs):
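The fast.py hunk above derives a per-array oversubscription limit from the SMI-S EMCMaxSubscriptionPercent property, with 65534 used as an effectively-unlimited fallback when the property is missing. A standalone sketch of the conversion, mirroring get_ratio_from_max_sub_per() as defined in emc_vmax_utils.py later in this patch:

    def ratio_from_max_sub_percent(max_subscription_percent):
        # '150' -> 1.5; '0' means the limit is not set on the pool
        if max_subscription_percent == '0':
            return None
        try:
            return float(int(max_subscription_percent)) / 100
        except ValueError:
            return None

    # Illustrative pool properties without the percent property set
    pool_properties = {'TotalManagedSpace': 0}
    try:
        array_max_over_subscription = ratio_from_max_sub_percent(
            pool_properties['EMCMaxSubscriptionPercent'])
    except KeyError:
        # Missing property: treat the array limit as effectively unlimited
        array_max_over_subscription = 65534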
diff --git a/cinder/volume/drivers/emc/emc_vmax_iscsi.py b/cinder/volume/drivers/emc/emc_vmax_iscsi.py index a6de60d7c..a5df907db 100644 --- a/cinder/volume/drivers/emc/emc_vmax_iscsi.py +++ b/cinder/volume/drivers/emc/emc_vmax_iscsi.py @@ -73,10 +73,19 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): - Replacement of EMCGetTargetEndpoints api (bug #1512791) - VMAX3 snapvx improvements (bug #1522821) - Operations and timeout issues (bug #1538214) + 2.4.0 - EMC VMAX - locking SG for concurrent threads (bug #1554634) + - SnapVX licensing checks for VMAX3 (bug #1587017) + - VMAX oversubscription Support (blueprint vmax-oversubscription) + - QoS support (blueprint vmax-qos) + - VMAX2/VMAX3 iscsi multipath support (iscsi only) + https://blueprints.launchpad.net/cinder/+spec/vmax-iscsi-multipath """ - VERSION = "2.3.0" + VERSION = "2.4.0" + + # ThirdPartySystems wiki + CI_WIKI_NAME = "EMC_VMAX_CI" def __init__(self, *args, **kwargs): @@ -180,13 +189,32 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): 'volume_id': '12345678-1234-4321-1234-123456789012', } } - + Example return value (multipath is enabled):: + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqns': ['iqn.2010-10.org.openstack:volume-00001', + 'iqn.2010-10.org.openstack:volume-00002'], + 'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'], + 'target_luns': [1, 1], + } + } """ - self.iscsi_ip_addresses = self.common.initialize_connection( + device_info = self.common.initialize_connection( volume, connector) + try: + self.iscsi_ip_addresses = device_info['iscsi_ip_addresses'] + is_multipath = device_info['is_multipath'] + except KeyError as ex: + exception_message = (_("Cannot get iSCSI IP addresses or " + "multipath flag. Exception is %(ex)s. ") % {'ex': ex}) + + raise exception.VolumeBackendAPIException(data=exception_message) iscsi_properties = self.smis_get_iscsi_properties( - volume, connector) + volume, connector, is_multipath) LOG.info(_LI("Leaving initialize_connection: %s"), iscsi_properties) return { @@ -201,9 +229,9 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): '-t', 'sendtargets', '-p', iscsi_ip_address, run_as_root=True) - return out, _err, False, None + return out, _err, None except Exception as ex: - return None, None, True, ex + return None, None, ex def smis_do_iscsi_discovery(self, volume): """Calls iscsiadm with each iscsi ip address in the list""" @@ -212,12 +240,13 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): if len(self.iscsi_ip_addresses) == 0: LOG.error(_LE("The list of iscsi_ip_addresses is empty")) return targets - + outList = [] for iscsi_ip_address in self.iscsi_ip_addresses: - out, _err, go_again, ex = self._call_iscsiadm(iscsi_ip_address) - if not go_again: - break - if not out: + out, _err, ex = self._call_iscsiadm(iscsi_ip_address) + if out: + outList.append(out) + + if len(outList) == 0: if ex: exception_message = (_("Unsuccessful iscsiadm. " "Exception is %(ex)s. ") @@ -229,74 +258,104 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): LOG.info(_LI( "smis_do_iscsi_discovery is: %(out)s."), {'out': out}) + for out in outList: + for target in out.splitlines(): + targets.append(target) - for target in out.splitlines(): - targets.append(target) + outTargets = self._parse_target_list(targets) + return outTargets - return targets + def _parse_target_list(self, targets): + """Parse target list into usable format. 
- def smis_get_iscsi_properties(self, volume, connector): + :param targets: list of all targets + :return: outTargets + """ + outTargets = [] + for target in targets: + results = target.split(" ") + properties = {} + properties['target_portal'] = results[0].split(",")[0] + properties['target_iqn'] = results[1] + outTargets.append(properties) + return outTargets + + def smis_get_iscsi_properties(self, volume, connector, is_multipath): """Gets iscsi configuration. We ideally get saved information in the volume entity, but fall back to discovery if need be. Discovery may be completely removed in future The properties are: - - - `target_discovered` - boolean indicating whether discovery was - used - - `target_iqn` - the IQN of the iSCSI target - - `target_portal` - the portal of the iSCSI target - - `target_lun` - the lun of the iSCSI target - - `volume_id` - the UUID of the volume - - `auth_method`, `auth_username`, `auth_password` - the - authentication details. Right now, either auth_method is not - present meaning no authentication, or auth_method == `CHAP` - meaning use CHAP with the specified credentials. - + :target_discovered: boolean indicating whether discovery was used + :target_iqn: the IQN of the iSCSI target + :target_portal: the portal of the iSCSI target + :target_lun: the lun of the iSCSI target + :volume_id: the UUID of the volume + :auth_method:, :auth_username:, :auth_password: + the authentication details. Right now, either auth_method is not + present meaning no authentication, or auth_method == `CHAP` + meaning use CHAP with the specified credentials. """ - properties = {} - location = self.smis_do_iscsi_discovery(volume) - if not location: + targets = self.smis_do_iscsi_discovery(volume) + if len(targets) == 0: raise exception.InvalidVolume(_("Could not find iSCSI export " - " for volume %(volumeName)s.") + "for volume %(volumeName)s.") % {'volumeName': volume['name']}) - LOG.debug("ISCSI Discovery: Found %s", location) - properties['target_discovered'] = True + LOG.debug("ISCSI Discovery: Found %s", targets) device_info = self.common.find_device_number( volume, connector['host']) - if device_info is None or device_info['hostlunid'] is None: + isError = False + if device_info: + try: + lun_id = device_info['hostlunid'] + except KeyError: + isError = True + else: + isError = True + + if isError: + LOG.error(_LE("Unable to get the lun id")) exception_message = (_("Cannot find device number for volume " "%(volumeName)s.") % {'volumeName': volume['name']}) raise exception.VolumeBackendAPIException(data=exception_message) - device_number = device_info['hostlunid'] + properties = {'target_discovered': False, + 'target_iqn': 'unknown', + 'target_iqns': None, + 'target_portal': 'unknown', + 'target_portals': None, + 'target_lun': 'unknown', + 'target_luns': None, + 'volume_id': volume['id']} + + if len(self.iscsi_ip_addresses) > 0: + if len(self.iscsi_ip_addresses) > 1 and is_multipath: + properties['target_iqns'] = [t['target_iqn'] for t in targets] + properties['target_portals'] = ( + [t['target_portal'] for t in targets]) + properties['target_luns'] = [lun_id] * len(targets) + properties['target_discovered'] = True + properties['target_iqn'] = [t['target_iqn'] for t in targets][0] + properties['target_portal'] = ( + [t['target_portal'] for t in targets][0]) + properties['target_lun'] = lun_id + else: + LOG.error(_LE('Failed to find available iSCSI targets.')) LOG.info(_LI( - "location is: %(location)s"), {'location': location}) - - for loc in location: - results = 
loc.split(" ") - properties['target_portal'] = results[0].split(",")[0] - properties['target_iqn'] = results[1] - - properties['target_lun'] = device_number - - properties['volume_id'] = volume['id'] - + "ISCSI properties: %(properties)s."), {'properties': properties}) LOG.info(_LI( - "ISCSI properties: %(properties)s"), {'properties': properties}) - LOG.info(_LI( - "ISCSI volume is: %(volume)s"), {'volume': volume}) + "ISCSI volume is: %(volume)s."), {'volume': volume}) if 'provider_auth' in volume: auth = volume['provider_auth'] LOG.info(_LI( - "AUTH properties: %(authProps)s"), {'authProps': auth}) + "AUTH properties: %(authProps)s."), {'authProps': auth}) if auth is not None: (auth_method, auth_username, auth_secret) = auth.split() @@ -405,7 +464,10 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): return self.common.manage_existing_get_size(volume, external_ref) def unmanage(self, volume): - """Export VMAX volume and leave volume intact on the backend array.""" + """Export VMAX volume from Cinder. + + Leave the volume intact on the backend array. + """ return self.common.unmanage(volume) def update_consistencygroup(self, context, group, diff --git a/cinder/volume/drivers/emc/emc_vmax_provision_v3.py b/cinder/volume/drivers/emc/emc_vmax_provision_v3.py index 0160a5828..ece84b050 100644 --- a/cinder/volume/drivers/emc/emc_vmax_provision_v3.py +++ b/cinder/volume/drivers/emc/emc_vmax_provision_v3.py @@ -704,9 +704,13 @@ class EMCVMAXProvisionV3(object): :param arrayInfo: the array dict :returns: totalCapacityGb :returns: remainingCapacityGb + :returns: subscribedCapacityGb + :returns: array_reserve_percent """ totalCapacityGb = -1 remainingCapacityGb = -1 + subscribedCapacityGb = -1 + array_reserve_percent = -1 storageSystemInstanceName = self.utils.find_storageSystem( conn, arrayInfo['SerialNumber']) @@ -735,6 +739,15 @@ class EMCVMAXProvisionV3(object): remainingCapacityGb = ( self.utils.convert_bits_to_gbs( remainingManagedSpace)) + elif properties[0] == 'EMCSubscribedCapacity': + cimProperties = properties[1] + subscribedManagedSpace = cimProperties.value + subscribedCapacityGb = ( + self.utils.convert_bits_to_gbs( + subscribedManagedSpace)) + elif properties[0] == 'EMCPercentReservedCapacity': + cimProperties = properties[1] + array_reserve_percent = int(cimProperties.value) except Exception: pass remainingSLOCapacityGb = ( @@ -751,7 +764,8 @@ class EMCVMAXProvisionV3(object): "not be what you expect."), {'remainingCapacityGb': remainingCapacityGb}) - return totalCapacityGb, remainingCapacityGb + return (totalCapacityGb, remainingCapacityGb, subscribedCapacityGb, + array_reserve_percent) def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName, arrayInfo, systemName): diff --git a/cinder/volume/drivers/emc/emc_vmax_utils.py b/cinder/volume/drivers/emc/emc_vmax_utils.py index 698351a14..2495e6fda 100644 --- a/cinder/volume/drivers/emc/emc_vmax_utils.py +++ b/cinder/volume/drivers/emc/emc_vmax_utils.py @@ -41,6 +41,8 @@ except ImportError: STORAGEGROUPTYPE = 4 POSTGROUPTYPE = 3 CLONE_REPLICATION_TYPE = 10 +SYNC_SNAPSHOT_LOCAL = 6 +ASYNC_SNAPSHOT_LOCAL = 7 MAX_POOL_LENGTH = 16 MAX_FASTPOLICY_LENGTH = 14 @@ -976,7 +978,8 @@ class EMCVMAXUtils(object): :param conn: connection to the ecom server :param poolName: string value of the storage pool name :param storageSystemName: the storage system name - :returns: tuple -- (total_capacity_gb, free_capacity_gb) + :returns: tuple -- (total_capacity_gb, free_capacity_gb, + provisioned_capacity_gb) """ LOG.debug( "Retrieving capacity 
for pool %(poolName)s on array %(array)s.", @@ -995,10 +998,17 @@ class EMCVMAXUtils(object): poolInstanceName, LocalOnly=False) total_capacity_gb = self.convert_bits_to_gbs( storagePoolInstance['TotalManagedSpace']) - allocated_capacity_gb = self.convert_bits_to_gbs( + provisioned_capacity_gb = self.convert_bits_to_gbs( storagePoolInstance['EMCSubscribedCapacity']) - free_capacity_gb = total_capacity_gb - allocated_capacity_gb - return (total_capacity_gb, free_capacity_gb) + free_capacity_gb = self.convert_bits_to_gbs( + storagePoolInstance['RemainingManagedSpace']) + try: + array_max_over_subscription = self.get_ratio_from_max_sub_per( + storagePoolInstance['EMCMaxSubscriptionPercent']) + except KeyError: + array_max_over_subscription = 65534 + return (total_capacity_gb, free_capacity_gb, + provisioned_capacity_gb, array_max_over_subscription) def get_pool_by_name(self, conn, storagePoolName, storageSystemName): """Returns the instance name associated with a storage pool name. @@ -1073,6 +1083,28 @@ class EMCVMAXUtils(object): return extraSpecs + def get_volumetype_qosspecs(self, volume, volumeTypeId=None): + """Get the qos specs. + + :param volume: the volume dictionary + :param volumeTypeId: Optional override for volume['volume_type_id'] + :returns: dict -- qosSpecs - the qos specs + """ + qosSpecs = {} + + try: + if volumeTypeId: + type_id = volumeTypeId + else: + type_id = volume['volume_type_id'] + if type_id is not None: + qosSpecs = volume_types.get_volume_type_qos_specs(type_id) + + except Exception: + LOG.debug("Unable to get QoS specifications.") + + return qosSpecs + def get_volume_type_name(self, volume): """Get the volume type name. @@ -1692,7 +1724,7 @@ class EMCVMAXUtils(object): return foundRepServCapability - def is_clone_licensed(self, conn, capabilityInstanceName): + def is_clone_licensed(self, conn, capabilityInstanceName, isV3): """Check if the clone feature is licensed and enabled. :param conn: the connection to the ecom server @@ -1709,10 +1741,19 @@ class EMCVMAXUtils(object): LOG.debug("Found supported replication types: " "%(repTypes)s", {'repTypes': repTypes}) - if CLONE_REPLICATION_TYPE in repTypes: - # Clone is a supported replication type. - LOG.debug("Clone is licensed and enabled.") - return True + if isV3: + if (SYNC_SNAPSHOT_LOCAL in repTypes or + ASYNC_SNAPSHOT_LOCAL in repTypes): + # Snapshot is a supported replication type. + LOG.debug("Snapshot for VMAX3 is licensed and " + "enabled.") + return True + else: + if CLONE_REPLICATION_TYPE in repTypes: + # Clone is a supported replication type. + LOG.debug("Clone for VMAX2 is licensed and " + "enabled.") + return True return False def create_storage_hardwareId_instance_name( @@ -2587,3 +2628,90 @@ class EMCVMAXUtils(object): sgInstanceName = self.find_storage_masking_group( conn, controllerConfigService, storageGroupName) return storageGroupName, controllerConfigService, sgInstanceName + + def get_ratio_from_max_sub_per(self, max_subscription_percent): + """Get ratio from max subscription percent if it exists. + + Check if the max subscription is set on the pool, if it is convert + it to a ratio. 
+ + :param max_subscription_percent: max subscription percent + :returns: max_over_subscription_ratio + """ + if max_subscription_percent == '0': + return None + try: + max_subscription_percent_int = int(max_subscription_percent) + except ValueError: + LOG.error(_LE("Cannot convert max subscription percent to int.")) + return None + return float(max_subscription_percent_int) / 100 + + def override_ratio(self, max_over_sub_ratio, max_sub_ratio_from_per): + """Override ratio if necessary. + + The over subscription ratio will be overridden if the ratio derived + from the max subscription percent is greater than the user supplied + max oversubscription ratio. + + :param max_over_sub_ratio: user supplied over subscription ratio + :param max_sub_ratio_from_per: property on the pool + :returns: max_over_sub_ratio + """ + if max_over_sub_ratio: + try: + max_over_sub_ratio = max(float(max_over_sub_ratio), + float(max_sub_ratio_from_per)) + except ValueError: + max_over_sub_ratio = float(max_sub_ratio_from_per) + elif max_sub_ratio_from_per: + max_over_sub_ratio = float(max_sub_ratio_from_per) + + return max_over_sub_ratio + + def update_storagegroup_qos(self, conn, storagegroup, extraspecs): + """Update the storagegroupinstance with qos details. + + If maxIOPS or maxMBPS is in extraspecs, then DistributionType can be + modified in addition to maxIOPS and/or maxMBPS. + If maxIOPS or maxMBPS is NOT in extraspecs, we check to see if + either is set in StorageGroup. If so, then DistributionType can be + modified. + + :param conn: connection to the ecom server + :param storagegroup: the storagegroup instance name + :param extraspecs: extra specifications + """ + if type(storagegroup) is pywbem.cim_obj.CIMInstance: + storagegroupInstance = storagegroup + else: + storagegroupInstance = conn.GetInstance(storagegroup) + propertylist = [] + if 'maxIOPS' in extraspecs.get('qos'): + maxiops = self.get_num(extraspecs.get('qos').get('maxIOPS'), '32') + if maxiops != storagegroupInstance['EMCMaximumIO']: + storagegroupInstance['EMCMaximumIO'] = maxiops + propertylist.append('EMCMaximumIO') + if 'maxMBPS' in extraspecs.get('qos'): + maxmbps = self.get_num(extraspecs.get('qos').get('maxMBPS'), '32') + if maxmbps != storagegroupInstance['EMCMaximumBandwidth']: + storagegroupInstance['EMCMaximumBandwidth'] = maxmbps + propertylist.append('EMCMaximumBandwidth') + if 'DistributionType' in extraspecs.get('qos') and ( + propertylist or ( + storagegroupInstance['EMCMaximumBandwidth'] != 0) or ( + storagegroupInstance['EMCMaximumIO'] != 0)): + dynamicdict = {'never': 1, 'onfailure': 2, 'always': 3} + dynamicvalue = dynamicdict.get( + extraspecs.get('qos').get('DistributionType').lower()) + if dynamicvalue: + distributiontype = self.get_num(dynamicvalue, '16') + if distributiontype != ( + storagegroupInstance['EMCMaxIODynamicDistributionType'] + ): + storagegroupInstance['EMCMaxIODynamicDistributionType'] = ( + distributiontype) + propertylist.append('EMCMaxIODynamicDistributionType') + if propertylist: + modifiedInstance = conn.ModifyInstance(storagegroupInstance, PropertyList=propertylist) + return modifiedInstance
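update_storagegroup_qos() above maps the QoS spec keys onto CIM properties of the VMAX storage group. A condensed sketch of that mapping, assuming the extraspecs['qos'] dict shape shown earlier; plain ints stand in for the typed CIM integers the driver builds with get_num():

    # QoS spec keys -> storage group CIM properties (per the code above)
    qos = {'maxIOPS': '4000', 'maxMBPS': '4000', 'DistributionType': 'Always'}

    properties = {}
    if 'maxIOPS' in qos:
        properties['EMCMaximumIO'] = int(qos['maxIOPS'])
    if 'maxMBPS' in qos:
        properties['EMCMaximumBandwidth'] = int(qos['maxMBPS'])
    if 'DistributionType' in qos and properties:
        dynamicdict = {'never': 1, 'onfailure': 2, 'always': 3}
        dynamicvalue = dynamicdict.get(qos['DistributionType'].lower())
        if dynamicvalue:
            properties['EMCMaxIODynamicDistributionType'] = dynamicvalue
    # The driver then calls conn.ModifyInstance(storagegroupInstance,
    # PropertyList=...) with only the properties that actually changed.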
diff --git a/cinder/volume/drivers/emc/scaleio.py b/cinder/volume/drivers/emc/scaleio.py index 43412aec4..0369ee290 100644 --- a/cinder/volume/drivers/emc/scaleio.py +++ b/cinder/volume/drivers/emc/scaleio.py @@ -68,7 +68,15 @@ scaleio_opts = [ cfg.StrOpt('sio_storage_pool_name', help='Storage Pool name.'), cfg.StrOpt('sio_storage_pool_id', - help='Storage Pool ID.') + help='Storage Pool ID.'), + cfg.FloatOpt('sio_max_over_subscription_ratio', + # This option exists to provide a default value for the + # ScaleIO driver which is different than the global default. + help='max_over_subscription_ratio setting for the ScaleIO ' + 'driver. If set, this takes precedence over the ' + 'general max_over_subscription_ratio option. If ' + 'None, the general option is used. ' + 'Maximum value allowed for ScaleIO is 10.0.') ] CONF.register_opts(scaleio_opts) @@ -77,7 +85,8 @@ STORAGE_POOL_NAME = 'sio:sp_name' STORAGE_POOL_ID = 'sio:sp_id' PROTECTION_DOMAIN_NAME = 'sio:pd_name' PROTECTION_DOMAIN_ID = 'sio:pd_id' -PROVISIONING_KEY = 'sio:provisioning_type' +PROVISIONING_KEY = 'provisioning:type' +OLD_PROVISIONING_KEY = 'sio:provisioning_type' IOPS_LIMIT_KEY = 'sio:iops_limit' BANDWIDTH_LIMIT = 'sio:bandwidth_limit' QOS_IOPS_LIMIT_KEY = 'maxIOPS' @@ -88,9 +97,12 @@ QOS_BANDWIDTH_PER_GB = 'maxBWSperGB' BLOCK_SIZE = 8 OK_STATUS_CODE = 200 VOLUME_NOT_FOUND_ERROR = 79 +# This code belongs to older versions of ScaleIO +OLD_VOLUME_NOT_FOUND_ERROR = 78 VOLUME_NOT_MAPPED_ERROR = 84 VOLUME_ALREADY_MAPPED_ERROR = 81 MIN_BWS_SCALING_SIZE = 128 +SIO_MAX_OVERSUBSCRIPTION_RATIO = 10.0 @interface.volumedriver @@ -98,6 +110,10 @@ class ScaleIODriver(driver.VolumeDriver): """EMC ScaleIO Driver.""" VERSION = "2.0" + + # ThirdPartySystems wiki + CI_WIKI_NAME = "EMC_ScaleIO_CI" + scaleio_qos_keys = (QOS_IOPS_LIMIT_KEY, QOS_BANDWIDTH_LIMIT, QOS_IOPS_PER_GB, QOS_BANDWIDTH_PER_GB) @@ -159,7 +175,9 @@ class ScaleIODriver(driver.VolumeDriver): LOG.info(_LI( "Default provisioning type: %(provisioning_type)s."), {'provisioning_type': self.provisioning_type}) - + if self.configuration.sio_max_over_subscription_ratio is not None: + self.configuration.max_over_subscription_ratio = ( + self.configuration.sio_max_over_subscription_ratio) self.connector = connector.InitiatorConnector.factory( connector.SCALEIO, utils.get_root_helper(), device_scan_attempts= @@ -222,6 +240,15 @@ class ScaleIODriver(driver.VolumeDriver): "sio_storage_pools.")) raise exception.InvalidInput(reason=msg) + if (self.configuration.max_over_subscription_ratio is not None and + (self.configuration.max_over_subscription_ratio - + SIO_MAX_OVERSUBSCRIPTION_RATIO > 1)): + msg = (_("Max over subscription is configured to %(ratio).1f " + "while ScaleIO supports up to %(sio_ratio)s.") % + {'sio_ratio': SIO_MAX_OVERSUBSCRIPTION_RATIO, + 'ratio': self.configuration.max_over_subscription_ratio}) + raise exception.InvalidInput(reason=msg) + def _find_storage_pool_id_from_storage_type(self, storage_type): # Default to what was configured in configuration file if not defined. return storage_type.get(STORAGE_POOL_ID, @@ -242,8 +269,25 @@ class ScaleIODriver(driver.VolumeDriver): self.protection_domain_name) def _find_provisioning_type(self, storage_type): - return storage_type.get(PROVISIONING_KEY, - self.provisioning_type) + new_provisioning_type = storage_type.get(PROVISIONING_KEY) + old_provisioning_type = storage_type.get(OLD_PROVISIONING_KEY) + if new_provisioning_type is None and old_provisioning_type is not None: + LOG.info(_LI("Using sio:provisioning_type for defining " + "thin or thick volume will be deprecated in the " + "Ocata release of OpenStack. Please use the " + "provisioning:type configuration option.")) + provisioning_type = old_provisioning_type + else: + provisioning_type = new_provisioning_type + + if provisioning_type is not None: + if provisioning_type not in ('thick', 'thin'): + msg = _("Illegal provisioning type. 
The supported " + "provisioning types are 'thick' or 'thin'.") + raise exception.VolumeBackendAPIException(data=msg) + return provisioning_type + else: + return self.provisioning_type def _find_limit(self, storage_type, qos_key, extraspecs_key): qos_limit = (storage_type.get(qos_key) @@ -601,6 +645,9 @@ class ScaleIODriver(driver.VolumeDriver): return size return size + num - (size % num) + def _round_down_to_num_gran(self, size, num=8): + return size - (size % num) + def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume.""" volume_id = src_vref['provider_id'] @@ -781,18 +828,18 @@ class ScaleIODriver(driver.VolumeDriver): stats['vendor_name'] = 'EMC' stats['driver_version'] = self.VERSION stats['storage_protocol'] = 'scaleio' - stats['total_capacity_gb'] = 'unknown' - stats['free_capacity_gb'] = 'unknown' stats['reserved_percentage'] = 0 stats['QoS_support'] = True stats['consistencygroup_support'] = True - + stats['thick_provisioning_support'] = True + stats['thin_provisioning_support'] = True pools = [] verify_cert = self._get_verify_cert() - max_free_capacity = 0 + free_capacity = 0 total_capacity = 0 + provisioned_capacity = 0 for sp_name in self.storage_pools: splitted_name = sp_name.split(':') @@ -874,8 +921,11 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/types/StoragePool/instances/action/" "querySelectedStatistics") % req_vars + # The 'Km' in thinCapacityAllocatedInKm is a bug in REST API params = {'ids': [pool_id], 'properties': [ - "capacityInUseInKb", "capacityLimitInKb"]} + "capacityAvailableForVolumeAllocationInKb", + "capacityLimitInKb", "spareCapacityInKb", + "thickCapacityInUseInKb", "thinCapacityAllocatedInKm"]} r = requests.post( request, data=json.dumps(params), @@ -887,37 +937,50 @@ class ScaleIODriver(driver.VolumeDriver): response = r.json() LOG.info(_LI("Query capacity stats response: %s."), response) for res in response.values(): - capacityInUse = res['capacityInUseInKb'] - capacityLimit = res['capacityLimitInKb'] - total_capacity_gb = capacityLimit / units.Mi - used_capacity_gb = capacityInUse / units.Mi - free_capacity_gb = total_capacity_gb - used_capacity_gb + # Divide by two because ScaleIO creates a copy for each volume + total_capacity_kb = ( + (res['capacityLimitInKb'] - res['spareCapacityInKb']) / 2) + total_capacity_gb = (self._round_down_to_num_gran + (total_capacity_kb / units.Mi)) + # This property is already rounded + # to 8 GB granularity in backend + free_capacity_gb = ( + res['capacityAvailableForVolumeAllocationInKb'] / units.Mi) + # Divide by two because ScaleIO creates a copy for each volume + provisioned_capacity = ( + ((res['thickCapacityInUseInKb'] + + res['thinCapacityAllocatedInKm']) / 2) / units.Mi) LOG.info(_LI( "free capacity of pool %(pool)s is: %(free)s, " - "total capacity: %(total)s."), + "total capacity: %(total)s, " + "provisioned capacity: %(prov)s"), {'pool': pool_name, 'free': free_capacity_gb, - 'total': total_capacity_gb}) + 'total': total_capacity_gb, + 'prov': provisioned_capacity}) pool = {'pool_name': sp_name, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'QoS_support': True, 'consistencygroup_support': True, - 'reserved_percentage': 0 + 'reserved_percentage': 0, + 'thin_provisioning_support': True, + 'thick_provisioning_support': True, + 'provisioned_capacity_gb': provisioned_capacity, + 'max_over_subscription_ratio': + self.configuration.max_over_subscription_ratio } pools.append(pool) - if free_capacity_gb > 
max_free_capacity: - max_free_capacity = free_capacity_gb - total_capacity = total_capacity + total_capacity_gb + free_capacity += free_capacity_gb + total_capacity += total_capacity_gb - # Use zero capacities here so we always use a pool. stats['total_capacity_gb'] = total_capacity - stats['free_capacity_gb'] = max_free_capacity + stats['free_capacity_gb'] = free_capacity LOG.info(_LI( "Free capacity for backend is: %(free)s, total capacity: " "%(total)s."), - {'free': max_free_capacity, + {'free': free_capacity, 'total': total_capacity}) stats['pools'] = pools @@ -970,8 +1033,8 @@ class ScaleIODriver(driver.VolumeDriver): connection_properties = dict(self.connection_properties) connection_properties['scaleIO_volname'] = self._id_to_base64( volume.id) + connection_properties['scaleIO_volume_id'] = volume.provider_id device_info = self.connector.connect_volume(connection_properties) - return device_info['path'] def _sio_detach_volume(self, volume): @@ -980,6 +1043,7 @@ class ScaleIODriver(driver.VolumeDriver): connection_properties = dict(self.connection_properties) connection_properties['scaleIO_volname'] = self._id_to_base64( volume.id) + connection_properties['scaleIO_volume_id'] = volume.provider_id self.connector.disconnect_volume(connection_properties, volume) def copy_image_to_volume(self, context, volume, image_service, image_id): @@ -1078,10 +1142,16 @@ class ScaleIODriver(driver.VolumeDriver): if r.status_code != OK_STATUS_CODE: response = r.json() - msg = (_("Error renaming volume %(vol)s: %(err)s.") % - {'vol': vol_id, 'err': response['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) + if ((response['errorCode'] == VOLUME_NOT_FOUND_ERROR or + response['errorCode'] == OLD_VOLUME_NOT_FOUND_ERROR)): + LOG.info(_LI("Ignoring renaming action because the volume " + "%(vol)s is not a ScaleIO volume."), + {'vol': vol_id}) + else: + msg = (_("Error renaming volume %(vol)s: %(err)s.") % + {'vol': vol_id, 'err': response['message']}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) else: LOG.info(_LI("ScaleIO volume %(vol)s was renamed to " "%(new_name)s."), diff --git a/cinder/volume/drivers/emc/vnx/adapter.py b/cinder/volume/drivers/emc/vnx/adapter.py index bfef035b9..2a10f68fe 100644 --- a/cinder/volume/drivers/emc/vnx/adapter.py +++ b/cinder/volume/drivers/emc/vnx/adapter.py @@ -41,8 +41,7 @@ LOG = logging.getLogger(__name__) class CommonAdapter(object): - VERSION = '08.00.00' - VENDOR = 'EMC' + VERSION = None def __init__(self, configuration, active_backend_id): self.config = configuration @@ -716,8 +715,6 @@ class CommonAdapter(object): stats = self.get_enabler_stats() stats['pools'] = self.get_pool_stats(stats) stats['storage_protocol'] = self.config.storage_protocol - stats['driver_version'] = self.VERSION - stats['vendor_name'] = self.VENDOR self.append_replication_stats(stats) return stats diff --git a/cinder/volume/drivers/emc/vnx/driver.py b/cinder/volume/drivers/emc/vnx/driver.py index d9193497d..e738da3d5 100644 --- a/cinder/volume/drivers/emc/vnx/driver.py +++ b/cinder/volume/drivers/emc/vnx/driver.py @@ -75,6 +75,11 @@ class EMCVNXDriver(driver.TransferVD, 8.0.0 - New VNX Cinder driver """ + VERSION = '08.00.00' + VENDOR = 'EMC' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "EMC_VNX_CI" + def __init__(self, *args, **kwargs): super(EMCVNXDriver, self).__init__(*args, **kwargs) utils.init_ops(self.configuration) @@ -89,6 +94,7 @@ class EMCVNXDriver(driver.TransferVD, else: self.adapter = 
adapter.ISCSIAdapter(self.configuration, self.active_backend_id) + self.adapter.VERSION = self.VERSION self.adapter.do_setup() def check_for_setup_error(self): @@ -220,6 +226,8 @@ class EMCVNXDriver(driver.TransferVD, """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") self._stats = self.adapter.update_volume_stats() + self._stats['driver_version'] = self.VERSION + self._stats['vendor_name'] = self.VENDOR def manage_existing(self, volume, existing_ref): """Manage an existing lun in the array. diff --git a/cinder/volume/drivers/emc/xtremio.py b/cinder/volume/drivers/emc/xtremio.py index 5d7b503f6..20541f8d8 100644 --- a/cinder/volume/drivers/emc/xtremio.py +++ b/cinder/volume/drivers/emc/xtremio.py @@ -45,7 +45,6 @@ from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import interface -from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume import driver @@ -361,6 +360,10 @@ class XtremIOVolumeDriver(san.SanDriver): """Executes commands relating to Volumes.""" VERSION = '1.0.7' + + # ThirdPartySystems wiki + CI_WIKI_NAME = "EMC_XIO_CI" + driver_name = 'XtremIO' MIN_XMS_VERSION = [3, 0, 0] @@ -469,6 +472,27 @@ class XtremIOVolumeDriver(san.SanDriver): except exception.NotFound: LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id) + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): + # as the volume name is used to id the volume we need to rename it + name_id = None + provider_location = None + current_name = new_volume['id'] + original_name = volume['id'] + try: + data = {'name': original_name} + self.client.req('volumes', 'PUT', data, name=current_name) + except exception.VolumeBackendAPIException: + LOG.error(_LE('Unable to rename the logical volume ' + 'for volume: %s'), original_name) + # If the rename fails, _name_id should be set to the new + # volume id and provider_location should be set to the + # one from the new volume as well. + name_id = new_volume['_name_id'] or new_volume['id'] + provider_location = new_volume['provider_location'] + + return {'_name_id': name_id, 'provider_location': provider_location} + def _update_volume_stats(self): sys = self.client.get_cluster() physical_space = int(sys["ud-ssd-space"]) / units.Mi @@ -502,22 +526,33 @@ class XtremIOVolumeDriver(san.SanDriver): self._update_volume_stats() return self._stats - def manage_existing(self, volume, existing_ref): + def manage_existing(self, volume, existing_ref, is_snapshot=False): """Manages an existing LV.""" lv_name = existing_ref['source-name'] # Attempt to locate the volume. try: vol_obj = self.client.req('volumes', name=lv_name)['content'] + if ( + is_snapshot and + (not vol_obj['ancestor-vol-id'] or + vol_obj['ancestor-vol-id'][XTREMIO_OID_NAME] != + volume.volume_id)): + kwargs = {'existing_ref': lv_name, + 'reason': 'Not a snapshot of vol %s' % + volume.volume_id} + raise exception.ManageExistingInvalidReference(**kwargs) except exception.NotFound: kwargs = {'existing_ref': lv_name, - 'reason': 'Specified logical volume does not exist.'} + 'reason': 'Specified logical %s does not exist.' % + ('snapshot' if is_snapshot else 'volume')} raise exception.ManageExistingInvalidReference(**kwargs) # Attempt to rename the LV to match the OpenStack internal name. 
self.client.req('volumes', 'PUT', data={'vol-name': volume['id']}, idx=vol_obj['index']) - def manage_existing_get_size(self, volume, existing_ref): + def manage_existing_get_size(self, volume, existing_ref, + is_snapshot=False): """Return size of an existing LV for manage_existing.""" # Check that the reference is valid if 'source-name' not in existing_ref: @@ -530,26 +565,37 @@ class XtremIOVolumeDriver(san.SanDriver): vol_obj = self.client.req('volumes', name=lv_name)['content'] except exception.NotFound: kwargs = {'existing_ref': lv_name, - 'reason': 'Specified logical volume does not exist.'} + 'reason': 'Specified logical %s does not exist.' % + ('snapshot' if is_snapshot else 'volume')} raise exception.ManageExistingInvalidReference(**kwargs) # LV size is returned in gigabytes. Attempt to parse size as a float # and round up to the next integer. - lv_size = int(math.ceil(int(vol_obj['vol-size']) / units.Mi)) + lv_size = int(math.ceil(float(vol_obj['vol-size']) / units.Mi)) return lv_size - def unmanage(self, volume): + def unmanage(self, volume, is_snapshot=False): """Removes the specified volume from Cinder management.""" # trying to rename the volume to [cinder name]-unmanged try: self.client.req('volumes', 'PUT', name=volume['id'], data={'vol-name': volume['name'] + '-unmanged'}) except exception.NotFound: - LOG.info(_LI("Volume with the name %s wasn't found," " can't unmanage"), - volume['id']) + LOG.info(_LI("%(typ)s with the name %(name)s wasn't found, " + "can't unmanage"), + {'typ': 'Snapshot' if is_snapshot else 'Volume', + 'name': volume['id']}) raise exception.VolumeNotFound(volume_id=volume['id']) + def manage_existing_snapshot(self, snapshot, existing_ref): + self.manage_existing(snapshot, existing_ref, True) + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + return self.manage_existing_get_size(snapshot, existing_ref, True) + + def unmanage_snapshot(self, snapshot): + self.unmanage(snapshot, True) + def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" data = {'vol-size': six.text_type(new_size) + 'g'} @@ -732,30 +778,14 @@ class XtremIOVolumeDriver(san.SanDriver): 'snapshot-set-name': self._get_cgsnap_name(cgsnapshot)} self.client.req('snapshots', 'POST', data, ver='v2') - snapshots = objects.SnapshotList().get_all_for_cgsnapshot( - context, cgsnapshot['id']) - - for snapshot in snapshots: - snapshot.status = fields.SnapshotStatus.AVAILABLE - - model_update = {'status': 'available'} - - return model_update, snapshots + return None, None def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" - self.client.req('snapshot-sets', 'DELETE', + self.client.req('snapshot-sets', 'DELETE', name=self._get_cgsnap_name(cgsnapshot), ver='v2') - snapshots = objects.SnapshotList().get_all_for_cgsnapshot( - context, cgsnapshot['id']) - - for snapshot in snapshots: - snapshot.status = fields.SnapshotStatus.DELETED - - model_update = {'status': cgsnapshot.status} - - return model_update, snapshots + return None, None def _get_ig(self, name): try: diff --git a/cinder/volume/drivers/eqlx.py b/cinder/volume/drivers/eqlx.py index 81e1f2b64..4e2f24a6b 100644 --- a/cinder/volume/drivers/eqlx.py +++ b/cinder/volume/drivers/eqlx.py @@ -1,5 +1,5 @@ # Copyright (c) 2013 Dell Inc. -# Copyright 2013 OpenStack LLC +# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -51,6 +51,7 @@ eqlx_opts = [ 'specified in cinder/volume/drivers/san/san.py ' 'and will be removed in M release.'), cfg.IntOpt('eqlx_cli_max_retries', + min=0, default=5, help='Maximum retry count for reconnection. Default is 5.'), cfg.BoolOpt('eqlx_use_chap', @@ -162,6 +163,9 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver): VERSION = "1.3.0" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Dell_Storage_CI" + def __init__(self, *args, **kwargs): super(DellEQLSanISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(eqlx_opts) @@ -292,10 +296,6 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver): def check_for_setup_error(self): super(DellEQLSanISCSIDriver, self).check_for_setup_error() - if self.configuration.eqlx_cli_max_retries < 0: - raise exception.InvalidInput( - reason=_("eqlx_cli_max_retries must be greater than or " - "equal to 0")) def _eql_execute(self, *args, **kwargs): return self._run_ssh( diff --git a/cinder/volume/drivers/falconstor/__init__.py b/cinder/volume/drivers/falconstor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/volume/drivers/falconstor/fc.py b/cinder/volume/drivers/falconstor/fc.py new file mode 100644 index 000000000..e79cb32c9 --- /dev/null +++ b/cinder/volume/drivers/falconstor/fc.py @@ -0,0 +1,113 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Fibre Channel Cinder volume driver for FalconStor FSS storage system. + +This driver requires FSS-8.00-8865 or later. +""" + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _, _LE +from cinder import interface +import cinder.volume.driver +from cinder.volume.drivers.falconstor import fss_common +from cinder.zonemanager import utils as fczm_utils + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class FSSFCDriver(fss_common.FalconstorBaseDriver, + cinder.volume.driver.FibreChannelDriver): + """Implements commands for FalconStor FSS FC management. 
+ + To enable the driver add the following line to the cinder configuration: + volume_driver=cinder.volume.drivers.falconstor.fc.FSSFCDriver + + Version history: + 1.0.0 - Initial driver + + """ + + VERSION = '1.0.0' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "FalconStor_CI" + + def __init__(self, *args, **kwargs): + super(FSSFCDriver, self).__init__(*args, **kwargs) + self.gateway_fc_wwns = [] + self._storage_protocol = "FC" + self._backend_name = ( + self.configuration.safe_get('volume_backend_name') or + self.__class__.__name__) + self._lookup_service = fczm_utils.create_lookup_service() + + def do_setup(self, context): + """Any initialization the driver does while starting.""" + super(FSSFCDriver, self).do_setup(context) + self.gateway_fc_wwns = self.proxy.list_fc_target_wwpn() + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + super(FSSFCDriver, self).check_for_setup_error() + if len(self.gateway_fc_wwns) == 0: + msg = _('No FC targets found') + raise exception.InvalidHost(reason=msg) + + def validate_connector(self, connector): + """Check connector for at least one enabled FC protocol.""" + if 'FC' == self._storage_protocol and 'wwpns' not in connector: + LOG.error(_LE('The connector does not contain the required ' + 'information.')) + raise exception.InvalidConnectorException(missing='wwpns') + + @fczm_utils.AddFCZone + def initialize_connection(self, volume, connector): + fss_hosts = [] + fss_hosts.append(self.configuration.san_ip) + target_info = self.proxy.fc_initialize_connection(volume, connector, + fss_hosts) + init_targ_map = self._build_initiator_target_map( + target_info['available_initiator']) + + fc_info = {'driver_volume_type': 'fibre_channel', + 'data': {'target_lun': int(target_info['lun']), + 'target_discovered': True, + 'target_wwn': self.gateway_fc_wwns, + 'initiator_target_map': init_targ_map, + 'volume_id': volume['id'], + } + } + return fc_info + + def _build_initiator_target_map(self, initiator_wwns): + """Build the target_wwns and the initiator target map.""" + init_targ_map = dict.fromkeys(initiator_wwns, self.gateway_fc_wwns) + return init_targ_map + + @fczm_utils.RemoveFCZone + def terminate_connection(self, volume, connector, **kwargs): + host_id = self.proxy.fc_terminate_connection(volume, connector) + fc_info = {"driver_volume_type": "fibre_channel", "data": {}} + if self.proxy._check_fc_host_devices_empty(host_id): + available_initiator, fc_initiators_info = ( + self.proxy._get_fc_client_initiators(connector)) + init_targ_map = self._build_initiator_target_map( + available_initiator) + fc_info["data"] = {"target_wwn": self.gateway_fc_wwns, + "initiator_target_map": init_targ_map} + return fc_info diff --git a/cinder/volume/drivers/falconstor/fss_common.py b/cinder/volume/drivers/falconstor/fss_common.py new file mode 100644 index 000000000..9d8c9739a --- /dev/null +++ b/cinder/volume/drivers/falconstor/fss_common.py @@ -0,0 +1,399 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +"""Volume driver for FalconStor FSS storage system. + +This driver requires FSS-8.00-8865 or later. +""" + +import math +import re + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import units +import six + +from cinder import exception +from cinder.i18n import _, _LE, _LI, _LW +from cinder.image import image_utils +from cinder.volume.drivers.falconstor import rest_proxy +from cinder.volume.drivers.san import san + +LOG = logging.getLogger(__name__) + +FSS_OPTS = [ + cfg.IntOpt('fss_pool', + default='', + help='FSS pool id in which FalconStor volumes are stored.'), + cfg.BoolOpt('fss_debug', + default=False, + help="Enable HTTP debugging to FSS"), + cfg.StrOpt('additional_retry_list', + default='', + help='FSS additional retry list, separated by ;') +] + +CONF = cfg.CONF +CONF.register_opts(FSS_OPTS) + + +class FalconstorBaseDriver(san.SanDriver): + + def __init__(self, *args, **kwargs): + super(FalconstorBaseDriver, self).__init__(*args, **kwargs) + if self.configuration: + self.configuration.append_config_values(FSS_OPTS) + + self.proxy = rest_proxy.RESTProxy(self.configuration) + self._backend_name = ( + self.configuration.safe_get('volume_backend_name') or 'FalconStor') + self._storage_protocol = 'iSCSI' + + def do_setup(self, context): + self.proxy.do_setup() + LOG.info(_LI('Activating FalconStor cinder volume driver.')) + + def check_for_setup_error(self): + if self.proxy.session_id is None: + msg = (_('FSS cinder volume driver not ready: Unable to determine ' 'session id.')) + raise exception.VolumeBackendAPIException(data=msg) + + if not self.configuration.fss_pool: + msg = _('Pool is not available in the cinder configuration ' 'fields.') + raise exception.InvalidHost(reason=msg) + + self._pool_checking(self.configuration.fss_pool) + + def _pool_checking(self, pool_id): + pool_count = 0 + try: + output = self.proxy.list_pool_info(pool_id) + if "name" in output['data']: + pool_count = len(re.findall(rest_proxy.GROUP_PREFIX, output['data']['name'])) + if pool_count == 0: + msg = (_('The given pool info must include the storage pool ' 'and the pool name must start with OpenStack-')) + raise exception.VolumeBackendAPIException(data=msg) + except Exception: + msg = (_('Unexpected exception during pool checking.')) + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _check_multipath(self): + if self.configuration.use_multipath_for_image_xfer: + if not self.configuration.san_secondary_ip: + msg = (_('The san_secondary_ip param is null.')) + raise exception.VolumeBackendAPIException(data=msg) + output = self.proxy._check_iocluster_state() + if not output: + msg = (_('FSS does not support multipathing.')) + raise exception.VolumeBackendAPIException(data=msg) + return output + else: + return False + + def create_volume(self, volume): + """Creates a volume. + + We use the volume metadata to create different types of volumes. 
+ + Create a thin provisioned volume: + [Usage] create --volume-type FSS --metadata thinprovisioned=true + thinsize=<thin-volume-size> + + Create a LUN that is a Timeview of another LUN at a specified CDP tag: + [Usage] create --volume-type FSS --metadata timeview=<source-volume-id> + cdptag=<cdp-tag> volume-size + + Create a LUN that is a Timeview of another LUN at a specified Timemark: + [Usage] create --volume-type FSS --metadata timeview=<source-volume-id> + rawtimestamp=<raw-timestamp> volume-size + + """ + + volume_metadata = self._get_volume_metadata(volume) + if not volume_metadata: + volume_name, fss_metadata = self.proxy.create_vdev(volume) + else: + if ("timeview" in volume_metadata and + ("cdptag" in volume_metadata or + "rawtimestamp" in volume_metadata)): + volume_name, fss_metadata = self.proxy.create_tv_from_cdp_tag( + volume_metadata, volume) + elif ("thinprovisioned" in volume_metadata and + "thinsize" in volume_metadata): + volume_name, fss_metadata = self.proxy.create_thin_vdev( + volume_metadata, volume) + else: + volume_name, fss_metadata = self.proxy.create_vdev(volume) + fss_metadata.update(volume_metadata) + + if type(volume['metadata']) is dict: + fss_metadata.update(volume['metadata']) + if volume['consistencygroup_id']: + self.proxy._add_volume_to_consistency_group( + volume['consistencygroup_id'], + volume_name + ) + return {'metadata': fss_metadata} + + def _get_volume_metadata(self, volume): + volume_metadata = {} + if 'volume_metadata' in volume: + for metadata in volume['volume_metadata']: + volume_metadata[metadata['key']] = metadata['value'] + return volume_metadata + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + new_vol_name = self.proxy._get_fss_volume_name(volume) + src_name = self.proxy._get_fss_volume_name(src_vref) + vol_size = volume["size"] + src_size = src_vref["size"] + fss_metadata = self.proxy.clone_volume(new_vol_name, src_name) + self.proxy.extend_vdev(new_vol_name, src_size, vol_size) + + if volume['consistencygroup_id']: + self.proxy._add_volume_to_consistency_group( + volume['consistencygroup_id'], + new_vol_name + ) + volume_metadata = self._get_volume_metadata(volume) + fss_metadata.update(volume_metadata) + + if type(volume['metadata']) is dict: + fss_metadata.update(volume['metadata']) + return {'metadata': fss_metadata} + + def extend_volume(self, volume, new_size): + """Extend volume to new_size.""" + volume_name = self.proxy._get_fss_volume_name(volume) + self.proxy.extend_vdev(volume_name, volume["size"], new_size) + + def delete_volume(self, volume): + """Disconnect all hosts and delete the volume.""" + try: + self.proxy.delete_vdev(volume) + except rest_proxy.FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + LOG.warning(_LW("Volume deletion failed with message: %s"), err.reason) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + snap_metadata = snapshot["metadata"] + metadata = self.proxy.create_snapshot(snapshot) + snap_metadata.update(metadata) + return {'metadata': snap_metadata} + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + try: + self.proxy.delete_snapshot(snapshot) + except rest_proxy.FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + LOG.error( _LE("Snapshot deletion failed with message: %s"), err.reason) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + vol_size = volume['size'] + snap_size = snapshot['volume_size'] + volume_name, 
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+        new_vol_name = self.proxy._get_fss_volume_name(volume)
+        src_name = self.proxy._get_fss_volume_name(src_vref)
+        vol_size = volume["size"]
+        src_size = src_vref["size"]
+        fss_metadata = self.proxy.clone_volume(new_vol_name, src_name)
+        self.proxy.extend_vdev(new_vol_name, src_size, vol_size)
+
+        if volume['consistencygroup_id']:
+            self.proxy._add_volume_to_consistency_group(
+                volume['consistencygroup_id'],
+                new_vol_name
+            )
+        volume_metadata = self._get_volume_metadata(volume)
+        fss_metadata.update(volume_metadata)
+
+        if isinstance(volume['metadata'], dict):
+            fss_metadata.update(volume['metadata'])
+        return {'metadata': fss_metadata}
+
+    def extend_volume(self, volume, new_size):
+        """Extend volume to new_size."""
+        volume_name = self.proxy._get_fss_volume_name(volume)
+        self.proxy.extend_vdev(volume_name, volume["size"], new_size)
+
+    def delete_volume(self, volume):
+        """Disconnect all hosts and delete the volume."""
+        try:
+            self.proxy.delete_vdev(volume)
+        except rest_proxy.FSSHTTPError as err:
+            with excutils.save_and_reraise_exception() as ctxt:
+                ctxt.reraise = False
+                LOG.warning(_LW("Volume deletion failed with message: %s"),
+                            err.reason)
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+        snap_metadata = snapshot["metadata"]
+        metadata = self.proxy.create_snapshot(snapshot)
+        snap_metadata.update(metadata)
+        return {'metadata': snap_metadata}
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        try:
+            self.proxy.delete_snapshot(snapshot)
+        except rest_proxy.FSSHTTPError as err:
+            with excutils.save_and_reraise_exception() as ctxt:
+                ctxt.reraise = False
+                LOG.error(
+                    _LE("Snapshot deletion failed with message: %s"),
+                    err.reason)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot."""
+        vol_size = volume['size']
+        snap_size = snapshot['volume_size']
+        volume_name, fss_metadata = self.proxy.create_volume_from_snapshot(
+            volume, snapshot)
+
+        if vol_size != snap_size:
+            try:
+                extend_volume_name = self.proxy._get_fss_volume_name(volume)
+                self.proxy.extend_vdev(extend_volume_name, snap_size,
+                                       vol_size)
+            except rest_proxy.FSSHTTPError as err:
+                with excutils.save_and_reraise_exception() as ctxt:
+                    ctxt.reraise = False
+                    LOG.error(_LE(
+                        "Resizing %(id)s failed with message: %(msg)s. "
+                        "Cleaning up the volume."),
+                        {'id': volume["id"], 'msg': err.reason})
+
+        if isinstance(volume['metadata'], dict):
+            fss_metadata.update(volume['metadata'])
+
+        if volume['consistencygroup_id']:
+            self.proxy._add_volume_to_consistency_group(
+                volume['consistencygroup_id'],
+                volume_name)
+        return {'metadata': fss_metadata}
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def create_export(self, context, volume, connector):
+        pass
+
+    def remove_export(self, context, volume):
+        pass
+
+    # Attach/detach volume to instance/host
+    def attach_volume(self, context, volume, instance_uuid, host_name,
+                      mountpoint):
+        pass
+
+    def detach_volume(self, context, volume, attachment=None):
+        pass
+
+    def get_volume_stats(self, refresh=False):
+        total_capacity = 0
+        free_space = 0
+        if refresh:
+            try:
+                info = self.proxy._get_pools_info()
+                if info:
+                    total_capacity = int(info['total_capacity_gb'])
+                    used_space = int(info['used_gb'])
+                    free_space = int(total_capacity - used_space)
+
+                    data = {"vendor_name": "FalconStor",
+                            "volume_backend_name": self._backend_name,
+                            "driver_version": self.VERSION,
+                            "storage_protocol": self._storage_protocol,
+                            "total_capacity_gb": total_capacity,
+                            "free_capacity_gb": free_space,
+                            "reserved_percentage": 0,
+                            "consistencygroup_support": True
+                            }
+
+                    self._stats = data
+
+            except Exception as exc:
+                LOG.error(_LE('Cannot get volume status: %(exc)s.'),
+                          {'exc': exc})
+        return self._stats
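The dictionary assembled in get_volume_stats() above is what the Cinder scheduler consumes when weighing backends. A sketch of the expected shape, with made-up capacity numbers, might look like this; note that because the totals are cast to int above, sub-GB free space rounds down.

# Illustrative stats payload (values are placeholders, not real output).
example_stats = {
    'vendor_name': 'FalconStor',
    'volume_backend_name': 'FalconStor',
    'driver_version': '3.0.0',
    'storage_protocol': 'iSCSI',
    'total_capacity_gb': 1024,       # pool size reported by FSS
    'free_capacity_gb': 1024 - 256,  # total minus used, as computed above
    'reserved_percentage': 0,
    'consistencygroup_support': True,
}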
"""Deletes a cgsnapshot.""" + cgsnapshot_id = cgsnapshot.id + try: + self.proxy.delete_cgsnapshot(cgsnapshot) + except Exception as e: + msg = _('Failed to delete cgsnapshot %(id)s ' + 'due to %(reason)s.') % {'id': cgsnapshot_id, + 'reason': six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + + snapshot_updates = [] + for snapshot in snapshots: + snapshot_updates.append({ + 'id': snapshot.id, + 'status': 'deleted', + }) + model_update = {'status': cgsnapshot.status} + return model_update, snapshot_updates + + def manage_existing(self, volume, existing_ref): + """Convert an existing FSS volume to a Cinder volume. + + We expect a volume id in the existing_ref that matches one in FSS. + """ + volume_metadata = {} + self.proxy._get_existing_volume_ref_vid(existing_ref) + self.proxy._manage_existing_volume(existing_ref['source-id'], volume) + volume_metadata['FSS-vid'] = existing_ref['source-id'] + updates = {'metadata': volume_metadata} + return updates + + def manage_existing_get_size(self, volume, existing_ref): + """Get size of an existing FSS volume. + + We expect a volume id in the existing_ref that matches one in FSS. + """ + sizemb = self.proxy._get_existing_volume_ref_vid(existing_ref) + size = int(math.ceil(float(sizemb) / units.Ki)) + return size + + def unmanage(self, volume): + """Remove Cinder management from FSS volume""" + self.proxy.unmanage(volume) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + with image_utils.temporary_file() as tmp: + image_utils.fetch_verify_image(context, image_service, + image_id, tmp) + image_utils.fetch_to_raw(context, + image_service, + image_id, + tmp, + self.configuration.volume_dd_blocksize, + size=volume['size']) diff --git a/cinder/volume/drivers/falconstor/iscsi.py b/cinder/volume/drivers/falconstor/iscsi.py new file mode 100644 index 000000000..4f96c2713 --- /dev/null +++ b/cinder/volume/drivers/falconstor/iscsi.py @@ -0,0 +1,105 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Volume driver for FalconStor FSS storage system. + +This driver requires FSS-8.00-8865 or later. +""" + +from cinder import interface +import cinder.volume.driver +from cinder.volume.drivers.falconstor import fss_common + +DEFAULT_ISCSI_PORT = 3260 + + +@interface.volumedriver +class FSSISCSIDriver(fss_common.FalconstorBaseDriver, + cinder.volume.driver.ISCSIDriver): + + """Implements commands for FalconStor FSS ISCSI management. + + To enable the driver add the following line to the cinder configuration: + volume_driver=cinder.volume.drivers.falconstor.iscsi.FSSISCSIDriver + + Version history: + 1.0.0 - Initial driver + 1.0.1 - Fix copy_image_to_volume error. + 1.0.2 - Closes-Bug #1554184, add lun id type conversion in + initialize_connection + 1.03 - merge source code + 1.04 - Fixed create_volume_from_snapshot(), create_cloned_volume() + metadata TypeError + 2.0.0 - Mitaka driver + -- fixed consisgroup commands error. 
+ 2.0.1 -- fixed bugs + 2.0.2 -- support Multipath + 3.0.0 - Newton driver + + """ + + VERSION = '3.0.0' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "FalconStor_CI" + + def __init__(self, *args, **kwargs): + super(FSSISCSIDriver, self).__init__(*args, **kwargs) + self._storage_protocol = "iSCSI" + self._backend_name = ( + self.configuration.safe_get('volume_backend_name') or + self.__class__.__name__) + + def initialize_connection(self, volume, connector, initiator_data=None): + fss_hosts = [] + target_portal = [] + multipath = connector.get('multipath', False) + fss_hosts.append(self.configuration.san_ip) + + if multipath: + if self._check_multipath(): + fss_hosts.append(self.configuration.san_secondary_ip) + else: + multipath = False + + for host in fss_hosts: + iscsi_ip_port = "%s:%d" % (host, DEFAULT_ISCSI_PORT) + target_portal.append(iscsi_ip_port) + + target_info = self.proxy.initialize_connection_iscsi(volume, + connector, + fss_hosts) + properties = {} + properties['target_discovered'] = True + properties['discard'] = True + properties['encrypted'] = False + properties['qos_specs'] = None + properties['access_mode'] = 'rw' + properties['volume_id'] = volume['id'] + properties['target_iqn'] = target_info['iqn'] + properties['target_portal'] = target_portal[0] + properties['target_lun'] = int(target_info['lun']) + + if multipath: + properties['target_iqns'] = [target_info['iqn'], + target_info['iqn']] + properties['target_portals'] = target_portal + properties['target_luns'] = [int(target_info['lun']), + int(target_info['lun'])] + + return {'driver_volume_type': 'iscsi', 'data': properties} + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate connection.""" + self.proxy.terminate_connection_iscsi(volume, connector) diff --git a/cinder/volume/drivers/falconstor/rest_proxy.py b/cinder/volume/drivers/falconstor/rest_proxy.py new file mode 100644 index 000000000..1604963b5 --- /dev/null +++ b/cinder/volume/drivers/falconstor/rest_proxy.py @@ -0,0 +1,1530 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
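As context for the FSSISCSIDriver docstring above ("To enable the driver add the following line to the cinder configuration"), a minimal backend stanza could look like the sketch below. The section name, addresses, credentials, and pool id are all placeholders; the san_* options come from the inherited SAN driver configuration, while fss_pool and fss_debug are the FSS_OPTS defined in fss_common.py.

[falconstor-iscsi]
volume_driver = cinder.volume.drivers.falconstor.iscsi.FSSISCSIDriver
volume_backend_name = FalconStor
san_ip = 192.0.2.10
san_login = admin
san_password = secret
fss_pool = 1
fss_debug = False
# Optional second storage IP, required when multipath is used:
# san_secondary_ip = 192.0.2.11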
+ +import base64 +import json +import random +import time +import uuid + +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import units +from six.moves import http_client + +from cinder import exception +from cinder.i18n import _, _LI, _LW + + +FSS_BATCH = 'batch' +FSS_PHYSICALRESOURCE = 'physicalresource' +FSS_PHYSICALADAPTER = 'physicaladapter' +FSS_FCCLIENTINITIATORS = 'fcclientinitiators' +FSS_FC_TGT_WWPN = 'fctgtwwpn' +FSS_STORAGE_POOL = 'storagepool' +FSS_LOGICALRESOURCE = 'logicalresource' +FSS_SAN = 'sanresource' +FSS_MIRROR = 'mirror' +FSS_TIMEMARKPOLICY = 'timemarkpolicy' +FSS_TIMEMARK = 'timemark' +FSS_TIMEVIEW = 'timeview' +FSS_SNAPSHOT_RESOURCE = 'snapshotresource' +FSS_SNAPSHOT_GROUP = 'snapshotgroup' +FSS_CLIENT = 'client' +FSS_SANCLIENT = 'sanclient' +FSS_ISCSI_TARGET = 'iscsitarget' +FSS_ISCSI_CLIENT_INITIATORS = 'iscsiclientinitiators' +FSS_SERVER = 'server' +FSS_OPTIONS = 'options' +FSS_PORTAL = 'defaultiscsiportal' +FSS_PROPERTIES = 'properties' +FSS_HOST = 'host' +FSS_RETURN_CODE = 'rcs' +FSS_AUTH = 'auth' +FSS_LOGIN = 'login' +FSS_SINGLE_TYPE = 'single' + + +POST = 'POST' +GET = 'GET' +PUT = 'PUT' +DELETE = 'DELETE' +GROUP_PREFIX = 'OpenStack-' +PRODUCT_NAME = 'ipstor' +SESSION_COOKIE_NAME = 'session_id' +RETRY_LIST = ['107', '2147680512'] + +MAXSNAPSHOTS = 1000 +OPERATION_TIMEOUT = 60 * 60 +RETRY_CNT = 5 +RETRY_INTERVAL = 15 + +LOG = logging.getLogger(__name__) + + +class RESTProxy(object): + def __init__(self, config): + self.fss_host = config.san_ip + self.fss_username = config.san_login + self.fss_password = config.san_password + self.fss_defined_pool = config.fss_pool + if config.additional_retry_list: + RETRY_LIST.append(config.additional_retry_list) + + self.FSS = FSSRestCommon( + host=self.fss_host, + username=self.fss_username, + password=self.fss_password, + fss_debug=config.fss_debug) + self.session_id = None + + # naming + def _get_vol_name_from_snap(self, snapshot): + """Return the name of the snapshot that FSS will use.""" + return "cinder-%s" % snapshot["volume_id"] + + def _get_fss_volume_name(self, volume): + """Return the name of the volume FSS will use.""" + return "cinder-%s" % volume["id"] + + def _get_group_name_from_id(self, id): + return "cinder-consisgroup-%s" % id + + def _encode_name(self, name): + uuid_str = name.replace("-", "") + vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) + newuuid = (base64.urlsafe_b64encode(vol_uuid.bytes). 
+ decode('utf-8').strip('=')) + return "cinder-%s" % newuuid + + def do_setup(self): + self.session_id = self.FSS.fss_login() + + def _convert_size_to_gb(self, size): + s = round(float(size) / units.Gi, 2) + if s > 0: + return s + else: + return 0 + + def _convert_size_to_mb(self, size): + return size * units.Ki + + def _get_pools_info(self): + qpools = [] + poolinfo = {} + try: + output = self.list_pool_info() + if "storagepools" in output['data']: + for item in output['data']['storagepools']: + if item['name'].startswith(GROUP_PREFIX) and ( + self.fss_defined_pool == item['id']): + poolid = int(item['id']) + qpools.append(poolid) + break + + if not qpools: + msg = _('The storage pool information is empty or not correct') + raise exception.DriverNotInitialized(msg) + + # Query pool detail information + for poolid in qpools: + output = self.list_pool_info(poolid) + poolinfo['pool_name'] = output['data']['name'] + poolinfo['total_capacity_gb'] = ( + self._convert_size_to_gb(output['data']['size'])) + poolinfo['used_gb'] = ( + self._convert_size_to_gb(output['data']['used'])) + poolinfo['QoS_support'] = False + poolinfo['reserved_percentage'] = 0 + except Exception: + msg = (_('Unexpected exception during get pools info.')) + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return poolinfo + + def list_pool_info(self, pool_id=None): + return self.FSS.list_pool_info(pool_id) + + def list_physicaladapter_info(self, adapter_id=None): + return self.FSS.list_physicaladapter_info(adapter_id) + + def _checking_adapter_type(self, id): + adapter_type = '' + output = self.list_physicaladapter_info() + if "physicaladapters" in output['data']: + physicaladapters = output['data']['physicaladapters'] + if physicaladapters['id'] == id: + adapter_type = physicaladapters['type'] + return adapter_type + + def create_vdev(self, volume): + sizemb = self._convert_size_to_mb(volume["size"]) + volume_name = self._get_fss_volume_name(volume) + params = dict(storagepoolid=self.fss_defined_pool, + category="virtual", + sizemb=sizemb, + name=volume_name) + return volume_name, self.FSS.create_vdev(params) + + def create_tv_from_cdp_tag(self, volume_metadata, volume): + tv_vid = '' + cdp_tag = '' + + if 'cdptag' in volume_metadata: + tv_vid = str(volume_metadata['timeview']) + '_0' + cdp_tag = str(volume_metadata['cdptag']) + + if 'rawtimestamp' in volume_metadata: + tv_vid = '{0}_{1}'.format(str(volume_metadata['timeview']), + str(volume_metadata['rawtimestamp'])) + volume_name = self._get_fss_volume_name(volume) + sizemb = self._convert_size_to_mb(volume['size']) + params = dict(name=volume_name, + storage=dict(storagepoolid=self.fss_defined_pool, + sizemb=sizemb), + automaticexpansion=dict(enabled=False), + timeviewcopy=True) + if cdp_tag: + params.update(cdpjournaltag=cdp_tag) + + metadata = self.FSS.create_timeview(tv_vid, params) + return volume_name, metadata + + def create_thin_vdev(self, volume_metadata, volume): + thin_size = 0 + size = volume["size"] + sizemb = self._convert_size_to_mb(size) + params = dict(storagepoolid=self.fss_defined_pool, + category="virtual") + + if 'thinprovisioned' in volume_metadata: + if volume_metadata['thinprovisioned'] is False: + msg = (_('If you want to create a thin provisioning volume,' + ' this param must be True.')) + raise exception.VolumeBackendAPIException(msg) + + if 'thinsize' in volume_metadata: + thin_size = int(volume_metadata['thinsize']) + + if size < 10: + msg = _('The resource is a FSS thin device, minimum size is ' + '10240 MB.') + 
raise exception.VolumeBackendAPIException(msg) + else: + try: + if thin_size > size: + msg = _('The allocated size must less than total size.') + raise exception.VolumeBackendAPIException(msg) + except Exception: + msg = _('The resource is a thin device, thin size is invalid.') + raise exception.VolumeBackendAPIException(msg) + + thin_size = self._convert_size_to_mb(thin_size) + thin_disk = dict( + enabled=True, + fullsizemb=sizemb) + params.update(thinprovisioning=thin_disk) + params.update(sizemb=thin_size) + + volume_name = self._get_fss_volume_name(volume) + params.update(name=volume_name) + return volume_name, self.FSS.create_vdev(params) + + def _get_fss_vid_from_name(self, volume_name, fss_type=None): + vid = [] + output = self.FSS.list_fss_volume_info() + try: + if "virtualdevices" in output['data']: + for item in output['data']['virtualdevices']: + if item['name'] in volume_name: + vid.append(item['id']) + except Exception: + msg = (_('Can not find cinder volume - %(volumeName)s') % + {"volumeName": volume_name}) + raise exception.VolumeBackendAPIException(msg) + + if fss_type is not None and fss_type == FSS_SINGLE_TYPE: + vid = ''.join(str(x) for x in vid) + return vid + + def _get_fss_gid_from_name(self, group_name): + gid = '' + output = self.FSS.list_group_info() + if "snapshotgroups" in output['data']: + for item in output['data']['snapshotgroups']: + if item['name'] == group_name: + gid = item['id'] + break + if gid == '': + msg = (_('Can not find consistency group: %s.') % group_name) + raise exception.VolumeBackendAPIException(msg) + return gid + + def _get_fss_group_membercount(self, gid): + membercount = 0 + output = self.FSS.list_group_info(gid) + if "membercount" in output['data']: + membercount = output['data']['membercount'] + return membercount + + def _get_vdev_id_from_group_id(self, group_id): + vidlist = [] + output = self.FSS.list_group_info(group_id) + if "virtualdevices" in output['data']: + for item in output['data']['virtualdevices']: + vidlist.append(item['id']) + return vidlist + + def clone_volume(self, new_vol_name, source_volume_name): + params = dict(storagepoolid=self.fss_defined_pool) + volume_metadata = {} + new_vid = '' + vid = self._get_fss_vid_from_name(source_volume_name, FSS_SINGLE_TYPE) + mirror_params = dict( + category='virtual', + selectioncriteria='anydrive', + mirrortarget="virtual" + ) + mirror_params.update(params) + ret1 = self.FSS.create_mirror(vid, mirror_params) + + if ret1: + if ret1['rc'] != 0: + failed_ret = self.FSS.get_fss_error_code(ret1['rc']) + raise exception.VolumeBackendAPIException(data=failed_ret) + + ret2 = self.FSS.sync_mirror(vid) + self.FSS._random_sleep() + if ret2['rc'] == 0: + self.FSS._check_mirror_sync_finished(vid, OPERATION_TIMEOUT) + ret3 = self.FSS.promote_mirror(vid, new_vol_name) + if ret3 and ret3['rc'] == 0: + new_vid = ret3['id'] + + volume_metadata['FSS-vid'] = new_vid + return volume_metadata + + def delete_vdev(self, volume): + volume_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if vid: + return self.FSS.delete_vdev(vid) + else: + msg = _('vid is null. FSS failed to delete volume.') + raise exception.VolumeBackendAPIException(data=msg) + + def create_snapshot(self, snapshot): + snap_metadata = {} + volume_name = self._get_vol_name_from_snap(snapshot) + snap_name = snapshot["display_name"] + size = snapshot['volume_size'] + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = _('vid is null. 
FSS failed to create snapshot.') + raise exception.VolumeBackendAPIException(data=msg) + + (snap, tm_policy, vdev_size) = (self.FSS. + _check_if_snapshot_tm_exist(vid)) + + if not snap: + self.create_vdev_snapshot(vid, self._convert_size_to_mb(size)) + if not tm_policy: + self.FSS.create_timemark_policy( + vid, storagepoolid=self.fss_defined_pool) + if not snap_name: + snap_name = "snap-%s" % time.strftime('%Y%m%d%H%M%S') + + self.FSS.create_timemark(vid, snap_name) + snap_metadata['fss_tm_comment'] = snap_name + return snap_metadata + + def delete_snapshot(self, snapshot): + volume_name = self._get_vol_name_from_snap(snapshot) + snap_name = snapshot["display_name"] + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = _('vid is null. FSS failed to delete snapshot') + raise exception.VolumeBackendAPIException(data=msg) + if not snap_name: + if ('metadata' in snapshot and 'fss_tm_comment' in + snapshot['metadata']): + snap_name = snapshot['metadata']['fss_tm_comment'] + + tm_info = self.FSS.get_timemark(vid) + rawtimestamp = self._get_timestamp(tm_info, snap_name) + if rawtimestamp: + timestamp = '%s_%s' % (vid, rawtimestamp) + self.FSS.delete_timemark(timestamp) + + final_tm_data = self.FSS.get_timemark(vid) + if "timemark" in final_tm_data['data']: + if not final_tm_data['data']['timemark']: + self.FSS.delete_timemark_policy(vid) + self.FSS.delete_vdev_snapshot(vid) + + def _get_timestamp(self, tm_data, encode_snap_name): + timestamp = '' + if "timemark" in tm_data['data']: + for item in tm_data['data']['timemark']: + if "comment" in item and item['comment'] == encode_snap_name: + timestamp = item['rawtimestamp'] + break + return timestamp + + def create_volume_from_snapshot(self, volume, snapshot): + volume_metadata = {} + volume_name = self._get_vol_name_from_snap(snapshot) + snap_name = snapshot["display_name"] + new_vol_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = _('vid is null. FSS failed to create_volume_from_snapshot.') + raise exception.VolumeBackendAPIException(data=msg) + + if not snap_name: + if ('metadata' in snapshot) and ('fss_tm_comment' + in snapshot['metadata']): + snap_name = snapshot['metadata']['fss_tm_comment'] + + tm_info = self.FSS.get_timemark(vid) + rawtimestamp = self._get_timestamp(tm_info, snap_name) + if not rawtimestamp: + msg = _('rawtimestamp is null. 
FSS failed to ' + 'create_volume_from_snapshot.') + raise exception.VolumeBackendAPIException(data=msg) + + timestamp = '%s_%s' % (vid, rawtimestamp) + output = self.FSS.copy_timemark( + timestamp, storagepoolid=self.fss_defined_pool, name=new_vol_name) + if output['rc'] == 0: + vid = output['id'] + self.FSS._random_sleep() + if self.FSS._check_tm_copy_finished(vid, OPERATION_TIMEOUT): + volume_metadata['FSS-vid'] = vid + return volume_name, volume_metadata + + def extend_vdev(self, volume_name, vol_size, new_size): + if new_size > vol_size: + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + size = self._convert_size_to_mb(new_size - vol_size) + params = dict( + action='expand', + sizemb=size + ) + return self.FSS.extend_vdev(vid, params) + + def list_volume_info(self, vid): + return self.FSS.list_fss_volume_info(vid) + + def rename_vdev(self, vid, new_vol_name): + params = dict( + action='update', + name=new_vol_name + ) + return self.FSS.rename_vdev(vid, params) + + def assign_iscsi_vdev(self, client_id, target_id, vid): + params = dict( + action="assign", + virtualdeviceids=[vid], + iscsi=dict(target=target_id) + ) + return self.FSS.assign_vdev(client_id, params) + + def assign_fc_vdev(self, client_id, vid): + params = dict( + action="assign", + virtualdeviceids=[vid], + fc=dict( + fcmapping='alltoall', + accessmode='readwritenonexclusive') + ) + return self.FSS.assign_vdev(client_id, params) + + def unassign_vdev(self, client_id, vid): + params = dict( + action="unassign", + virtualdeviceid=vid + ) + return self.FSS.unassign_vdev(client_id, params) + + def _create_vdev_snapshot(self, volume_name, size): + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + return self.create_vdev_snapshot(vid, self._convert_size_to_mb(size)) + + def create_vdev_snapshot(self, vid, size): + params = dict( + idlist=[vid], + selectioncriteria='anydrive', + policy='alwayswrite', + sizemb=size, + storagepoolid=self.fss_defined_pool + ) + return self.FSS.create_vdev_snapshot(params) + + def create_group(self, group): + group_name = self._get_group_name_from_id(group['id']) + params = dict( + name=group_name + ) + return self.FSS.create_group(params) + + def destroy_group(self, group): + group_name = self._get_group_name_from_id(group['id']) + gid = self._get_fss_gid_from_name(group_name) + return self.FSS.destroy_group(gid) + + def _add_volume_to_consistency_group(self, group_id, vol_name): + self.set_group(group_id, addvollist=[vol_name]) + + def set_group(self, group_id, **kwargs): + group_name = self._get_group_name_from_id(group_id) + gid = self._get_fss_gid_from_name(group_name) + + join_params = dict() + leave_params = dict() + if kwargs.get('addvollist'): + joing_vid = self._get_fss_vid_from_name(kwargs['addvollist']) + join_params.update( + action='join', + virtualdevices=joing_vid + ) + if kwargs.get('remvollist'): + leave_vid = self._get_fss_vid_from_name(kwargs['remvollist']) + leave_params.update( + action='leave', + virtualdevices=leave_vid + ) + return self.FSS.set_group(gid, join_params, leave_params) + + def create_cgsnapshot(self, cgsnapshot): + group_name = self._get_group_name_from_id( + cgsnapshot['consistencygroup_id']) + gsnap_name = self._encode_name(cgsnapshot['id']) + gid = self._get_fss_gid_from_name(group_name) + vidlist = self._get_vdev_id_from_group_id(gid) + + for vid in vidlist: + (snap, tm_policy, sizemb) = (self.FSS. 
+ _check_if_snapshot_tm_exist(vid)) + if not snap: + self.create_vdev_snapshot(vid, sizemb) + if not tm_policy: + self.FSS.create_timemark_policy( + vid, storagepoolid=self.fss_defined_pool) + + group_tm_policy = self.FSS._check_if_group_tm_enabled(gid) + if not group_tm_policy: + self.create_group_timemark_policy(gid) + + self.create_group_timemark(gid, gsnap_name) + + def create_group_timemark_policy(self, gid): + tm_params = dict( + automatic=dict(enabled=False), + maxtimemarkcount=MAXSNAPSHOTS + ) + return self.FSS.create_group_timemark_policy(gid, tm_params) + + def create_group_timemark(self, gid, gsnap_name): + params = dict( + comment=gsnap_name, + priority='medium', + snapshotnotification=False + ) + return self.FSS.create_group_timemark(gid, params) + + def delete_cgsnapshot(self, cgsnapshot): + group_name = self._get_group_name_from_id( + cgsnapshot['consistencygroup_id']) + encode_snap_name = self._encode_name(cgsnapshot['id']) + gid = self._get_fss_gid_from_name(group_name) + + if not gid: + msg = _('gid is null. FSS failed to delete cgsnapshot.') + raise exception.VolumeBackendAPIException(data=msg) + + if self._get_fss_group_membercount(gid) != 0: + tm_info = self.FSS.get_group_timemark(gid) + rawtimestamp = self._get_timestamp(tm_info, encode_snap_name) + timestamp = '%s_%s' % (gid, rawtimestamp) + self.delete_group_timemark(timestamp) + + final_tm_data = self.FSS.get_group_timemark(gid) + if "timemark" in final_tm_data['data']: + if not final_tm_data['data']['timemark']: + self.FSS.delete_group_timemark_policy(gid) + + def delete_group_timemark(self, timestamp): + params = dict( + deleteallbefore=False + ) + return self.FSS.delete_group_timemark(timestamp, params) + + def _check_iscsi_option(self): + output = self.FSS.get_server_options() + if "iscsitarget" in output['data']: + if not output['data']['iscsitarget']: + self.FSS.set_server_options('iscsitarget') + + def _check_fc_target_option(self): + output = self.FSS.get_server_options() + if "fctarget" in output['data']: + if not output['data']['fctarget']: + self.FSS.set_server_options('fctarget') + + def _check_iocluster_state(self): + output = self.FSS.get_server_options() + if 'iocluster' not in output['data']: + msg = _('No iocluster information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return output['data']['iocluster'] + + def list_fc_target_wwpn(self): + return self.FSS.list_fc_target_wwpn() + + def list_fc_client_initiators(self): + return self.FSS.list_fc_client_initiators() + + def create_fc_client(self, cinder_host_name, free_initiator_wwpns): + client_id = 0 + params = dict( + name=cinder_host_name, + protocoltype=["fc"], + ipaddress=self.fss_host, + ostype='linux', + fcpolicy=dict( + initiators=[free_initiator_wwpns], + vsaenabled=False + ) + ) + client_info = self.FSS.create_client(params) + if client_info and client_info['rc'] == 0: + client_id = client_info['id'] + return client_id + + def list_iscsi_target_info(self, target_id=None): + return self.FSS.list_iscsi_target_info(target_id) + + def _check_fc_host_devices_empty(self, client_id): + is_empty = False + output = self.FSS.list_sanclient_info(client_id) + if 'data' not in output: + msg = _('No target in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if 'fcdevices' not in output['data']: + msg = _('No fcdevices in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if len(output['data']['fcdevices']) == 0: + is_empty = True + 
self.FSS.delete_client(client_id) + return is_empty + + def create_iscsi_client(self, cinder_host_name, initiator): + params = dict( + name=cinder_host_name, + protocoltype=["iscsi"], + ipaddress=self.fss_host, + ostype='linux', + iscsipolicy=dict( + initiators=[initiator], + authentication=dict(enabled=False, + mutualchap=dict(enabled=False)) + ) + ) + return self.FSS.create_client(params) + + def create_iscsitarget(self, client_id, initiator, fss_hosts): + params = dict( + clientid=client_id, + name=initiator, + ipaddress=fss_hosts, + accessmode='readwritenonexclusive' + ) + return self.FSS.create_iscsitarget(params) + + def _get_iscsi_host(self, connector): + target_info = self.list_iscsi_target_info() + if 'data' not in target_info: + msg = _('No data information in return info.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'iscsitargets' not in target_info['data']: + msg = _('No iscsitargets in return info.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if target_info['data']['iscsitargets']: + iscsitargets = target_info['data']['iscsitargets'] + for iscsitarget in iscsitargets: + if connector["initiator"] in iscsitarget["name"]: + target_id = iscsitarget["id"] + client_id = iscsitarget["clientid"] + return client_id, target_id + return None, None + + def _create_iscsi_host(self, host_name, initiator, fss_hosts): + client_id = '' + target_id = '' + client_info = self.create_iscsi_client(host_name, initiator) + if client_info and client_info['rc'] == 0: + client_id = client_info['id'] + + target_info = self.create_iscsitarget(client_id, initiator, fss_hosts) + if target_info['rc'] == 0: + target_id = target_info['id'] + return client_id, target_id + + def _get_fc_client_initiators(self, connector): + fc_initiators_assigned = [] + fc_available_initiator = [] + fc_initiators_info = self.list_fc_client_initiators() + if 'data' not in fc_initiators_info: + raise ValueError(_('No data information in return info.')) + + if fc_initiators_info['data']: + fc_initiators = fc_initiators_info['data'] + for fc_initiator in fc_initiators: + if fc_initiator['wwpn'] in connector['wwpns']: + fc_available_initiator.append(str(fc_initiator['wwpn'])) + fc_initiators_assigned.append(dict( + wwpn=str(fc_initiator['wwpn']), + assigned=fc_initiator['assigned'])) + return fc_available_initiator, fc_initiators_assigned + + def fc_initialize_connection(self, volume, connector, fss_hosts): + """Connect the host and volume; return dict describing connection.""" + vid = 0 + fc_target_info = {} + free_fc_initiator = None + + volume_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = (_('Can not find cinder volume - %s.') % volume_name) + raise exception.VolumeBackendAPIException(msg) + + available_initiator, fc_initiators_info = ( + self._get_fc_client_initiators(connector)) + + if fc_initiators_info is None: + msg = _('No FC initiator can be added to host.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for fc_initiator in fc_initiators_info: + value = fc_initiator['assigned'] + if len(value) == 0: + free_fc_initiator = fc_initiator['wwpn'] + + if free_fc_initiator is None: + msg = _('No free FC initiator can be assigned to host.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + initiator = connector["initiator"] + host_name = GROUP_PREFIX + '%s-' % connector["host"] + + initiator_name = initiator.split(':') + idx = 
len(initiator_name) - 1 + client_host_name = host_name + initiator_name[ + idx] + '_FC-wwpn-' + free_fc_initiator + + client_id = self.create_fc_client(client_host_name, free_fc_initiator) + + try: + self.assign_fc_vdev(client_id, vid) + time.sleep(3) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + if (err.code == 2415984845 and "XML_ERROR_CLIENT_EXIST" + in err.text): + ctxt.reraise = False + LOG.warning(_LW('Assign volume failed with message: %(msg)s.'), + {"msg": err.reason}) + finally: + lun = self.FSS._get_fc_client_info(client_id, vid) + + fc_target_info['lun'] = lun + fc_target_info['available_initiator'] = available_initiator + + if not fc_target_info: + msg = _('Failed to get iSCSI target info for the LUN: %s.') + raise exception.VolumeBackendAPIException(data=msg % volume_name) + return fc_target_info + + def fc_terminate_connection(self, volume, connector): + client_id = 0 + volume_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + output = self.list_volume_info(vid) + if 'data' not in output: + msg = _('No vdev information in given data') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'clients' not in output['data']: + msg = _('No clients in vdev information.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + client_info = output['data']['clients'] + for fcclients in client_info: + client_id = int(fcclients['id']) + + if client_id == 0: + msg = _( + 'Can not find client id. The connection target name is %s.') + raise exception.VolumeBackendAPIException( + data=msg % connector["initiator"]) + try: + self.unassign_vdev(client_id, vid) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + if (err.code == 2415984988 and + "XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET" + in err.text): + ctxt.reraise = False + LOG.warning(_LW('Disconnection failed with message: ' + "%(msg)s."), {"msg": err.reason}) + return client_id + + def initialize_connection_iscsi(self, volume, connector, fss_hosts): + """Connect the host and volume; return dict describing connection.""" + vid = 0 + iscsi_target_info = {} + self._check_iscsi_option() + client_id, target_id = self._get_iscsi_host(connector) + + if target_id is None: + initiator = connector["initiator"] + host_name = GROUP_PREFIX + '%s-' % connector["host"] + + initiator_info = initiator.split(':') + idx = len(initiator_info) - 1 + client_host_name = host_name + initiator_info[idx] + + client_id, target_id = self._create_iscsi_host(client_host_name, + initiator, + fss_hosts) + volume_name = self._get_fss_volume_name(volume) + try: + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = (_('Can not find cinder volume - %(volumeName)s.') % + {"volumeName": volume_name}) + raise exception.VolumeBackendAPIException(msg) + + self.assign_iscsi_vdev(client_id, target_id, vid) + time.sleep(3) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + if (err.code == 2415984989 and + "XML_ERROR_VIRTUAL_DEV_ASSIGNED_TO_iSCSI_TARGET" in + err.text): + ctxt.reraise = False + LOG.warning(_LW("Assign volume failed with message: %(msg)s."), + {"msg": err.reason}) + finally: + (lun, target_name) = self.FSS._get_iscsi_target_info(client_id, + vid) + iscsi_target_info['lun'] = lun + iscsi_target_info['iqn'] = target_name + + if not iscsi_target_info: + msg = _('Failed to get iSCSI target info for the LUN: %s') + raise 
exception.VolumeBackendAPIException(data=msg % volume_name) + return iscsi_target_info + + def terminate_connection_iscsi(self, volume, connector): + volume_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + client_id, target_id = self._get_iscsi_host(connector) + if not client_id: + msg = _('Can not find client id. The connection target name ' + 'is %s.') + raise exception.VolumeBackendAPIException( + data=msg % connector["initiator"]) + try: + self.unassign_vdev(client_id, vid) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + if (err.code == 2415984988 and + "XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET" + in err.text): + ctxt.reraise = False + LOG.warning(_LW("Disconnection failed with message: " + "%(msg)s."), {"msg": err.reason}) + finally: + is_empty = self.FSS._check_host_mapping_status(client_id, + target_id) + + if is_empty: + self.FSS.delete_iscsi_target(target_id) + self.FSS.delete_client(client_id) + + def _get_existing_volume_ref_vid(self, existing_ref): + if 'source-id' in existing_ref: + vid = existing_ref['source-id'] + else: + reason = _("FSSISCSIDriver manage_existing requires vid to " + "identify an existing volume.") + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=reason) + vdev_info = self.list_volume_info(vid) + if not vdev_info: + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, + reason=_("Unable to find volume with FSS vid =%s.") % vid) + + if 'data' not in vdev_info: + msg = _('No vdev information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'sizemb' not in vdev_info['data']: + msg = _('No vdev sizemb in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return vdev_info['data']['sizemb'] + + def _manage_existing_volume(self, vid, volume): + new_vol_name = self._get_fss_volume_name(volume) + try: + self.rename_vdev(vid, new_vol_name) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + LOG.warning(_LW("Volume manage_existing_volume was unable " + "to rename the volume, error message: %s."), + err.reason) + + def unmanage(self, volume): + volume_name = self._get_fss_volume_name(volume) + unmanaged_vol_name = volume_name + "-unmanaged" + try: + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + self.rename_vdev(vid, unmanaged_vol_name) + except FSSHTTPError as err: + LOG.warning(_LW("Volume unmanage was unable to rename the volume," + " error message: %(msg)s."), {"msg": err.reason}) + + +class FSSRestCommon(object): + def __init__(self, host, username, password, fss_debug): + self.hostip = host + self.username = username + self.password = password + self.session_id = None + self.fss_debug = fss_debug + + def _fss_request(self, method, path, data=None): + json_data = None + url = "http://%(ip)s/%(product)s/%(path)s" % { + "ip": self.hostip, "product": PRODUCT_NAME, "path": path} + headers = {"Content-Type": "application/json"} + if self.session_id is not None: + cookie = dict( + Cookie=SESSION_COOKIE_NAME + '=' + self.session_id + ) + headers.update(cookie) + + if data is not None: + request_body = json.dumps(data).encode("utf-8") + else: + request_body = None + + connection = http_client.HTTPConnection(self.hostip, 80, timeout=60) + + if self.fss_debug: + LOG.info(_LI("[FSS_RESTAPI]====%(method)s@url=%(url)s ====" + "@request_body=%(body)s===") % 
{ + "method": method, + "url": url, + "body": request_body}) + + attempt = 1 + while True: + connection.request(method, url, request_body, headers) + response = connection.getresponse() + response_body = response.read() + if response_body: + try: + data = json.loads(response_body) + json_data = json.dumps(data) + json_data = json.loads(json_data.decode('utf8')) + except ValueError: + pass + + if self.fss_debug: + LOG.info(_LI("[FSS_RESTAPI]==@json_data: %s =="), json_data) + + if response.status == 200: + return json_data + elif response.status == 404: + msg = (_('FSS rest api return failed, method=%(method)s, ' + 'uri=%(url)s, response=%(response)s') % { + "method": method, + "url": url, + "response": response_body}) + raise exception.VolumeBackendAPIException(msg) + else: + err_code = json_data['rc'] + if (attempt > RETRY_CNT) or (str(err_code) not in RETRY_LIST): + err_target = ("method=%(method)s, url=%(url)s, " + "response=%(response)s" % + {"method": method, "url": url, + "response": response_body}) + err_response = self.get_fss_error_code(err_code) + err = dict( + code=err_code, + text=err_response['key'], + reason=err_response['message'] + ) + raise FSSHTTPError(err_target, err) + attempt += 1 + LOG.warning(_LW("Retry with rc: %s."), err_code) + self._random_sleep(RETRY_INTERVAL) + if err_code == 107: + self.fss_login() + + def _random_sleep(self, interval=60): + nsleep = random.randint(10, interval * 10) + value = round(float(nsleep) / 10, 2) + time.sleep(value) + + # + # REST API session management methods + # + def fss_login(self): + url = '%s/%s' % (FSS_AUTH, FSS_LOGIN) + params = dict( + username=self.username, + password=self.password, + server=self.hostip + ) + data = self._fss_request(POST, url, params) + if 'id' in data: + self.session_id = data['id'] + return self.session_id + + # + # Physical Adapters management methods + # + + def list_physicaladapter_info(self, adapter_id=None): + url = '%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER) + if adapter_id is not None: + url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, + FSS_PHYSICALADAPTER, adapter_id) + return self._fss_request(GET, url) + + def list_fc_target_wwpn(self): + url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER, + FSS_FC_TGT_WWPN) + tgt_wwpn = [] + output = self._fss_request(GET, url) + if output['data']: + tgt_wwpns = output['data'] + for tgt_alias_wwpn in tgt_wwpns: + tgt_wwpn.append( + str(tgt_alias_wwpn['aliaswwpn'].replace('-', ''))) + return tgt_wwpn + + def list_fc_client_initiators(self): + url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER, + FSS_FCCLIENTINITIATORS) + return self._fss_request(GET, url) + + # + # storage pool management methods + # + + def list_pool_info(self, pool_id=None): + url = '%s/%s' % (FSS_PHYSICALRESOURCE, FSS_STORAGE_POOL) + if pool_id is not None: + url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, + FSS_STORAGE_POOL, pool_id) + return self._fss_request(GET, url) + + # + # Volume and snapshot management methods + # + + def create_vdev(self, params): + metadata = {} + url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN) + output = self._fss_request(POST, url, params) + if output: + if output['rc'] == 0: + metadata['FSS-vid'] = output['id'] + return metadata + + def _check_mirror_sync_finished(self, vid, timeout): + starttime = time.time() + while True: + self._random_sleep() + if time.time() > starttime + timeout: + msg = (_('FSS get mirror sync timeout on vid: %s ') % vid) + raise exception.VolumeBackendAPIException(data=msg) + elif self._check_mirror_sync_status(vid): 
+ break + + def delete_vdev(self, vid): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) + return self._fss_request(DELETE, url, dict(force=True)) + + def extend_vdev(self, vid, params): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) + return self._fss_request(PUT, url, params) + + def rename_vdev(self, vid, params): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) + return vid, self._fss_request(PUT, url, params) + + def list_fss_volume_info(self, vid=None): + url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN) + if vid is not None: + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) + return self._fss_request(GET, url) + + def _get_fss_vid_from_name(self, volume_name, fss_type=None): + vid = [] + output = self.list_fss_volume_info() + try: + if "virtualdevices" in output['data']: + for item in output['data']['virtualdevices']: + if item['name'] in volume_name: + vid.append(item['id']) + except Exception: + msg = (_('Can not find cinder volume - %s') % volume_name) + raise exception.VolumeBackendAPIException(msg) + + if fss_type is not None and fss_type == FSS_SINGLE_TYPE: + vid = ''.join(str(x) for x in vid) + return vid + + def _check_if_snapshot_tm_exist(self, vid): + snapshotenabled = False + timemarkenabled = False + sizemb = 0 + output = self.list_fss_volume_info(vid) + if "snapshotenabled" in output['data']: + snapshotenabled = output['data']['snapshotenabled'] + if "timemarkenabled" in output['data']: + timemarkenabled = output['data']['timemarkenabled'] + if "sizemb" in output['data']: + sizemb = output['data']['sizemb'] + return (snapshotenabled, timemarkenabled, sizemb) + + def create_vdev_snapshot(self, params): + url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_RESOURCE) + return self._fss_request(POST, url, params) + + def create_timemark_policy(self, vid, **kwargs): + url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, FSS_TIMEMARKPOLICY) + params = dict( + idlist=[vid], + automatic=dict(enabled=False), + maxtimemarkcount=MAXSNAPSHOTS + ) + if kwargs.get('storagepoolid'): + params.update(kwargs) + return self._fss_request(POST, url, params) + + def create_timemark(self, vid, snap_name): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) + params = dict( + comment=snap_name, + priority='medium', + snapshotnotification=False + ) + return self._fss_request(POST, url, params) + + def get_timemark(self, vid): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) + return self._fss_request(GET, url) + + def delete_timemark(self, timestamp): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, timestamp) + params = dict( + deleteallbefore=False + ) + return self._fss_request(DELETE, url, params) + + def delete_timemark_policy(self, vid): + url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, FSS_TIMEMARKPOLICY) + params = dict( + idlist=[vid] + ) + return self._fss_request(DELETE, url, params) + + def delete_vdev_snapshot(self, vid): + url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_RESOURCE) + params = dict( + idlist=[vid] + ) + return self._fss_request(DELETE, url, params) + + def copy_timemark(self, timestamp, **kwargs): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, timestamp) + params = dict( + action='copy', + includetimeviewdata=False + ) + params.update(kwargs) + return self._fss_request(PUT, url, params) + + def get_timemark_copy_status(self, vid): + url = '%s/%s/%s?type=operationstatus' % ( + FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) + return self._fss_request(GET, url) + + def 
_check_tm_copy_status(self, vid): + finished = False + output = self.get_timemark_copy_status(vid) + if output['timemarkoperationstatus']: + timemark_status = output['timemarkoperationstatus'] + if timemark_status['operation'] == "copy": + if timemark_status['status'] == 'completed': + finished = True + return finished + + def _check_tm_copy_finished(self, vid, timeout): + finished = False + starttime = time.time() + while True: + self._random_sleep() + if time.time() > starttime + timeout: + msg = (_('FSS get timemark copy timeout on vid: %s') % vid) + raise exception.VolumeBackendAPIException(data=msg) + elif self._check_tm_copy_status(vid): + finished = True + return finished + + # + # TimeView methods + # + + def create_timeview(self, tv_vid, params): + vid = '' + volume_metadata = {} + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEVIEW, tv_vid) + output = self._fss_request(POST, url, params) + if output and output['rc'] == 0: + if output['copyid'] == -1: + vid = output['id'] + else: + vid = output['copyid'] + volume_metadata['FSS-vid'] = vid + return volume_metadata + + # + # Mirror methods + # + + def create_mirror(self, vid, pool_id): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_MIRROR, vid) + params = dict( + category='virtual', + selectioncriteria='anydrive', + mirrortarget="virtual" + ) + params.update(pool_id) + return self._fss_request(POST, url, params) + + def get_mirror_sync_status(self, vid): + url = '%s/%s/%s?type=syncstatus' % ( + FSS_LOGICALRESOURCE, FSS_MIRROR, vid) + return self._fss_request(GET, url) + + def _check_mirror_sync_status(self, vid): + finished = False + output = self.get_mirror_sync_status(vid) + if output['mirrorsyncstatus']: + mirrorsyncstatus = output['mirrorsyncstatus'] + if mirrorsyncstatus['status'] == "insync": + if mirrorsyncstatus['percentage'] == 0: + finished = True + return finished + + def _set_mirror(self, vid, **kwargs): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_MIRROR, vid) + return self._fss_request(PUT, url, kwargs) + + def sync_mirror(self, vid): + return self._set_mirror(vid, action='sync') + + def promote_mirror(self, vid, new_volume_name): + return self._set_mirror(vid, action='promote', name=new_volume_name) + + # + # Host management methods + # + + def get_server_options(self): + url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) + return self._fss_request(GET, url) + + def set_server_options(self, action): + url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) + params = dict( + action=action, + enabled=True + ) + return self._fss_request(PUT, url, params) + + def get_server_name(self): + url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) + return self._fss_request(GET, url) + + # + # SAN Client management methods + # + + def list_client_initiators(self): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, + FSS_ISCSI_CLIENT_INITIATORS) + return self._fss_request(GET, url) + + def get_default_portal(self): + url = '%s/%s/%s' % (FSS_SERVER, FSS_OPTIONS, FSS_PORTAL) + return self._fss_request(GET, url) + + def create_client(self, params): + url = '%s/%s' % (FSS_CLIENT, FSS_SANCLIENT) + return self._fss_request(POST, url, params) + + def list_sanclient_info(self, client_id=None): + url = '%s/%s' % (FSS_CLIENT, FSS_SANCLIENT) + if client_id is not None: + url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, + client_id) + return self._fss_request(GET, url) + + def assign_vdev(self, client_id, params): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) + return self._fss_request(PUT, url, params) + + def unassign_vdev(self, client_id, params): + url = 
'%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) + return self._fss_request(PUT, url, params) + + def _get_iscsi_target_info(self, client_id, vid): + lun = 0 + target_name = None + output = self.list_sanclient_info(client_id) + + if 'data' not in output: + msg = _('No target information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'iscsidevices' not in output['data']: + msg = _('No iscsidevices information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for iscsidevices in output['data']['iscsidevices']: + if int(vid) == int(iscsidevices['id']): + lun = iscsidevices['lun'] + iscsitarget_info = iscsidevices['iscsitarget'] + for key, value in iscsitarget_info.items(): + if key == 'name': + target_name = value + + return lun, target_name + + def _check_host_mapping_status(self, client_id, target_id): + is_empty = False + hosting_cnt = 0 + output = self.list_sanclient_info(client_id) + if 'data' not in output: + msg = _('No target in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'iscsidevices' not in output['data']: + msg = _('No iscsidevices information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if len(output['data']['iscsidevices']) == 0: + is_empty = True + else: + for iscsidevices in output['data']['iscsidevices']: + iscsitarget_info = iscsidevices['iscsitarget'] + for key, value in iscsitarget_info.items(): + if key == 'id' and target_id == value: + hosting_cnt += 1 + + if hosting_cnt == 0: + is_empty = True + return is_empty + + def list_iscsi_target_info(self, target_id=None): + url = '%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET) + if target_id is not None: + url = '%s/%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET, + target_id) + return self._fss_request(GET, url) + + def _get_iscsi_target_id(self, initiator_iqn): + target_id = '' + client_id = '' + output = self.list_iscsi_target_info() + + if 'data' not in output: + msg = _('No target in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'iscsitargets' not in output['data']: + msg = _('No iscsitargets for target.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for targets in output['data']['iscsitargets']: + if 'name' in targets: + if initiator_iqn in targets['name']: + target_id = str(targets['id']) + client_id = str(targets['clientid']) + break + return target_id, client_id + + def create_iscsitarget(self, params): + url = '%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET) + return self._fss_request(POST, url, params) + + def delete_iscsi_target(self, target_id): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET, target_id) + params = dict( + force=True + ) + return self._fss_request(DELETE, url, params) + + def delete_client(self, client_id): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) + return self._fss_request(DELETE, url) + + def _get_fc_client_info(self, client_id, vid): + lun = 0 + output = self.list_sanclient_info(client_id) + if 'data' not in output: + msg = _('No target information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'fcdevices' not in output['data']: + msg = _('No fcdevices information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for fcdevices in output['data']['fcdevices']: + if int(vid) == int(fcdevices['id']): + lun = fcdevices['lun'] + + return lun + + # + # Group 
related methods + # + + def create_group(self, params): + url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP) + return self._fss_request(POST, url, params) + + def list_group_info(self, gid=None): + if gid is not None: + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) + else: + url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP) + return self._fss_request(GET, url) + + def set_group(self, gid, join_params=None, leave_params=None): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) + if join_params: + self._fss_request(PUT, url, join_params) + if leave_params: + self._fss_request(PUT, url, leave_params) + + def create_group_timemark_policy(self, gid, params): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARKPOLICY, gid) + return self._fss_request(POST, url, params) + + def _check_if_group_tm_enabled(self, gid): + timemarkenabled = False + output = self.list_group_info(gid) + if "timemarkenabled" in output['data']: + timemarkenabled = output['data']['timemarkenabled'] + return timemarkenabled + + def create_group_timemark(self, gid, params): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, gid) + return self._fss_request(POST, url, params) + + def get_group_timemark(self, gid): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, gid) + return self._fss_request(GET, url) + + def delete_group_timemark(self, timestamp, params): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, timestamp) + return self._fss_request(DELETE, url, params) + + def delete_group_timemark_policy(self, gid): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARKPOLICY, gid) + return self._fss_request(DELETE, url) + + def delete_snapshot_group(self, gid): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) + return self._fss_request(DELETE, url) + + def destroy_group(self, gid): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) + return self._fss_request(DELETE, url) + + def get_fss_error_code(self, err_id): + try: + url = '%s/%s/%s' % (FSS_SERVER, FSS_RETURN_CODE, err_id) + output = self._fss_request(GET, url) + if output['rc'] == 0: + return output + except Exception: + msg = (_('Can not find this error code:%s.') % err_id) + raise exception.APIException(reason=msg) + + +class FSSHTTPError(Exception): + + def __init__(self, target, response): + super(FSSHTTPError, self).__init__() + self.target = target + self.code = response['code'] + self.text = response['text'] + self.reason = response['reason'] + + def __str__(self): + msg = ("FSSHTTPError code {0} returned by REST at {1}: {2}\n{3}") + return msg.format(self.code, self.target, + self.reason, self.text) diff --git a/cinder/volume/drivers/fujitsu/eternus_dx_fc.py b/cinder/volume/drivers/fujitsu/eternus_dx_fc.py index 2d9c3ebba..722d892ae 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx_fc.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx_fc.py @@ -34,6 +34,10 @@ LOG = logging.getLogger(__name__) class FJDXFCDriver(driver.FibreChannelDriver): """FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.""" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Fujitsu_ETERNUS_CI" + VERSION = eternus_dx_common.FJDXCommon.VERSION + def __init__(self, *args, **kwargs): super(FJDXFCDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py b/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py index 
45626960b..ca8782277 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py @@ -34,6 +34,10 @@ LOG = logging.getLogger(__name__) class FJDXISCSIDriver(driver.ISCSIDriver): """iSCSI Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.""" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Fujitsu_ETERNUS_CI" + VERSION = eternus_dx_common.FJDXCommon.VERSION + def __init__(self, *args, **kwargs): super(FJDXISCSIDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/fusionstorage/dsware.py b/cinder/volume/drivers/fusionstorage/dsware.py index b7fcc5bec..bd08f6723 100644 --- a/cinder/volume/drivers/fusionstorage/dsware.py +++ b/cinder/volume/drivers/fusionstorage/dsware.py @@ -69,6 +69,9 @@ class DSWAREDriver(driver.VolumeDriver): """Huawei FusionStorage Driver.""" VERSION = '1.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Huawei_volume_CI" + DSWARE_VOLUME_CREATE_SUCCESS_STATUS = 0 DSWARE_VOLUME_DUPLICATE_VOLUME = 6 DSWARE_VOLUME_CREATING_STATUS = 7 diff --git a/cinder/volume/drivers/glusterfs.py b/cinder/volume/drivers/glusterfs.py index 1b2c282d5..c0e963622 100644 --- a/cinder/volume/drivers/glusterfs.py +++ b/cinder/volume/drivers/glusterfs.py @@ -24,6 +24,7 @@ from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units +from cinder import coordination from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils @@ -49,7 +50,7 @@ CONF.register_opts(volume_opts) @interface.volumedriver -class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, +class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriverDistributed, driver.ExtendVD): """Gluster based cinder driver. @@ -65,6 +66,9 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, volume_backend_name = 'GlusterFS' VERSION = '1.3.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Cinder_Jenkins" + def __init__(self, execute=processutils.execute, *args, **kwargs): self._remotefsclient = None super(GlusterfsDriver, self).__init__(*args, **kwargs) @@ -81,6 +85,9 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, """Any initialization the volume driver does while starting.""" super(GlusterfsDriver, self).do_setup(context) + LOG.warning(_LW("The GlusterFS volume driver is deprecated and " + "will be removed during the Ocata cycle.")) + config = self.configuration.glusterfs_shares_config if not config: msg = (_("There's no Gluster config file configured (%s)") % @@ -181,7 +188,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, self._stats = data - @remotefs_drv.locked_volume_id_operation + @coordination.synchronized('{self.driver_prefix}-{volume[id]}') def create_volume(self, volume): """Creates a volume.""" @@ -235,7 +242,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, self._set_rw_permissions_for_all(path_to_new_vol) - @remotefs_drv.locked_volume_id_operation + @coordination.synchronized('{self.driver_prefix}-{volume[id]}') def delete_volume(self, volume): """Deletes a logical volume.""" @@ -280,7 +287,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, def validate_connector(self, connector): pass - @remotefs_drv.locked_volume_id_operation + @coordination.synchronized('{self.driver_prefix}-{volume[id]}') def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" @@ -312,7 +319,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, """Disallow connection from connector.""" pass - 
@remotefs_drv.locked_volume_id_operation + @coordination.synchronized('{self.driver_prefix}-{volume[id]}') def extend_volume(self, volume, size_gb): volume_path = self._active_volume_path(volume) diff --git a/cinder/volume/drivers/hgst.py b/cinder/volume/drivers/hgst.py index 19865346d..d96396282 100644 --- a/cinder/volume/drivers/hgst.py +++ b/cinder/volume/drivers/hgst.py @@ -85,6 +85,10 @@ class HGSTDriver(driver.VolumeDriver): """ VERSION = '1.0.0' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "HGST_Solutions_CI" + VGCCLUSTER = 'vgc-cluster' SPACEGB = units.G - 16 * units.M # Workaround for shrinkage Bug 28320 BLOCKED = "BLOCKED" # Exit code when a command is blocked diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py index afd2e0e6d..14eedd88e 100644 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ b/cinder/volume/drivers/hitachi/hbsd_fc.py @@ -49,6 +49,9 @@ CONF.register_opts(volume_opts) class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver): VERSION = common.VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = ["Hitachi_HBSD_CI", "Hitachi_HBSD2_CI"] + def __init__(self, *args, **kwargs): os.environ['LANG'] = 'C' super(HBSDFCDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py index 904290a81..263cf6732 100644 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py @@ -57,6 +57,9 @@ CONF.register_opts(volume_opts) class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver): VERSION = common.VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = ["Hitachi_HBSD_CI", "Hitachi_HBSD2_CI"] + def __init__(self, *args, **kwargs): os.environ['LANG'] = 'C' super(HBSDISCSIDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/hitachi/hnas_backend.py b/cinder/volume/drivers/hitachi/hnas_backend.py index 36506aaf0..76ddc9345 100644 --- a/cinder/volume/drivers/hitachi/hnas_backend.py +++ b/cinder/volume/drivers/hitachi/hnas_backend.py @@ -18,14 +18,12 @@ Hitachi Unified Storage (HUS-HNAS) platform. Backend operations. """ -import re - from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import units import six -from cinder.i18n import _, _LW, _LI, _LE +from cinder.i18n import _, _LE from cinder import exception from cinder import ssh_utils from cinder import utils @@ -34,727 +32,497 @@ LOG = logging.getLogger("cinder.volume.driver") HNAS_SSC_RETRIES = 5 -class HnasBackend(object): - """Back end. Talks to HUS-HNAS.""" - def __init__(self, drv_configs): - self.drv_configs = drv_configs +class HNASSSHBackend(object): + def __init__(self, backend_opts): + + self.mgmt_ip0 = backend_opts.get('mgmt_ip0') + self.hnas_cmd = backend_opts.get('ssc_cmd', 'ssc') + self.cluster_admin_ip0 = backend_opts.get('cluster_admin_ip0') + self.ssh_port = backend_opts.get('ssh_port', '22') + self.ssh_username = backend_opts.get('username') + self.ssh_pwd = backend_opts.get('password') + self.ssh_private_key = backend_opts.get('ssh_private_key') + self.storage_version = None self.sshpool = None + self.fslist = {} + self.tgt_list = {} @utils.retry(exceptions=exception.HNASConnError, retries=HNAS_SSC_RETRIES, wait_random=True) - def run_cmd(self, cmd, ip0, user, pw, *args, **kwargs): - """Run a command on SMU or using SSH + def _run_cmd(self, *args, **kwargs): + """Runs a command on SMU using SSH. 
- :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :returns: formated string with version information + :returns: stdout and stderr of the command """ - LOG.debug('Enable ssh: %s', - six.text_type(self.drv_configs['ssh_enabled'])) + if self.cluster_admin_ip0 is None: + # Connect to SMU through SSH and run ssc locally + args = (self.hnas_cmd, 'localhost') + args + else: + args = (self.hnas_cmd, '--smuauth', self.cluster_admin_ip0) + args - if self.drv_configs['ssh_enabled'] != 'True': - # Direct connection via ssc - args = (cmd, '--user', user, '--password', pw, ip0) + args + utils.check_ssh_injection(args) + command = ' '.join(args) + command = command.replace('"', '\\"') + if not self.sshpool: + self.sshpool = ssh_utils.SSHPool(ip=self.mgmt_ip0, + port=int(self.ssh_port), + conn_timeout=None, + login=self.ssh_username, + password=self.ssh_pwd, + privatekey=self.ssh_private_key) + + with self.sshpool.item() as ssh: try: - out, err = utils.execute(*args, **kwargs) - LOG.debug("command %(cmd)s result: out = %(out)s - err = " - "%(err)s", {'cmd': cmd, 'out': out, 'err': err}) + out, err = putils.ssh_execute(ssh, command, + check_exit_code=True) + LOG.debug("command %(cmd)s result: out = " + "%(out)s - err = %(err)s", + {'cmd': self.hnas_cmd, 'out': out, 'err': err}) return out, err except putils.ProcessExecutionError as e: if 'Failed to establish SSC connection' in e.stderr: - LOG.debug("SSC connection error!") - msg = _("Failed to establish SSC connection.") + msg = _("Failed to establish SSC connection!") + LOG.exception(msg) raise exception.HNASConnError(msg) elif 'Connection reset' in e.stderr: - LOG.debug("HNAS connection reset!") - msg = _("HNAS has disconnected SSC") + msg = _("HNAS connection reset!") + LOG.exception(msg) raise exception.HNASConnError(msg) else: raise - else: - if self.drv_configs['cluster_admin_ip0'] is None: - # Connect to SMU through SSH and run ssc locally - args = (cmd, 'localhost') + args - else: - args = (cmd, '--smuauth', - self.drv_configs['cluster_admin_ip0']) + args - utils.check_ssh_injection(args) - command = ' '.join(args) - command = command.replace('"', '\\"') + def get_version(self): + """Gets version information from the storage unit. 
- if not self.sshpool: - server = self.drv_configs['mgmt_ip0'] - port = int(self.drv_configs['ssh_port']) - username = self.drv_configs['username'] - # We only accept private/public key auth - password = "" - privatekey = self.drv_configs['ssh_private_key'] - self.sshpool = ssh_utils.SSHPool(server, - port, - None, - username, - password=password, - privatekey=privatekey) - - with self.sshpool.item() as ssh: - - try: - out, err = putils.ssh_execute(ssh, command, - check_exit_code=True) - LOG.debug("command %(cmd)s result: out = " - "%(out)s - err = %(err)s", - {'cmd': cmd, 'out': out, 'err': err}) - return out, err - except putils.ProcessExecutionError as e: - if 'Failed to establish SSC connection' in e.stderr: - LOG.debug("SSC connection error!") - msg = _("Failed to establish SSC connection.") - raise exception.HNASConnError(msg) - else: - raise putils.ProcessExecutionError - - def get_version(self, cmd, ver, ip0, user, pw): - """Gets version information from the storage unit - - :param cmd: ssc command name - :param ver: string driver version - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :returns: formatted string with version information - """ - out, err = self.run_cmd(cmd, ip0, user, pw, "cluster-getmac", - check_exit_code=True) - hardware = out.split()[2] - - out, err = self.run_cmd(cmd, ip0, user, pw, "ver", - check_exit_code=True) - lines = out.split('\n') - - model = "" - for line in lines: - if 'Model:' in line: - model = line.split()[1] - if 'Software:' in line: - ver = line.split()[1] - - # If not using SSH, the local utility version can be different from the - # one used in HNAS - if self.drv_configs['ssh_enabled'] != 'True': - out, err = utils.execute(cmd, "-version", check_exit_code=True) - util = out.split()[1] - - out = ("Array_ID: %(arr)s (%(mod)s) version: %(ver)s LU: 256 " - "RG: 0 RG_LU: 0 Utility_version: %(util)s" % - {'arr': hardware, 'mod': model, 'ver': ver, 'util': util}) - else: - out = ("Array_ID: %(arr)s (%(mod)s) version: %(ver)s LU: 256 " - "RG: 0 RG_LU: 0" % - {'arr': hardware, 'mod': model, 'ver': ver}) - - LOG.debug('get_version: %(out)s -- %(err)s', {'out': out, 'err': err}) - return out - - def get_iscsi_info(self, cmd, ip0, user, pw): - """Gets IP addresses for EVSs, use EVSID as controller. 
- - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :returns: formated string with iSCSI information + :returns: dictionary with HNAS information + storage_version={ + 'mac': HNAS MAC ID, + 'model': HNAS model, + 'version': the software version, + 'hardware': the hardware version, + 'serial': HNAS serial number} """ + if not self.storage_version: + version_info = {} + out, err = self._run_cmd("cluster-getmac") + mac = out.split(':')[1].strip() + version_info['mac'] = mac - out, err = self.run_cmd(cmd, ip0, user, pw, - 'evsipaddr', '-l', - check_exit_code=True) - lines = out.split('\n') + out, err = self._run_cmd("ver") + split_out = out.split('\n') - newout = "" - for line in lines: + model = split_out[1].split(':')[1].strip() + version = split_out[3].split()[1] + hardware = split_out[5].split(':')[1].strip() + serial = split_out[12].split()[2] + + version_info['model'] = model + version_info['version'] = version + version_info['hardware'] = hardware + version_info['serial'] = serial + + self.storage_version = version_info + + LOG.debug("version_info: %(info)s", {'info': self.storage_version}) + return self.storage_version + + def get_evs_info(self): + """Gets the IP addresses of all EVSs in HNAS. + + :returns: dictionary with EVS information + evs_info={ + <IP1>: {evs_number: number identifying the EVS1 on HNAS}, + <IP2>: {evs_number: number identifying the EVS2 on HNAS}, + ... + } + """ + evs_info = {} + out, err = self._run_cmd("evsipaddr", "-l") + + out = out.split('\n') + for line in out: if 'evs' in line and 'admin' not in line: - inf = line.split() - (evsnum, ip) = (inf[1], inf[3]) - newout += "CTL: %s Port: 0 IP: %s Port: 3260 Link: Up\n" \ - % (evsnum, ip) + ip = line.split()[3].strip() + evs_info[ip] = {} + evs_info[ip]['evs_number'] = line.split()[1].strip() - LOG.debug('get_iscsi_info: %(out)s -- %(err)s', - {'out': out, 'err': err}) - return newout + return evs_info - def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None): - """Gets the list of filesystems and fsids. + def get_fs_info(self, fs_label): + """Gets the information of a given FS.
- - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param fslabel: filesystem label we want to get info - :returns: formated string with filesystems and fsids + :param fs_label: Label of the filesystem + :returns: dictionary with FS information + fs_info={ + 'id': the filesystem ID, + 'label': the filesystem label, + 'evs_id': the ID of the EVS in which the filesystem is created + (not present if there is a single EVS), + 'total_size': the total size of the FS (in GB), + 'used_size': the size that is already used (in GB), + 'available_size': the free space (in GB) + } """ + def _convert_size(param): + size = float(param) * units.Mi + return six.text_type(size) - if fslabel is None: - out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-a', - check_exit_code=True) - else: - out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-f', fslabel, - check_exit_code=True) - - lines = out.split('\n') + fs_info = {} single_evs = True + id, lbl, evs, t_sz, u_sz, a_sz = 0, 1, 2, 3, 5, 12 + t_sz_unit, u_sz_unit, a_sz_unit = 4, 6, 13 - LOG.debug("Parsing output: %s", lines) + out, err = self._run_cmd("df", "-af", fs_label) - newout = "" - for line in lines: - if 'Not mounted' in line or 'Not determined' in line: - continue - if 'not' not in line and 'EVS' in line: - single_evs = False - if 'GB' in line or 'TB' in line: - LOG.debug("Parsing output: %s", line) - inf = line.split() + invalid_outs = ['Not mounted', 'Not determined', 'not found'] - if not single_evs: - (fsid, fslabel, capacity) = (inf[0], inf[1], inf[3]) - (used, perstr) = (inf[5], inf[7]) - (availunit, usedunit) = (inf[4], inf[6]) - else: - (fsid, fslabel, capacity) = (inf[0], inf[1], inf[2]) - (used, perstr) = (inf[4], inf[6]) - (availunit, usedunit) = (inf[3], inf[5]) + for problem in invalid_outs: + if problem in out: + return {} - if usedunit == 'GB': - usedmultiplier = units.Ki - else: - usedmultiplier = units.Mi - if availunit == 'GB': - availmultiplier = units.Ki - else: - availmultiplier = units.Mi - m = re.match("\((\d+)\%\)", perstr) - if m: - percent = m.group(1) - else: - percent = 0 - newout += "HDP: %s %d MB %d MB %d %% LUs: 256 Normal %s\n" \ - % (fsid, int(float(capacity) * availmultiplier), - int(float(used) * usedmultiplier), - int(percent), fslabel) + if 'EVS' in out: + single_evs = False - LOG.debug('get_hdp_info: %(out)s -- %(err)s', - {'out': newout, 'err': err}) - return newout + fs_data = out.split('\n')[3].split() + + # Getting only the desired values from the output. If there is a single + # EVS, its ID is not shown in the output and we have to decrease the + # indexes to get the right values. + fs_info['id'] = fs_data[id] + fs_info['label'] = fs_data[lbl] + + if not single_evs: + fs_info['evs_id'] = fs_data[evs] + + fs_info['total_size'] = ( + (fs_data[t_sz]) if not single_evs else fs_data[t_sz - 1]) + fs_info['used_size'] = ( + fs_data[u_sz] if not single_evs else fs_data[u_sz - 1]) + fs_info['available_size'] = ( + fs_data[a_sz] if not single_evs else fs_data[a_sz - 1]) + + # Converting the sizes if necessary.
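# (Editorial sketch, not part of this commit; the sample rows below are made up.)
# Both the index bookkeeping above and the unit checks that follow hinge on the
# same quirk of the HNAS 'df' output: when there is a single EVS, the EVS-ID
# column is omitted, so every column from 'evs' onward shifts left by one and
# each index has to be decreased accordingly. Roughly:
#
#   multi-EVS row:  1024  fs01  2  7.0  TB  ...  -> total size at fs_data[t_sz]
#   single-EVS row: 1024  fs01  7.0  TB  ...     -> total size at fs_data[t_sz - 1]
#
# The same shift applies to the unit columns (t_sz_unit, u_sz_unit, a_sz_unit)
# that the conversion below inspects before rescaling TB values.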
+ if not single_evs: + if fs_data[t_sz_unit] == 'TB': + fs_info['total_size'] = _convert_size(fs_info['total_size']) + if fs_data[u_sz_unit] == 'TB': + fs_info['used_size'] = _convert_size(fs_info['used_size']) + if fs_data[a_sz_unit] == 'TB': + fs_info['available_size'] = _convert_size( + fs_info['available_size']) + else: + if fs_data[t_sz_unit - 1] == 'TB': + fs_info['total_size'] = _convert_size(fs_info['total_size']) + if fs_data[u_sz_unit - 1] == 'TB': + fs_info['used_size'] = _convert_size(fs_info['used_size']) + if fs_data[a_sz_unit - 1] == 'TB': + fs_info['available_size'] = _convert_size( + fs_info['available_size']) + + LOG.debug("File system info of %(fs)s (sizes in GB): %(info)s.", + {'fs': fs_label, 'info': fs_info}) + + return fs_info + + def get_evs(self, fs_label): + """Gets the EVS ID for the named filesystem. + + :param fs_label: The filesystem label related to the EVS required + :returns: EVS ID of the filesystem """ + if not self.fslist: + self._get_fs_list() - out, err = self.run_cmd(cmd, ip0, user, pw, "evsfs", "list", - check_exit_code=True) - LOG.debug('get_evs: out %s.', out) + # When the FS is found in the list of known FS, returns the EVS ID + for key in self.fslist: + if fs_label == self.fslist[key]['label']: + LOG.debug("EVS ID for fs %(fs)s: %(id)s.", + {'fs': fs_label, 'id': self.fslist[key]['evsid']}) + return self.fslist[key]['evsid'] + LOG.debug("Can't find EVS ID for fs %(fs)s.", {'fs': fs_label}) - lines = out.split('\n') - for line in lines: - inf = line.split() - if fsid in line and (fsid == inf[0] or fsid == inf[1]): - return inf[3] + def _get_targets(self, evs_id, tgt_alias=None, refresh=False): + """Gets the target list of an EVS. - LOG.warning(_LW('get_evs: %(out)s -- No find for %(fsid)s'), - {'out': out, 'fsid': fsid}) - return 0 - - def _get_evsips(self, cmd, ip0, user, pw, evsid): - """Gets the EVS IPs for the named filesystem.""" - - out, err = self.run_cmd(cmd, ip0, user, pw, - 'evsipaddr', '-e', evsid, - check_exit_code=True) - - iplist = "" - lines = out.split('\n') - for line in lines: - inf = line.split() - if 'evs' in line: - iplist += inf[3] + ' ' - - LOG.debug('get_evsips: %s', iplist) - return iplist - - def _get_fsid(self, cmd, ip0, user, pw, fslabel): - """Gets the FSID for the named filesystem.""" - - out, err = self.run_cmd(cmd, ip0, user, pw, 'evsfs', 'list', - check_exit_code=True) - LOG.debug('get_fsid: out %s', out) - - lines = out.split('\n') - for line in lines: - inf = line.split() - if fslabel in line and fslabel == inf[1]: - LOG.debug('get_fsid: %s', line) - return inf[0] - - LOG.warning(_LW('get_fsid: %(out)s -- No info for %(fslabel)s'), - {'out': out, 'fslabel': fslabel}) - return 0 - - def _get_targets(self, cmd, ip0, user, pw, evsid, tgtalias=None): - """Get the target list of an EVS. - - Get the target list of an EVS. Optionally can return the target - list of a specific target. + Gets the target list of an EVS. Optionally can return the information + of a specific target. 
+ :returns: Target list or Target info (EVS ID) or empty list """ + LOG.debug("Getting target list for evs %(evs)s, tgtalias: %(tgt)s.", + {'evs': evs_id, 'tgt': tgt_alias}) - LOG.debug("Getting target list for evs %s, tgtalias: %s.", - evsid, tgtalias) + if (refresh or + evs_id not in self.tgt_list.keys() or + tgt_alias is not None): + self.tgt_list[evs_id] = [] + out, err = self._run_cmd("console-context", "--evs", evs_id, + 'iscsi-target', 'list') - try: - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", evsid, 'iscsi-target', 'list', - check_exit_code=True) - except putils.ProcessExecutionError as e: - LOG.error(_LE('Error getting iSCSI target info ' - 'from EVS %(evs)s.'), {'evs': evsid}) - LOG.debug("_get_targets out: %(out)s, err: %(err)s.", - {'out': e.stdout, 'err': e.stderr}) - return [] + if 'No targets' in out: + LOG.debug("No targets found in EVS %(evsid)s.", + {'evsid': evs_id}) + return self.tgt_list[evs_id] - tgt_list = [] - if 'No targets' in out: - LOG.debug("No targets found in EVS %(evsid)s.", {'evsid': evsid}) - return tgt_list + tgt_raw_list = out.split('Alias')[1:] + for tgt_raw_info in tgt_raw_list: + tgt = {} + tgt['alias'] = tgt_raw_info.split('\n')[0].split(' ').pop() + tgt['iqn'] = tgt_raw_info.split('\n')[1].split(' ').pop() + tgt['secret'] = tgt_raw_info.split('\n')[3].split(' ').pop() + tgt['auth'] = tgt_raw_info.split('\n')[4].split(' ').pop() + lus = [] + tgt_raw_info = tgt_raw_info.split('\n\n')[1] + tgt_raw_list = tgt_raw_info.split('\n')[2:] - tgt_raw_list = out.split('Alias')[1:] - for tgt_raw_info in tgt_raw_list: - tgt = {} - tgt['alias'] = tgt_raw_info.split('\n')[0].split(' ').pop() - tgt['iqn'] = tgt_raw_info.split('\n')[1].split(' ').pop() - tgt['secret'] = tgt_raw_info.split('\n')[3].split(' ').pop() - tgt['auth'] = tgt_raw_info.split('\n')[4].split(' ').pop() - luns = [] - tgt_raw_info = tgt_raw_info.split('\n\n')[1] - tgt_raw_list = tgt_raw_info.split('\n')[2:] + for lu_raw_line in tgt_raw_list: + lu_raw_line = lu_raw_line.strip() + lu_raw_line = lu_raw_line.split(' ') + lu = {} + lu['id'] = lu_raw_line[0] + lu['name'] = lu_raw_line.pop() + lus.append(lu) - for lun_raw_line in tgt_raw_list: - lun_raw_line = lun_raw_line.strip() - lun_raw_line = lun_raw_line.split(' ') - lun = {} - lun['id'] = lun_raw_line[0] - lun['name'] = lun_raw_line.pop() - luns.append(lun) + tgt['lus'] = lus - tgt['luns'] = luns + if tgt_alias == tgt['alias']: + return tgt - if tgtalias == tgt['alias']: - return [tgt] + self.tgt_list[evs_id].append(tgt) - tgt_list.append(tgt) - - if tgtalias is not None: - # We tried to find 'tgtalias' but didn't find. Return an empty + if tgt_alias is not None: + # We tried to find 'tgtalias' but didn't find it. Return an empty # list. LOG.debug("There's no target %(alias)s in EVS %(evsid)s.", - {'alias': tgtalias, 'evsid': evsid}) + {'alias': tgt_alias, 'evsid': evs_id}) return [] LOG.debug("Targets in EVS %(evs)s: %(tgtl)s.", - {'evs': evsid, 'tgtl': tgt_list}) + {'evs': evs_id, 'tgtl': self.tgt_list[evs_id]}) - def _get_unused_lunid(self, cmd, ip0, user, pw, tgt_info): + return self.tgt_list[evs_id] - if len(tgt_info['luns']) == 0: + def _get_unused_luid(self, tgt_info): + """Gets a free logical unit id number to be used.
+ + :param tgt_info: dictionary with the target information + :returns: a free logical unit id number + """ + if len(tgt_info['lus']) == 0: return 0 - free_lun = 0 - for lun in tgt_info['luns']: - if int(lun['id']) == free_lun: - free_lun += 1 + free_lu = 0 + for lu in tgt_info['lus']: + if int(lu['id']) == free_lu: + free_lu += 1 - if int(lun['id']) > free_lun: - # Found a free LUN number + if int(lu['id']) > free_lu: + # Found a free LU number break - return free_lun + LOG.debug("Found the free LU ID: %(lu)s.", {'lu': free_lu}) - def get_nfs_info(self, cmd, ip0, user, pw): - """Gets information on each NFS export. + return free_lu - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :returns: formated string - """ - - out, err = self.run_cmd(cmd, ip0, user, pw, - 'for-each-evs', '-q', - 'nfs-export', 'list', - check_exit_code=True) - - lines = out.split('\n') - newout = "" - export = "" - path = "" - for line in lines: - inf = line.split() - if 'Export name' in line: - export = inf[2] - if 'Export path' in line: - path = inf[2] - if 'File system info' in line: - fs = "" - if 'File system label' in line: - fs = inf[3] - if 'Transfer setting' in line and fs != "": - fsid = self._get_fsid(cmd, ip0, user, pw, fs) - evsid = self.get_evs(cmd, ip0, user, pw, fsid) - ips = self._get_evsips(cmd, ip0, user, pw, evsid) - newout += "Export: %s Path: %s HDP: %s FSID: %s \ - EVS: %s IPS: %s\n" \ - % (export, path, fs, fsid, evsid, ips) - fs = "" - - LOG.debug('get_nfs_info: %(out)s -- %(err)s', - {'out': newout, 'err': err}) - return newout - - def create_lu(self, cmd, ip0, user, pw, hdp, size, name): + def create_lu(self, fs_label, size, lu_name): """Creates a new Logical Unit. If the operation can not be performed for some reason, utils.execute() throws an error and aborts the operation. Used for iSCSI only - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param hdp: data Pool the logical unit will be created - :param size: Size (Mb) of the new logical unit - :param name: name of the logical unit - :returns: formated string with 'LUN %d HDP: %d size: %s MB, is - successfully created' + :param fs_label: data pool the Logical Unit will be created + :param size: Size (GB) of the new Logical Unit + :param lu_name: name of the Logical Unit """ + evs_id = self.get_evs(fs_label) - _evsid = self.get_evs(cmd, ip0, user, pw, hdp) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-lu', 'add', "-e", - name, hdp, - '/.cinder/' + name + '.iscsi', - size + 'M', - check_exit_code=True) + self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'add', + "-e", lu_name, fs_label, '/.cinder/' + lu_name + + '.iscsi', size + 'G') - out = "LUN %s HDP: %s size: %s MB, is successfully created" \ - % (name, hdp, size) + LOG.debug('Created %(size)s GB LU: %(name)s FS: %(fs)s.', + {'size': size, 'name': lu_name, 'fs': fs_label}) - LOG.debug('create_lu: %s.', out) - return out + def delete_lu(self, fs_label, lu_name): + """Deletes a Logical Unit. - def delete_lu(self, cmd, ip0, user, pw, hdp, lun): - """Delete an logical unit. 
Used for iSCSI only - - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param hdp: data Pool of the logical unit - :param lun: id of the logical unit being deleted - :returns: formated string 'Logical unit deleted successfully.' + :param fs_label: data pool of the Logical Unit + :param lu_name: id of the Logical Unit being deleted """ + evs_id = self.get_evs(fs_label) + self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'del', + '-d', '-f', lu_name) - _evsid = self.get_evs(cmd, ip0, user, pw, hdp) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-lu', 'del', '-d', - '-f', lun, - check_exit_code=True) + LOG.debug('LU %(lu)s deleted.', {'lu': lu_name}) - LOG.debug('delete_lu: %(out)s -- %(err)s.', {'out': out, 'err': err}) - return out - - def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name): - """Clones a volume - - Clone primitive used to support all iSCSI snapshot/cloning functions. - Used for iSCSI only. - - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param hdp: data Pool of the logical unit - :param src_lun: id of the logical unit being deleted - :param size: size of the LU being cloned. Only for logging purposes - :returns: formated string - """ - - _evsid = self.get_evs(cmd, ip0, user, pw, hdp) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-lu', 'clone', '-e', - src_lun, name, - '/.cinder/' + name + '.iscsi', - check_exit_code=True) - - out = "LUN %s HDP: %s size: %s MB, is successfully created" \ - % (name, hdp, size) - - LOG.debug('create_dup: %(out)s -- %(err)s.', {'out': out, 'err': err}) - return out - - def file_clone(self, cmd, ip0, user, pw, fslabel, src, name): - """Clones NFS files to a new one named 'name' + def file_clone(self, fs_label, src, name): + """Clones NFS files to a new one named 'name'. Clone primitive used to support all NFS snapshot/cloning functions. - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param fslabel: file system label of the new file + :param fs_label: file system label of the new file :param src: source file :param name: target path of the new created file - :returns: formated string """ + fs_list = self._get_fs_list() + fs = fs_list.get(fs_label) + if not fs: + LOG.error(_LE("Can't find file %(file)s in FS %(label)s"), + {'file': src, 'label': fs_label}) + msg = _('FS label: %(fs_label)s') % {'fs_label': fs_label} + raise exception.InvalidParameterValue(err=msg) - _fsid = self._get_fsid(cmd, ip0, user, pw, fslabel) - _evsid = self.get_evs(cmd, ip0, user, pw, _fsid) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'file-clone-create', '-f', fslabel, - src, name, - check_exit_code=True) + self._run_cmd("console-context", "--evs", fs['evsid'], + 'file-clone-create', '-f', fs_label, src, name) + LOG.debug('file_clone: fs:%(fs_label)s %(src)s/src: -> %(name)s/dst', + {'fs_label': fs_label, 'src': src, 'name': name}) - out = "LUN %s HDP: %s Clone: %s -> %s" % (name, _fsid, src, name) + def extend_lu(self, fs_label, new_size, lu_name): + """Extends an iSCSI volume. 
- LOG.debug('file_clone: %(out)s -- %(err)s.', {'out': out, 'err': err}) - return out - - def extend_vol(self, cmd, ip0, user, pw, hdp, lun, new_size, name): - """Extend a iSCSI volume. - - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param hdp: data Pool of the logical unit - :param lun: id of the logical unit being extended - :param new_size: new size of the LU - :param name: formated string + :param fs_label: data pool of the Logical Unit + :param new_size: new size of the Logical Unit + :param lu_name: name of the Logical Unit """ + evs_id = self.get_evs(fs_label) + size = six.text_type(new_size) + self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'expand', + lu_name, size + 'G') - _evsid = self.get_evs(cmd, ip0, user, pw, hdp) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-lu', 'expand', - name, new_size + 'M', - check_exit_code=True) - - out = ("LUN: %s successfully extended to %s MB" % (name, new_size)) - - LOG.debug('extend_vol: %s.', out) - return out + LOG.debug('LU %(lu)s extended.', {'lu': lu_name}) @utils.retry(putils.ProcessExecutionError, retries=HNAS_SSC_RETRIES, wait_random=True) - def add_iscsi_conn(self, cmd, ip0, user, pw, lun_name, hdp, - port, tgtalias, initiator): - """Setup the lun on on the specified target port + def add_iscsi_conn(self, lu_name, fs_label, port, tgt_alias, initiator): + """Sets up the Logical Unit on the specified target port. - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param lun_name: id of the logical unit being extended - :param hdp: data pool of the logical unit + :param lu_name: id of the Logical Unit being mapped + :param fs_label: data pool of the Logical Unit :param port: iSCSI port - :param tgtalias: iSCSI qualified name + :param tgt_alias: iSCSI qualified name :param initiator: initiator address + :returns: dictionary (conn_info) with the connection information + conn_info={ + 'lu_id': Logical Unit ID, + 'iqn': iSCSI qualified name, + 'lu_name': Logical Unit name, + 'initiator': iSCSI initiator, + 'fs': File system to connect, + 'port': Port to make the iSCSI connection + } """ + conn_info = {} + lu_info = self.check_lu(lu_name, fs_label) + _evs_id = self.get_evs(fs_label) - LOG.debug('Adding %(lun)s to %(tgt)s returns %(tgt)s.', - {'lun': lun_name, 'tgt': tgtalias}) - found, lunid, tgt = self.check_lu(cmd, ip0, user, pw, lun_name, hdp) - evsid = self.get_evs(cmd, ip0, user, pw, hdp) + if not lu_info['mapped']: + tgt = self._get_targets(_evs_id, tgt_alias) + lu_id = self._get_unused_luid(tgt) + conn_info['lu_id'] = lu_id + conn_info['iqn'] = tgt['iqn'] - if found: - conn = (int(lunid), lun_name, initiator, int(lunid), tgt['iqn'], - int(lunid), hdp, port) - out = ("H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s " - "@ index: %d, and Target: %s @ index %d is " - "successfully paired @ CTL: %s, Port: %s.") % conn + # In busy situations where 2 or more instances of the driver are + # trying to map an LU, 2 hosts can retrieve the same 'lu_id', + # and try to map the LU in the same LUN. To handle that we + # capture the ProcessExecutionError exception, back off for a few + # seconds and retry it.
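# (Editorial sketch, not part of this commit; host names are illustrative.)
# The race described above, which the @utils.retry decorator on this
# method is there to absorb:
#
#   host A: _get_unused_luid(tgt) -> 5 ; 'iscsi-target addlu ... 5' succeeds
#   host B: _get_unused_luid(tgt) -> 5 ; 'iscsi-target addlu ... 5' fails
#
# host B's failed ssc call raises ProcessExecutionError, the decorator
# sleeps for a random interval, and add_iscsi_conn runs again from the
# top, picking a fresh LU id from a refreshed target listing.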
+ self._run_cmd("console-context", "--evs", _evs_id, 'iscsi-target', + 'addlu', tgt_alias, lu_name, six.text_type(lu_id)) else: - tgt = self._get_targets(cmd, ip0, user, pw, evsid, tgtalias) - lunid = self._get_unused_lunid(cmd, ip0, user, pw, tgt[0]) + conn_info['lu_id'] = lu_info['id'] + conn_info['iqn'] = lu_info['tgt']['iqn'] - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", evsid, - 'iscsi-target', 'addlu', - tgtalias, lun_name, six.text_type(lunid), - check_exit_code=True) + conn_info['lu_name'] = lu_name + conn_info['initiator'] = initiator + conn_info['fs'] = fs_label + conn_info['port'] = port - conn = (int(lunid), lun_name, initiator, int(lunid), tgt[0]['iqn'], - int(lunid), hdp, port) - out = ("H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s " - "@ index: %d, and Target: %s @ index %d is " - "successfully paired @ CTL: %s, Port: %s.") % conn + LOG.debug('add_iscsi_conn: LU %(lu)s added to %(tgt)s.', + {'lu': lu_name, 'tgt': tgt_alias}) + LOG.debug('conn_info: %(conn_info)s', {'conn_info': conn_info}) - LOG.debug('add_iscsi_conn: returns %s.', out) - return out + return conn_info - def del_iscsi_conn(self, cmd, ip0, user, pw, evsid, iqn, hlun): - """Remove the lun on on the specified target port + def del_iscsi_conn(self, evs_id, iqn, lu_id): + """Removes the Logical Unit on the specified target port. - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param evsid: EVSID for the file system + :param evs_id: EVSID for the file system :param iqn: iSCSI qualified name - :param hlun: logical unit id - :returns: formated string + :param lu_id: Logical Unit id """ + found = False + out, err = self._run_cmd("console-context", "--evs", evs_id, + 'iscsi-target', 'list', iqn) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", evsid, - 'iscsi-target', 'list', iqn, - check_exit_code=True) - + # see if LU is already detached lines = out.split('\n') - out = ("H-LUN: %d already deleted from target %s" % (int(hlun), iqn)) - # see if lun is already detached for line in lines: if line.startswith(' '): - lunline = line.split()[0] - if lunline[0].isdigit() and lunline == hlun: - out = "" + lu_line = line.split()[0] + if lu_line[0].isdigit() and lu_line == lu_id: + found = True break - if out != "": - # hlun wasn't found - LOG.info(_LI('del_iscsi_conn: hlun not found %s.'), out) - return out + # LU wasn't found + if not found: + LOG.debug("del_iscsi_conn: LU already deleted from " + "target %(iqn)s", {'lu': lu_id, 'iqn': iqn}) + return # remove the LU from the target - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", evsid, - 'iscsi-target', 'dellu', - '-f', iqn, hlun, - check_exit_code=True) + self._run_cmd("console-context", "--evs", evs_id, 'iscsi-target', + 'dellu', '-f', iqn, lu_id) - out = "H-LUN: %d successfully deleted from target %s" \ - % (int(hlun), iqn) + LOG.debug("del_iscsi_conn: LU: %(lu)s successfully deleted from " + "target %(iqn)s", {'lu': lu_id, 'iqn': iqn}) - LOG.debug('del_iscsi_conn: %s.', out) - return out - - def get_targetiqn(self, cmd, ip0, user, pw, targetalias, hdp, secret): - """Obtain the targets full iqn + def get_target_iqn(self, tgt_alias, fs_label): + """Obtains the target full iqn Returns the target's full iqn rather than its alias. 
- :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param targetalias: alias of the target - :param hdp: data pool of the logical unit - :param secret: CHAP secret of the target + + :param tgt_alias: alias of the target + :param fs_label: data pool of the Logical Unit :returns: string with full IQN """ - - _evsid = self.get_evs(cmd, ip0, user, pw, hdp) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-target', 'list', targetalias, - check_exit_code=True) - - if "does not exist" in out: - if secret == "": - secret = '""' - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-target', 'add', - targetalias, secret, - check_exit_code=True) - else: - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-target', 'add', - targetalias, secret, - check_exit_code=True) - if "success" in out: - return targetalias + _evs_id = self.get_evs(fs_label) + out, err = self._run_cmd("console-context", "--evs", _evs_id, + 'iscsi-target', 'list', tgt_alias) lines = out.split('\n') # returns the first iqn for line in lines: - if 'Alias' in line: - fulliqn = line.split()[2] - return fulliqn + if 'Globally unique name' in line: + full_iqn = line.split()[3] + LOG.debug('get_target_iqn: %(iqn)s', {'iqn': full_iqn}) + return full_iqn + LOG.debug("Could not find iqn for alias %(alias)s on fs %(fs_label)s", + {'alias': tgt_alias, 'fs_label': fs_label}) - def set_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp, secret): + def set_target_secret(self, targetalias, fs_label, secret): """Sets the chap secret for the specified target. - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array :param targetalias: alias of the target - :param hdp: data pool of the logical unit + :param fs_label: data pool of the Logical Unit :param secret: CHAP secret of the target """ + _evs_id = self.get_evs(fs_label) + self._run_cmd("console-context", "--evs", _evs_id, 'iscsi-target', + 'mod', '-s', secret, '-a', 'enable', targetalias) - _evsid = self.get_evs(cmd, ip0, user, pw, hdp) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-target', 'list', - targetalias, - check_exit_code=False) + LOG.debug("set_target_secret: Secret set on target %(tgt)s.", + {'tgt': targetalias}) - if "does not exist" in out: - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-target', 'add', - targetalias, secret, - check_exit_code=True) - else: - LOG.info(_LI('targetlist: %s'), targetalias) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-target', 'mod', - '-s', secret, '-a', 'enable', - targetalias, - check_exit_code=True) + def get_target_secret(self, targetalias, fs_label): + """Gets the chap secret for the specified target. - def get_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp): - """Returns the chap secret for the specified target. 
- - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array :param targetalias: alias of the target - :param hdp: data pool of the logical unit - :return secret: CHAP secret of the target + :param fs_label: data pool of the Logical Unit + :returns: CHAP secret of the target """ - - _evsid = self.get_evs(cmd, ip0, user, pw, hdp) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", - "--evs", _evsid, - 'iscsi-target', 'list', targetalias, - check_exit_code=True) + _evs_id = self.get_evs(fs_label) + out, err = self._run_cmd("console-context", "--evs", _evs_id, + 'iscsi-target', 'list', targetalias) enabled = "" secret = "" @@ -771,106 +539,336 @@ class HnasBackend(object): else: return "" - def check_target(self, cmd, ip0, user, pw, hdp, target_alias): - """Checks if a given target exists and gets its info + def check_target(self, fs_label, target_alias): + """Checks if a given target exists and gets its info. - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param hdp: pool name used + :param fs_label: pool name used :param target_alias: alias of the target - :returns: True if target exists - :returns: list with the target info + :returns: dictionary (tgt_info) + tgt_info={ + 'alias': The alias of the target, + 'found': boolean to inform if the target was found or not, + 'tgt': dictionary with the target information + } """ + tgt_info = {} + _evs_id = self.get_evs(fs_label) + _tgt_list = self._get_targets(_evs_id) - LOG.debug("Checking if target %(tgt)s exists.", {'tgt': target_alias}) - evsid = self.get_evs(cmd, ip0, user, pw, hdp) - tgt_list = self._get_targets(cmd, ip0, user, pw, evsid) - - for tgt in tgt_list: + for tgt in _tgt_list: if tgt['alias'] == target_alias: - attached_luns = len(tgt['luns']) - LOG.debug("Target %(tgt)s has %(lun)s volumes.", - {'tgt': target_alias, 'lun': attached_luns}) - return True, tgt + attached_lus = len(tgt['lus']) + tgt_info['found'] = True + tgt_info['tgt'] = tgt + LOG.debug("Target %(tgt)s has %(lu)s volumes.", + {'tgt': target_alias, 'lu': attached_lus}) + return tgt_info - LOG.debug("Target %(tgt)s does not exist.", {'tgt': target_alias}) - return False, None + tgt_info['found'] = False + tgt_info['tgt'] = None - def check_lu(self, cmd, ip0, user, pw, volume_name, hdp): - """Checks if a given LUN is already mapped + LOG.debug("check_target: Target %(tgt)s does not exist.", + {'tgt': target_alias}) - :param cmd: ssc command name - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param volume_name: number of the LUN - :param hdp: storage pool of the LUN - :returns: True if the lun is attached - :returns: the LUN id - :returns: Info related to the target + return tgt_info + + def check_lu(self, vol_name, fs_label): + """Checks if a given LU is already mapped + + :param vol_name: name of the LU + :param fs_label: storage pool of the LU + :returns: dictionary (lu_info) with LU information + lu_info={ + 'mapped': LU state (mapped or not), + 'id': ID of the LU, + 'tgt': the iSCSI target alias + } """ - - LOG.debug("Checking if vol %s (hdp: %s) is attached.", - volume_name, hdp) - evsid = self.get_evs(cmd, ip0, user, pw, hdp) - tgt_list = self._get_targets(cmd, ip0, user, pw, evsid) + 
lu_info = {} + evs_id = self.get_evs(fs_label) + tgt_list = self._get_targets(evs_id, refresh=True) for tgt in tgt_list: - if len(tgt['luns']) == 0: + if len(tgt['lus']) == 0: continue - for lun in tgt['luns']: - lunid = lun['id'] - lunname = lun['name'] - if lunname[:29] == volume_name[:29]: - LOG.debug("LUN %(lun)s attached on %(lunid)s, " + for lu in tgt['lus']: + lu_id = lu['id'] + lu_name = lu['name'] + if lu_name[:29] == vol_name[:29]: + lu_info['mapped'] = True + lu_info['id'] = lu_id + lu_info['tgt'] = tgt + LOG.debug("LU %(lu)s attached on %(luid)s, " "target: %(tgt)s.", - {'lun': volume_name, 'lunid': lunid, 'tgt': tgt}) - return True, lunid, tgt + {'lu': vol_name, 'luid': lu_id, 'tgt': tgt}) + return lu_info - LOG.debug("LUN %(lun)s not attached.", {'lun': volume_name}) - return False, 0, None + lu_info['mapped'] = False + lu_info['id'] = 0 + lu_info['tgt'] = None - def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun): - """Returns the information for the specified Logical Unit. + LOG.debug("LU %(lu)s not attached. lu_info: %(lu_info)s", + {'lu': vol_name, 'lu_info': lu_info}) + + return lu_info + + def get_existing_lu_info(self, lu_name, fs_label=None, evs_id=None): + """Gets the information for the specified Logical Unit. Returns the information of an existing Logical Unit on HNAS, according to the name provided. - :param cmd: the command that will be run on SMU - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param fslabel: label of the file system - :param lun: label of the logical unit + :param lu_name: label of the Logical Unit + :param fs_label: label of the file system + :param evs_id: ID of the EVS where the LU is located + :returns: dictionary (lu_info) with LU information + lu_info={ + 'name': A Logical Unit name, + 'comment': A comment about the LU, not used for Cinder, + 'path': Path to LU inside filesystem, + 'size': Logical Unit size returned always in GB (volume size), + 'filesystem': File system where the Logical Unit was created, + 'fs_mounted': Information about the state of file system + (mounted or not), + 'lu_mounted': Information about the state of Logical Unit + (mounted or not) + } """ + lu_info = {} + if evs_id is None: + evs_id = self.get_evs(fs_label) - evs = self.get_evs(cmd, ip0, user, pw, fslabel) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", - evs, 'iscsi-lu', 'list', lun) + lu_name = "'{}'".format(lu_name) + out, err = self._run_cmd("console-context", "--evs", evs_id, + 'iscsi-lu', 'list', lu_name) - return out + if 'does not exist.' not in out: + aux = out.split('\n') + lu_info['name'] = aux[0].split(':')[1].strip() + lu_info['comment'] = aux[1].split(':')[1].strip() + lu_info['path'] = aux[2].split(':')[1].strip() + lu_info['size'] = aux[3].split(':')[1].strip() + lu_info['filesystem'] = aux[4].split(':')[1].strip() + lu_info['fs_mounted'] = aux[5].split(':')[1].strip() + lu_info['lu_mounted'] = aux[6].split(':')[1].strip() - def rename_existing_lu(self, cmd, ip0, user, pw, fslabel, - new_name, vol_name): + if 'TB' in lu_info['size']: + sz_convert = float(lu_info['size'].split()[0]) * units.Ki + lu_info['size'] = sz_convert + else: + lu_info['size'] = float(lu_info['size'].split()[0]) + + LOG.debug('get_existing_lu_info: LU info: %(lu)s', {'lu': lu_info}) + + return lu_info + + def rename_existing_lu(self, fs_label, vol_name, new_name): """Renames the specified Logical Unit. 
Renames an existing Logical Unit on HNAS according to the new name provided. - :param cmd: command that will be run on SMU - :param ip0: string IP address of controller - :param user: string user authentication for array - :param pw: string password authentication for array - :param fslabel: label of the file system - :param new_name: new name to the existing volume + :param fs_label: label of the file system :param vol_name: current name of the existing volume + :param new_name: new name to the existing volume """ - evs = self.get_evs(cmd, ip0, user, pw, fslabel) - out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", - evs, "iscsi-lu", "mod", "-n", new_name, - vol_name) - return out + new_name = "'{}'".format(new_name) + evs_id = self.get_evs(fs_label) + self._run_cmd("console-context", "--evs", evs_id, "iscsi-lu", "mod", + "-n", new_name, vol_name) + + LOG.debug('rename_existing_lu: ' + 'LU %(old)s was renamed to %(new)s', + {'old': vol_name, 'new': new_name}) + + def _get_fs_list(self): + """Gets a list of file systems configured on the backend. + + :returns: a list with the Filesystems configured on HNAS + """ + if not self.fslist: + fslist_out, err = self._run_cmd('evsfs', 'list') + list_raw = fslist_out.split('\n')[3:-2] + + for fs_raw in list_raw: + fs = {} + + fs_raw = fs_raw.split() + fs['id'] = fs_raw[0] + fs['label'] = fs_raw[1] + fs['permid'] = fs_raw[2] + fs['evsid'] = fs_raw[3] + fs['evslabel'] = fs_raw[4] + self.fslist[fs['label']] = fs + + return self.fslist + + def _get_evs_list(self): + """Gets a list of EVS configured on the backend. + + :returns: a list of the EVS configured on HNAS + """ + evslist_out, err = self._run_cmd('evs', 'list') + + evslist = {} + idx = 0 + for evs_raw in evslist_out.split('\n'): + idx += 1 + if 'Service' in evs_raw and 'Online' in evs_raw: + evs = {} + evs_line = evs_raw.split() + evs['node'] = evs_line[0] + evs['id'] = evs_line[1] + evs['label'] = evs_line[3] + evs['ips'] = [] + evs['ips'].append(evs_line[6]) + # Each EVS can have a list of IPs that are displayed in the + # next lines of the evslist_out. We need to check whether the + # next line is a new EVS entry or an IP of the current EVS. + for evs_ip_raw in evslist_out.split('\n')[idx:]: + if 'Service' in evs_ip_raw or not evs_ip_raw.split(): + break + ip = evs_ip_raw.split()[0] + evs['ips'].append(ip) + + evslist[evs['label']] = evs + + return evslist + + def get_export_list(self): + """Gets information on each NFS export.
+ + :returns: a list of the exports configured on HNAS + """ + nfs_export_out, _ = self._run_cmd('for-each-evs', '-q', 'nfs-export', + 'list') + fs_list = self._get_fs_list() + evs_list = self._get_evs_list() + + export_list = [] + + for export_raw_data in nfs_export_out.split("Export name:")[1:]: + export_info = {} + export_data = export_raw_data.split('\n') + + export_info['name'] = export_data[0].strip() + export_info['path'] = export_data[1].split(':')[1].strip() + export_info['fs'] = export_data[2].split(':')[1].strip() + + if "*** not available ***" in export_raw_data: + export_info['size'] = -1 + export_info['free'] = -1 + else: + evslbl = fs_list[export_info['fs']]['evslabel'] + export_info['evs'] = evs_list[evslbl]['ips'] + + size = export_data[3].split(':')[1].strip().split()[0] + multiplier = export_data[3].split(':')[1].strip().split()[1] + if multiplier == 'TB': + export_info['size'] = float(size) * units.Ki + else: + export_info['size'] = float(size) + + free = export_data[4].split(':')[1].strip().split()[0] + fmultiplier = export_data[4].split(':')[1].strip().split()[1] + if fmultiplier == 'TB': + export_info['free'] = float(free) * units.Ki + else: + export_info['free'] = float(free) + + export_list.append(export_info) + + LOG.debug("get_export_list: %(exp_list)s", {'exp_list': export_list}) + return export_list + + def create_cloned_lu(self, src_lu, fs_label, clone_name): + """Clones a Logical Unit + + Clone primitive used to support all iSCSI snapshot/cloning functions. + + :param src_lu: id of the Logical Unit being cloned + :param fs_label: data pool of the Logical Unit + :param clone_name: name of the snapshot + """ + evs_id = self.get_evs(fs_label) + self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'clone', + '-e', src_lu, clone_name, + '/.cinder/' + clone_name + '.iscsi') + + LOG.debug('LU %(lu)s cloned.', {'lu': clone_name}) + + def create_target(self, tgt_alias, fs_label, secret): + """Creates a new iSCSI target + + :param tgt_alias: the alias with which the target will be created + :param fs_label: the label of the file system to create the target + :param secret: the secret for authentication of the target + """ + _evs_id = self.get_evs(fs_label) + self._run_cmd("console-context", "--evs", _evs_id, + 'iscsi-target', 'add', tgt_alias, secret) + + self._get_targets(_evs_id, refresh=True) + LOG.debug("create_target: alias: %(alias)s fs_label: %(fs_label)s", + {'alias': tgt_alias, 'fs_label': fs_label}) + + def _get_file_handler(self, volume_path, _evs_id, fs_label): + out, err = self._run_cmd("console-context", "--evs", _evs_id, + 'file-clone-stat', '-f', fs_label, + volume_path) + + if "File is not a clone" in out: + msg = (_("%s is not a clone!") % volume_path) + raise exception.ManageExistingInvalidReference( + existing_ref=volume_path, reason=msg) + + lines = out.split('\n') + filehandle_list = [] + + for line in lines: + if "SnapshotFile:" in line and "FileHandle" in line: + item = line.split(':') + handler = item[1][:-1].replace(' FileHandle[', "") + filehandle_list.append(handler) + LOG.debug("Volume handler found: %(fh)s.
Adding to list...", + {'fh': handler}) + + return filehandle_list + + def check_snapshot_parent(self, volume_path, snap_name, fs_label): + _evs_id = self.get_evs(fs_label) + + file_handler_list = self._get_file_handler(volume_path, _evs_id, + fs_label) + + for file_handler in file_handler_list: + out, err = self._run_cmd("console-context", "--evs", _evs_id, + 'file-clone-stat-snapshot-file', + '-f', fs_label, file_handler) + + lines = out.split('\n') + + for line in lines: + if snap_name in line: + LOG.debug("Snapshot %(snap)s found in children list from " + "%(vol)s!", {'snap': snap_name, + 'vol': volume_path}) + return True + + LOG.debug("Snapshot %(snap)s was not found in children list from " + "%(vol)s, probably it is not the parent!", + {'snap': snap_name, 'vol': volume_path}) + return False + + def get_export_path(self, export, fs_label): + evs_id = self.get_evs(fs_label) + out, err = self._run_cmd("console-context", "--evs", evs_id, + 'nfs-export', 'list', export) + + lines = out.split('\n') + + for line in lines: + if 'Export path:' in line: + return line.split('Export path:')[1].strip() diff --git a/cinder/volume/drivers/hitachi/hnas_iscsi.py b/cinder/volume/drivers/hitachi/hnas_iscsi.py index 36e41fe43..b95857505 100644 --- a/cinder/volume/drivers/hitachi/hnas_iscsi.py +++ b/cinder/volume/drivers/hitachi/hnas_iscsi.py @@ -17,313 +17,193 @@ """ iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS-HNAS) platform. """ -import os -import re -import six -from xml.etree import ElementTree as ETree from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging -from oslo_utils import units - +import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _, _LE, _LI from cinder import interface + from cinder import utils as cinder_utils from cinder.volume import driver from cinder.volume.drivers.hitachi import hnas_backend +from cinder.volume.drivers.hitachi import hnas_utils from cinder.volume import utils -from cinder.volume import volume_types -HDS_HNAS_ISCSI_VERSION = '4.3.0' + +HNAS_ISCSI_VERSION = '5.0.0' LOG = logging.getLogger(__name__) iSCSI_OPTS = [ cfg.StrOpt('hds_hnas_iscsi_config_file', default='/opt/hds/hnas/cinder_iscsi_conf.xml', - help='Configuration file for HDS iSCSI cinder plugin')] + help='Legacy configuration file for HNAS iSCSI Cinder ' + 'plugin. 
This is not needed if you set all the ' + 'configuration options in cinder.conf', + deprecated_for_removal=True), + cfg.BoolOpt('hnas_chap_enabled', + default=True, + help='Whether CHAP authentication is enabled on the ' + 'iSCSI target or not.'), + cfg.IPOpt('hnas_svc0_iscsi_ip', + help='Service 0 iSCSI IP'), + cfg.IPOpt('hnas_svc1_iscsi_ip', + help='Service 1 iSCSI IP'), + cfg.IPOpt('hnas_svc2_iscsi_ip', + help='Service 2 iSCSI IP'), + cfg.IPOpt('hnas_svc3_iscsi_ip', + help='Service 3 iSCSI IP') +] + CONF = cfg.CONF CONF.register_opts(iSCSI_OPTS) -HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', - 'chap_enabled': 'True', - 'ssh_port': '22'} +HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc', + 'chap_enabled': True, + 'ssh_port': 22} MAX_HNAS_ISCSI_TARGETS = 32 - - -def factory_bend(drv_configs): - return hnas_backend.HnasBackend(drv_configs) - - -def _loc_info(loc): - """Parse info from location string.""" - - LOG.info(_LI("Parse_loc: %s"), loc) - info = {} - tup = loc.split(',') - if len(tup) < 5: - info['id_lu'] = tup[0].split('.') - return info - info['id_lu'] = tup[2].split('.') - info['tgt'] = tup - return info - - -def _xml_read(root, element, check=None): - """Read an xml element.""" - - val = root.findtext(element) - - # mandatory parameter not found - if val is None and check: - raise exception.ParameterNotFound(param=element) - - # tag not found - if val is None: - return None - - svc_tag_pattern = re.compile("svc_[0-3]$") - # tag found but empty parameter. - if not val.strip(): - # Service tags are empty - if svc_tag_pattern.search(element): - return "" - else: - raise exception.ParameterNotFound(param=element) - - LOG.debug(_LI("%(element)s: %(val)s"), - {'element': element, - 'val': val if element != 'password' else '***'}) - - return val.strip() - - -def _read_config(xml_config_file): - """Read hds driver specific xml config file.""" - - if not os.access(xml_config_file, os.R_OK): - msg = (_("Can't open config file: %s") % xml_config_file) - raise exception.NotFound(message=msg) - - try: - root = ETree.parse(xml_config_file).getroot() - except Exception: - msg = (_("Error parsing config file: %s") % xml_config_file) - raise exception.ConfigNotFound(message=msg) - - # mandatory parameters - config = {} - arg_prereqs = ['mgmt_ip0', 'username'] - for req in arg_prereqs: - config[req] = _xml_read(root, req, True) - - # optional parameters - opt_parameters = ['hnas_cmd', 'ssh_enabled', 'chap_enabled', - 'cluster_admin_ip0'] - for req in opt_parameters: - config[req] = _xml_read(root, req) - - if config['chap_enabled'] is None: - config['chap_enabled'] = HNAS_DEFAULT_CONFIG['chap_enabled'] - - if config['ssh_enabled'] == 'True': - config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', True) - config['ssh_port'] = _xml_read(root, 'ssh_port') - config['password'] = _xml_read(root, 'password') - if config['ssh_port'] is None: - config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port'] - else: - # password is mandatory when not using SSH - config['password'] = _xml_read(root, 'password', True) - - if config['hnas_cmd'] is None: - config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd'] - - config['hdp'] = {} - config['services'] = {} - - # min one needed - for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: - if _xml_read(root, svc) is None: - continue - service = {'label': svc} - - # none optional - for arg in ['volume_type', 'hdp', 'iscsi_ip']: - service[arg] = _xml_read(root, svc + '/' + arg, True) - config['services'][service['volume_type']] = service - config['hdp'][service['hdp']] = service['hdp'] - - #
at least one service required!
-    if config['services'].keys() is None:
-        raise exception.ParameterNotFound(param="No service found")
-
-    return config
+MAX_HNAS_LUS_PER_TARGET = 32


 @interface.volumedriver
-class HDSISCSIDriver(driver.ISCSIDriver):
-    """HDS HNAS volume driver.
+class HNASISCSIDriver(driver.ISCSIDriver):
+    """HNAS iSCSI volume driver.

     Version history:

     .. code-block:: none

-        1.0.0: Initial driver version
-        2.2.0: Added support to SSH authentication
-        3.2.0: Added pool aware scheduling
-               Fixed concurrency errors
-        3.3.0: Fixed iSCSI target limitation error
-        4.0.0: Added manage/unmanage features
-        4.1.0: Fixed XML parser checks on blank options
-        4.2.0: Fixed SSH and cluster_admin_ip0 verification
-        4.3.0: Fixed attachment with os-brick 1.0.0
+        Version 1.0.0: Initial driver version
+        Version 2.2.0: Added support to SSH authentication
+        Version 3.2.0: Added pool aware scheduling
+                       Fixed concurrency errors
+        Version 3.3.0: Fixed iSCSI target limitation error
+        Version 4.0.0: Added manage/unmanage features
+        Version 4.1.0: Fixed XML parser checks on blank options
+        Version 4.2.0: Fixed SSH and cluster_admin_ip0 verification
+        Version 4.3.0: Fixed attachment with os-brick 1.0.0
+        Version 5.0.0: Code cleaning up
+                       New communication interface between the driver and HNAS
+                       Removed the option to use local SSC (ssh_enabled=False)
+                       Updated to use versioned objects
+                       Changed the class name to HNASISCSIDriver
+                       Deprecated XML config file
     """

+    # ThirdPartySystems wiki page
+    CI_WIKI_NAME = "Hitachi_HNAS_CI"
+    VERSION = HNAS_ISCSI_VERSION

     def __init__(self, *args, **kwargs):
-        """Initialize, read different config parameters."""
-
-        super(HDSISCSIDriver, self).__init__(*args, **kwargs)
-        self.driver_stats = {}
+        """Initializes and reads different config parameters."""
+        self.configuration = kwargs.get('configuration', None)
         self.context = {}
-        self.configuration.append_config_values(iSCSI_OPTS)
-        self.config = _read_config(
-            self.configuration.hds_hnas_iscsi_config_file)
-        self.type = 'HNAS'
+        self.config = {}

-        self.platform = self.type.lower()
-        LOG.info(_LI("Backend type: %s"), self.type)
-        self.bend = factory_bend(self.config)
+        service_parameters = ['volume_type', 'hdp', 'iscsi_ip']
+        optional_parameters = ['ssc_cmd', 'cluster_admin_ip0',
+                               'chap_enabled']

-    def _array_info_get(self):
-        """Get array parameters."""
+        if self.configuration:
+            self.configuration.append_config_values(
+                hnas_utils.drivers_common_opts)
+            self.configuration.append_config_values(iSCSI_OPTS)

-        out = self.bend.get_version(self.config['hnas_cmd'],
-                                    HDS_HNAS_ISCSI_VERSION,
-                                    self.config['mgmt_ip0'],
-                                    self.config['username'],
-                                    self.config['password'])
-        inf = out.split()
+            # Trying to get HNAS configuration from cinder.conf
+            self.config = hnas_utils.read_cinder_conf(
+                self.configuration, 'iscsi')

-        return inf[1], 'hnas_' + inf[1], inf[6]
+            # If the HNAS configuration is not set on cinder.conf, try to
+            # use the deprecated XML configuration file
+            if not self.config:
+                self.config = hnas_utils.read_xml_config(
+                    self.configuration.hds_hnas_iscsi_config_file,
+                    service_parameters,
+                    optional_parameters)

-    def _get_iscsi_info(self):
-        """Validate array iscsi parameters."""
-
-        out = self.bend.get_iscsi_info(self.config['hnas_cmd'],
-                                       self.config['mgmt_ip0'],
-                                       self.config['username'],
-                                       self.config['password'])
-        lines = out.split('\n')
-
-        # dict based on iSCSI portal ip addresses
-        conf = {}
-        for line in lines:
-            # only record up links
-            if 'CTL' in line and 'Up' in line:
-                inf
= line.split() - (ctl, port, ip, ipp) = (inf[1], inf[3], inf[5], inf[7]) - conf[ip] = {} - conf[ip]['ctl'] = ctl - conf[ip]['port'] = port - conf[ip]['iscsi_port'] = ipp - LOG.debug("portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s", - {'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port}) - - return conf + super(HNASISCSIDriver, self).__init__(*args, **kwargs) + self.backend = hnas_backend.HNASSSHBackend(self.config) def _get_service(self, volume): - """Get the available service parameters + """Gets the available service parameters. - Get the available service parametersfor a given volume using its - type. - :param volume: dictionary volume reference - :returns: HDP related to the service + Get the available service parameters for a given volume using its + type. + + :param volume: dictionary volume reference + :returns: HDP (file system) related to the service or error if no + configuration is found. + :raises: ParameterNotFound """ - - label = utils.extract_host(volume['host'], level='pool') - LOG.info(_LI("Using service label: %s"), label) + LOG.debug("Available services: %(svc)s.", + {'svc': self.config['services'].keys()}) + label = utils.extract_host(volume.host, level='pool') if label in self.config['services'].keys(): svc = self.config['services'][label] + LOG.info(_LI("Using service label: %(lbl)s."), {'lbl': label}) return svc['hdp'] else: - LOG.info(_LI("Available services: %s."), - self.config['services'].keys()) - LOG.error(_LE("No configuration found for service: %s."), label) + LOG.error(_LE("No configuration found for service: %(lbl)s."), + {'lbl': label}) raise exception.ParameterNotFound(param=label) def _get_service_target(self, volume): - """Get the available service parameters + """Gets the available service parameters - Get the available service parameters for a given volume using - its type. - :param volume: dictionary volume reference + Gets the available service parameters for a given volume using its + type. + :param volume: dictionary volume reference + :returns: service target information or raises error + :raises: NoMoreTargets """ + fs_label = self._get_service(volume) + evs_id = self.backend.get_evs(fs_label) - hdp = self._get_service(volume) - info = _loc_info(volume['provider_location']) - (arid, lun_name) = info['id_lu'] - - evsid = self.bend.get_evs(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - hdp) - svc_label = utils.extract_host(volume['host'], level='pool') + svc_label = utils.extract_host(volume.host, level='pool') svc = self.config['services'][svc_label] - LOG.info(_LI("_get_service_target hdp: %s."), hdp) - LOG.info(_LI("config[services]: %s."), self.config['services']) + lu_info = self.backend.check_lu(volume.name, fs_label) - mapped, lunid, tgt = self.bend.check_lu(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - lun_name, hdp) - - LOG.info(_LI("Target is %(map)s! 
Targetlist = %(tgtl)s."), - {'map': "mapped" if mapped else "not mapped", 'tgtl': tgt}) - - # The volume is already mapped to a LUN, so no need to create any + # The volume is already mapped to a LU, so no need to create any # targets - if mapped: - service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'], - svc['port'], hdp, tgt['alias'], tgt['secret']) + if lu_info['mapped']: + service = ( + svc['iscsi_ip'], svc['iscsi_port'], svc['evs'], svc['port'], + fs_label, lu_info['tgt']['alias'], lu_info['tgt']['secret']) + LOG.info(_LI("Volume %(vol_name)s already mapped on target " + "%(tgt)s to LUN %(lunid)s."), + {'vol_name': volume.name, 'tgt': lu_info['tgt']['alias'], + 'lunid': lu_info['id']}) return service # Each EVS can have up to 32 targets. Each target can have up to 32 - # LUNs attached and have the name format 'evs-tgt<0-N>'. We run + # LUs attached and have the name format 'evs-tgt<0-N>'. We run # from the first 'evs1-tgt0' until we find a target that is not already - # created in the BE or is created but have slots to place new targets. - found_tgt = False + # created in the BE or is created but have slots to place new LUs. + tgt_alias = '' for i in range(0, MAX_HNAS_ISCSI_TARGETS): - tgt_alias = 'evs' + evsid + '-tgt' + six.text_type(i) - # TODO(erlon): we need to go to the BE 32 times here - tgt_exist, tgt = self.bend.check_target(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - hdp, tgt_alias) - if tgt_exist and len(tgt['luns']) < 32 or not tgt_exist: + tgt_alias = 'evs' + evs_id + '-tgt' + six.text_type(i) + tgt = self.backend.check_target(fs_label, tgt_alias) + + if (tgt['found'] and + len(tgt['tgt']['lus']) < MAX_HNAS_LUS_PER_TARGET or + not tgt['found']): # Target exists and has free space or, target does not exist # yet. Proceed and use the target or create a target using this # name. - found_tgt = True break - - # If we've got here and found_tgt is not True, we run out of targets, - # raise and go away. - if not found_tgt: + else: + # If we've got here, we run out of targets, raise and go away. LOG.error(_LE("No more targets available.")) raise exception.NoMoreTargets(param=tgt_alias) - LOG.info(_LI("Using target label: %s."), tgt_alias) + LOG.info(_LI("Using target label: %(tgt)s."), {'tgt': tgt_alias}) # Check if we have a secret stored for this target so we don't have to # go to BE on every query @@ -339,527 +219,102 @@ class HDSISCSIDriver(driver.ISCSIDriver): # see if the client supports CHAP authentication and if # iscsi_secret has already been set, retrieve the secret if # available, otherwise generate and store - if self.config['chap_enabled'] == 'True': - # It may not exist, create and set secret. + if self.config['chap_enabled']: + # CHAP support is enabled. Tries to get the target secret. 
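+            # Note: the secret is fetched from the backend only the first
+            # time this target is seen; later attaches reuse the value
+            # cached in self.config['targets'] above.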
if 'iscsi_secret' not in tgt_info.keys(): - LOG.info(_LI("Retrieving secret for service: %s."), - tgt_alias) - - out = self.bend.get_targetsecret(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - tgt_alias, hdp) + LOG.info(_LI("Retrieving secret for service: %(tgt)s."), + {'tgt': tgt_alias}) + out = self.backend.get_target_secret(tgt_alias, fs_label) tgt_info['iscsi_secret'] = out - if tgt_info['iscsi_secret'] == "": - randon_secret = utils.generate_password()[0:15] - tgt_info['iscsi_secret'] = randon_secret - self.bend.set_targetsecret(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - tgt_alias, hdp, - tgt_info['iscsi_secret']) - LOG.info(_LI("Set tgt CHAP secret for service: %s."), - tgt_alias) + # CHAP supported and the target has no secret yet. So, the + # secret is created for the target + if tgt_info['iscsi_secret'] == "": + random_secret = utils.generate_password()[0:15] + tgt_info['iscsi_secret'] = random_secret + + LOG.info(_LI("Set tgt CHAP secret for service: %(tgt)s."), + {'tgt': tgt_alias}) else: # We set blank password when the client does not # support CHAP. Later on, if the client tries to create a new - # target that does not exists in the backend, we check for this + # target that does not exist in the backend, we check for this # value and use a temporary dummy password. if 'iscsi_secret' not in tgt_info.keys(): # Warns in the first time LOG.info(_LI("CHAP authentication disabled.")) - tgt_info['iscsi_secret'] = "" + tgt_info['iscsi_secret'] = "''" + + # If the target does not exist, it should be created + if not tgt['found']: + self.backend.create_target(tgt_alias, fs_label, + tgt_info['iscsi_secret']) + elif (tgt['tgt']['secret'] == "" and + self.config['chap_enabled']): + # The target exists, has no secret and chap is enabled + self.backend.set_target_secret(tgt_alias, fs_label, + tgt_info['iscsi_secret']) if 'tgt_iqn' not in tgt_info: - LOG.info(_LI("Retrieving target for service: %s."), tgt_alias) + LOG.info(_LI("Retrieving IQN for service: %(tgt)s."), + {'tgt': tgt_alias}) - out = self.bend.get_targetiqn(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - tgt_alias, hdp, - tgt_info['iscsi_secret']) + out = self.backend.get_target_iqn(tgt_alias, fs_label) tgt_info['tgt_iqn'] = out self.config['targets'][tgt_alias] = tgt_info - service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'], - svc['port'], hdp, tgt_alias, tgt_info['iscsi_secret']) + service = (svc['iscsi_ip'], svc['iscsi_port'], svc['evs'], svc['port'], + fs_label, tgt_alias, tgt_info['iscsi_secret']) return service def _get_stats(self): - """Get HDP stats from HNAS.""" + """Get FS stats from HNAS. 
+        :returns: dictionary with the stats from HNAS
+        """
         hnas_stat = {}
         be_name = self.configuration.safe_get('volume_backend_name')
-        hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver'
-        hnas_stat["vendor_name"] = 'HDS'
-        hnas_stat["driver_version"] = HDS_HNAS_ISCSI_VERSION
+        hnas_stat["volume_backend_name"] = be_name or 'HNASISCSIDriver'
+        hnas_stat["vendor_name"] = 'Hitachi'
+        hnas_stat["driver_version"] = HNAS_ISCSI_VERSION
         hnas_stat["storage_protocol"] = 'iSCSI'
         hnas_stat['reserved_percentage'] = 0

         for pool in self.pools:
-            out = self.bend.get_hdp_info(self.config['hnas_cmd'],
-                                         self.config['mgmt_ip0'],
-                                         self.config['username'],
-                                         self.config['password'],
-                                         pool['hdp'])
+            fs_info = self.backend.get_fs_info(pool['fs'])

-            LOG.debug('Query for pool %(pool)s: %(out)s.',
-                      {'pool': pool['pool_name'], 'out': out})
-
-            (hdp, size, _ign, used) = out.split()[1:5]  # in MB
-            pool['total_capacity_gb'] = int(size) / units.Ki
-            pool['free_capacity_gb'] = (int(size) - int(used)) / units.Ki
-            pool['allocated_capacity_gb'] = int(used) / units.Ki
+            pool['total_capacity_gb'] = (float(fs_info['total_size']))
+            pool['free_capacity_gb'] = (
+                float(fs_info['total_size']) - float(fs_info['used_size']))
+            pool['allocated_capacity_gb'] = (float(fs_info['used_size']))
             pool['QoS_support'] = 'False'
             pool['reserved_percentage'] = 0

         hnas_stat['pools'] = self.pools

-        LOG.info(_LI("stats: stats: %s."), hnas_stat)
+        LOG.debug("stats: %(stat)s.", {'stat': hnas_stat})
         return hnas_stat

-    def _get_hdp_list(self):
-        """Get HDPs from HNAS."""
+    def _check_fs_list(self):
+        """Verifies the FSs list in HNAS.

-        out = self.bend.get_hdp_info(self.config['hnas_cmd'],
-                                     self.config['mgmt_ip0'],
-                                     self.config['username'],
-                                     self.config['password'])
-
-        hdp_list = []
-        for line in out.split('\n'):
-            if 'HDP' in line:
-                inf = line.split()
-                if int(inf[1]) >= units.Ki:
-                    # HDP fsids start at units.Ki (1024)
-                    hdp_list.append(inf[11])
-                else:
-                    # HDP pools are 2-digits max
-                    hdp_list.extend(inf[1:2])
-
-        # returns a list of HDP IDs
-        LOG.info(_LI("HDP list: %s"), hdp_list)
-        return hdp_list
-
-    def _check_hdp_list(self):
-        """Verify HDPs in HNAS array.
-
-        Verify that all HDPs specified in the configuration files actually
+        Verify that all FSs specified in the configuration files actually
         exists on the storage.
         """
-
-        hdpl = self._get_hdp_list()
-        lst = self.config['hdp'].keys()
-
-        for hdp in lst:
-            if hdp not in hdpl:
-                LOG.error(_LE("HDP not found: %s"), hdp)
-                err = "HDP not found: " + hdp
-                raise exception.ParameterNotFound(param=err)
-        # status, verify corresponding status is Normal
-
-    def _id_to_vol(self, volume_id):
-        """Given the volume id, retrieve the volume object from database.
-
-        :param volume_id: volume id string
-        """
-
-        vol = self.db.volume_get(self.context, volume_id)
-
-        return vol
-
-    def _update_vol_location(self, volume_id, loc):
-        """Update the provider location.
- - :param volume_id: volume id string - :param loc: string provider location value - """ - - update = {'provider_location': loc} - self.db.volume_update(self.context, volume_id, update) - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - - pass - - def do_setup(self, context): - """Setup and verify HDS HNAS storage connection.""" - - self.context = context - (self.arid, self.hnas_name, self.lumax) = self._array_info_get() - self._check_hdp_list() - - service_list = self.config['services'].keys() - for svc in service_list: - svc = self.config['services'][svc] - pool = {} - pool['pool_name'] = svc['volume_type'] - pool['service_label'] = svc['volume_type'] - pool['hdp'] = svc['hdp'] - - self.pools.append(pool) - - LOG.info(_LI("Configured pools: %s"), self.pools) - - iscsi_info = self._get_iscsi_info() - LOG.info(_LI("do_setup: %s"), iscsi_info) - for svc in self.config['services'].keys(): - svc_ip = self.config['services'][svc]['iscsi_ip'] - if svc_ip in iscsi_info.keys(): - LOG.info(_LI("iSCSI portal found for service: %s"), svc_ip) - self.config['services'][svc]['port'] = \ - iscsi_info[svc_ip]['port'] - self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl'] - self.config['services'][svc]['iscsi_port'] = \ - iscsi_info[svc_ip]['iscsi_port'] - else: # config iscsi address not found on device! - LOG.error(_LE("iSCSI portal not found " - "for service: %s"), svc_ip) - raise exception.ParameterNotFound(param=svc_ip) - - def ensure_export(self, context, volume): - pass - - def create_export(self, context, volume, connector): - """Create an export. Moved to initialize_connection. - - :param context: - :param volume: volume reference - """ - - name = volume['name'] - LOG.debug("create_export %s", name) - - pass - - def remove_export(self, context, volume): - """Disconnect a volume from an attached instance. - - :param context: context - :param volume: dictionary volume reference - """ - - provider = volume['provider_location'] - name = volume['name'] - LOG.debug("remove_export provider %(provider)s on %(name)s", - {'provider': provider, 'name': name}) - - pass - - def create_volume(self, volume): - """Create a LU on HNAS. - - :param volume: dictionary volume reference - """ - - hdp = self._get_service(volume) - out = self.bend.create_lu(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - hdp, - '%s' % (int(volume['size']) * units.Ki), - volume['name']) - - LOG.info(_LI("create_volume: create_lu returns %s"), out) - - lun = self.arid + '.' + out.split()[1] - sz = int(out.split()[5]) - - # Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd - LOG.info(_LI("LUN %(lun)s of size %(sz)s MB is created."), - {'lun': lun, 'sz': sz}) - return {'provider_location': lun} - - def create_cloned_volume(self, dst, src): - """Create a clone of a volume. - - :param dst: ditctionary destination volume reference - :param src: ditctionary source volume reference - """ - - if src['size'] > dst['size']: - msg = 'Clone volume size must not be smaller than source volume' - raise exception.VolumeBackendAPIException(data=msg) - - hdp = self._get_service(dst) - size = int(src['size']) * units.Ki - source_vol = self._id_to_vol(src['id']) - (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu'] - out = self.bend.create_dup(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - slun, hdp, '%s' % size, - dst['name']) - - lun = self.arid + '.' 
+ out.split()[1] - - if src['size'] < dst['size']: - size = dst['size'] - self.extend_volume(dst, size) - else: - size = int(out.split()[5]) - - LOG.debug("LUN %(lun)s of size %(size)s MB is cloned.", - {'lun': lun, 'size': size}) - return {'provider_location': lun} - - def extend_volume(self, volume, new_size): - """Extend an existing volume. - - :param volume: dictionary volume reference - :param new_size: int size in GB to extend - """ - - hdp = self._get_service(volume) - (arid, lun) = _loc_info(volume['provider_location'])['id_lu'] - self.bend.extend_vol(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - hdp, lun, - '%s' % (new_size * units.Ki), - volume['name']) - - LOG.info(_LI("LUN %(lun)s extended to %(size)s GB."), - {'lun': lun, 'size': new_size}) - - def delete_volume(self, volume): - """Delete an LU on HNAS. - - :param volume: dictionary volume reference - """ - - prov_loc = volume['provider_location'] - if prov_loc is None: - LOG.error(_LE("delete_vol: provider location empty.")) - return - info = _loc_info(prov_loc) - (arid, lun) = info['id_lu'] - if 'tgt' in info.keys(): # connected? - LOG.info(_LI("delete lun loc %s"), info['tgt']) - # loc = id.lun - (_portal, iqn, loc, ctl, port, hlun) = info['tgt'] - self.bend.del_iscsi_conn(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - ctl, iqn, hlun) - - name = self.hnas_name - - LOG.debug("delete lun %(lun)s on %(name)s", {'lun': lun, 'name': name}) - - hdp = self._get_service(volume) - self.bend.delete_lu(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - hdp, lun) - - @cinder_utils.synchronized('volume_mapping') - def initialize_connection(self, volume, connector): - """Map the created volume to connector['initiator']. - - :param volume: dictionary volume reference - :param connector: dictionary connector reference - """ - - LOG.info(_LI("initialize volume %(vol)s connector %(conn)s"), - {'vol': volume, 'conn': connector}) - - # connector[ip, host, wwnns, unititator, wwp/ - - service_info = self._get_service_target(volume) - (ip, ipp, ctl, port, _hdp, tgtalias, secret) = service_info - info = _loc_info(volume['provider_location']) - - if 'tgt' in info.keys(): # spurious repeat connection - # print info.keys() - LOG.debug("initiate_conn: tgt already set %s", info['tgt']) - (arid, lun_name) = info['id_lu'] - loc = arid + '.' + lun_name - # sps, use target if provided - try: - out = self.bend.add_iscsi_conn(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - lun_name, _hdp, port, tgtalias, - connector['initiator']) - except processutils.ProcessExecutionError: - msg = _("Error attaching volume %s. 
" - "Target limit might be reached!") % volume['id'] - raise exception.ISCSITargetAttachFailed(message=msg) - - hnas_portal = ip + ':' + ipp - # sps need hlun, fulliqn - hlun = out.split()[1] - fulliqn = out.split()[13] - tgt = hnas_portal + ',' + tgtalias + ',' + loc + ',' + ctl + ',' - tgt += port + ',' + hlun - - LOG.info(_LI("initiate: connection %s"), tgt) - - properties = {} - properties['provider_location'] = tgt - self._update_vol_location(volume['id'], tgt) - properties['target_discovered'] = False - properties['target_portal'] = hnas_portal - properties['target_iqn'] = fulliqn - properties['target_lun'] = int(hlun) - properties['volume_id'] = volume['id'] - properties['auth_username'] = connector['initiator'] - - if self.config['chap_enabled'] == 'True': - properties['auth_method'] = 'CHAP' - properties['auth_password'] = secret - - conn_info = {'driver_volume_type': 'iscsi', 'data': properties} - LOG.debug("initialize_connection: conn_info: %s.", conn_info) - return conn_info - - @cinder_utils.synchronized('volume_mapping') - def terminate_connection(self, volume, connector, **kwargs): - """Terminate a connection to a volume. - - :param volume: dictionary volume reference - :param connector: dictionary connector reference - """ - - info = _loc_info(volume['provider_location']) - if 'tgt' not in info.keys(): # spurious disconnection - LOG.warning(_LW("terminate_conn: provider location empty.")) - return - (arid, lun) = info['id_lu'] - (_portal, tgtalias, loc, ctl, port, hlun) = info['tgt'] - LOG.info(_LI("terminate: connection %s"), volume['provider_location']) - self.bend.del_iscsi_conn(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - ctl, tgtalias, hlun) - self._update_vol_location(volume['id'], loc) - - return {'provider_location': loc} - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot. - - :param volume: dictionary volume reference - :param snapshot: dictionary snapshot reference - """ - - size = int(snapshot['volume_size']) * units.Ki - (arid, slun) = _loc_info(snapshot['provider_location'])['id_lu'] - hdp = self._get_service(volume) - out = self.bend.create_dup(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - slun, hdp, '%s' % (size), - volume['name']) - lun = self.arid + '.' + out.split()[1] - sz = int(out.split()[5]) - - LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot.", - {'lun': lun, 'sz': sz}) - return {'provider_location': lun} - - def create_snapshot(self, snapshot): - """Create a snapshot. - - :param snapshot: dictionary snapshot reference - """ - - source_vol = self._id_to_vol(snapshot['volume_id']) - hdp = self._get_service(source_vol) - size = int(snapshot['volume_size']) * units.Ki - (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu'] - out = self.bend.create_dup(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - slun, hdp, - '%s' % (size), - snapshot['name']) - lun = self.arid + '.' + out.split()[1] - size = int(out.split()[5]) - - LOG.debug("LUN %(lun)s of size %(size)s MB is created.", - {'lun': lun, 'size': size}) - return {'provider_location': lun} - - def delete_snapshot(self, snapshot): - """Delete a snapshot. - - :param snapshot: dictionary snapshot reference - """ - - loc = snapshot['provider_location'] - - # to take care of spurious input - if loc is None: - # which could cause exception. 
- return - - (arid, lun) = loc.split('.') - source_vol = self._id_to_vol(snapshot['volume_id']) - hdp = self._get_service(source_vol) - myid = self.arid - - if arid != myid: - LOG.error(_LE("Array mismatch %(myid)s vs %(arid)s"), - {'myid': myid, 'arid': arid}) - msg = 'Array id mismatch in delete snapshot' - raise exception.VolumeBackendAPIException(data=msg) - self.bend.delete_lu(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - hdp, lun) - - LOG.debug("LUN %s is deleted.", lun) - return - - def get_volume_stats(self, refresh=False): - """Get volume stats. If 'refresh', run update the stats first.""" - - if refresh: - self.driver_stats = self._get_stats() - - return self.driver_stats - - def get_pool(self, volume): - - if not volume['volume_type']: - return 'default' - else: - metadata = {} - type_id = volume['volume_type_id'] - if type_id is not None: - metadata = volume_types.get_volume_type_extra_specs(type_id) - if not metadata.get('service_label'): - return 'default' - else: - if metadata['service_label'] not in \ - self.config['services'].keys(): - return 'default' - else: - pass - return metadata['service_label'] + fs_list = self.config['fs'].keys() + + for fs in fs_list: + if not self.backend.get_fs_info(fs): + msg = (_("File system not found or not mounted: %(fs)s") % + {'fs': fs}) + LOG.error(msg) + raise exception.ParameterNotFound(param=msg) def _check_pool_and_fs(self, volume, fs_label): - """Validation of the pool and filesystem. + """Validates pool and file system of a volume being managed. Checks if the file system for the volume-type chosen matches the one passed in the volume reference. Also, checks if the pool @@ -867,26 +322,27 @@ class HDSISCSIDriver(driver.ISCSIDriver): :param volume: Reference to the volume. :param fs_label: Label of the file system. 
+ :raises: ManageExistingVolumeTypeMismatch """ - pool_from_vol_type = self.get_pool(volume) + pool_from_vol_type = hnas_utils.get_pool(self.config, volume) - pool_from_host = utils.extract_host(volume['host'], level='pool') - - if self.config['services'][pool_from_vol_type]['hdp'] != fs_label: - msg = (_("Failed to manage existing volume because the pool of " - "the volume type chosen does not match the file system " - "passed in the volume reference."), - {'File System passed': fs_label, - 'File System for volume type': - self.config['services'][pool_from_vol_type]['hdp']}) + pool_from_host = utils.extract_host(volume.host, level='pool') + pool = self.config['services'][pool_from_vol_type]['hdp'] + if pool != fs_label: + msg = (_("Failed to manage existing volume because the " + "pool %(pool)s of the volume type chosen does not " + "match the file system %(fs_label)s passed in the " + "volume reference.") + % {'pool': pool, 'fs_label': fs_label}) + LOG.error(msg) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if pool_from_host != pool_from_vol_type: - msg = (_("Failed to manage existing volume because the pool of " - "the volume type chosen does not match the pool of " - "the host."), - {'Pool of the volume type': pool_from_vol_type, - 'Pool of the host': pool_from_host}) + msg = (_("Failed to manage existing volume because the pool " + "%(pool)s of the volume type chosen does not match the " + "pool %(pool_host)s of the host.") % + {'pool': pool_from_vol_type, 'pool_host': pool_from_host}) + LOG.error(msg) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) def _get_info_from_vol_ref(self, vol_ref): @@ -896,6 +352,8 @@ class HDSISCSIDriver(driver.ISCSIDriver): the volume reference. :param vol_ref: existing volume to take under management + :returns: the file system label and the volume name or raises error + :raises: ManageExistingInvalidReference """ vol_info = vol_ref.strip().split('/') @@ -905,50 +363,263 @@ class HDSISCSIDriver(driver.ISCSIDriver): return fs_label, vol_name else: - msg = (_("The reference to the volume in the backend should have " - "the format file_system/volume_name (volume_name cannot " - "contain '/')")) + msg = _("The reference to the volume in the backend should have " + "the format file_system/volume_name (volume_name cannot " + "contain '/')") + LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=msg) + def check_for_setup_error(self): + pass + + def do_setup(self, context): + """Sets up and verify Hitachi HNAS storage connection.""" + self.context = context + self._check_fs_list() + + version_info = self.backend.get_version() + LOG.info(_LI("HNAS iSCSI driver.")) + LOG.info(_LI("HNAS model: %(mdl)s"), {'mdl': version_info['model']}) + LOG.info(_LI("HNAS version: %(version)s"), + {'version': version_info['version']}) + LOG.info(_LI("HNAS hardware: %(hw)s"), + {'hw': version_info['hardware']}) + LOG.info(_LI("HNAS S/N: %(sn)s"), {'sn': version_info['serial']}) + service_list = self.config['services'].keys() + for svc in service_list: + svc = self.config['services'][svc] + pool = {} + pool['pool_name'] = svc['volume_type'] + pool['service_label'] = svc['volume_type'] + pool['fs'] = svc['hdp'] + + self.pools.append(pool) + + LOG.debug("Configured pools: %(pool)s", {'pool': self.pools}) + + evs_info = self.backend.get_evs_info() + LOG.info(_LI("Configured EVSs: %(evs)s"), {'evs': evs_info}) + + for svc in self.config['services'].keys(): + svc_ip = self.config['services'][svc]['iscsi_ip'] + if svc_ip 
in evs_info.keys(): + LOG.info(_LI("iSCSI portal found for service: %(svc_ip)s"), + {'svc_ip': svc_ip}) + self.config['services'][svc]['evs'] = ( + evs_info[svc_ip]['evs_number']) + self.config['services'][svc]['iscsi_port'] = '3260' + self.config['services'][svc]['port'] = '0' + else: + LOG.error(_LE("iSCSI portal not found " + "for service: %(svc)s"), {'svc': svc_ip}) + raise exception.InvalidParameterValue(err=svc_ip) + LOG.info(_LI("HNAS iSCSI Driver loaded successfully.")) + + def ensure_export(self, context, volume): + pass + + def create_export(self, context, volume, connector): + pass + + def remove_export(self, context, volume): + pass + + @cinder_utils.trace + def create_volume(self, volume): + """Creates a LU on HNAS. + + :param volume: dictionary volume reference + :returns: the volume provider location + """ + fs = self._get_service(volume) + size = six.text_type(volume.size) + + self.backend.create_lu(fs, size, volume.name) + + return {'provider_location': self._get_provider_location(volume)} + + @cinder_utils.trace + def create_cloned_volume(self, dst, src): + """Creates a clone of a volume. + + :param dst: dictionary destination volume reference + :param src: dictionary source volume reference + :returns: the provider location of the extended volume + """ + fs_label = self._get_service(dst) + + self.backend.create_cloned_lu(src.name, fs_label, dst.name) + + if src.size < dst.size: + LOG.debug("Increasing dest size from %(old_size)s to " + "%(new_size)s", + {'old_size': src.size, 'new_size': dst.size}) + self.extend_volume(dst, dst.size) + + return {'provider_location': self._get_provider_location(dst)} + + @cinder_utils.trace + def extend_volume(self, volume, new_size): + """Extends an existing volume. + + :param volume: dictionary volume reference + :param new_size: int size in GB to extend + """ + fs = self._get_service(volume) + self.backend.extend_lu(fs, new_size, volume.name) + + @cinder_utils.trace + def delete_volume(self, volume): + """Deletes the volume on HNAS. + + :param volume: dictionary volume reference + """ + fs = self._get_service(volume) + self.backend.delete_lu(fs, volume.name) + + @cinder_utils.synchronized('volume_mapping') + @cinder_utils.trace + def initialize_connection(self, volume, connector): + """Maps the created volume to connector['initiator']. + + :param volume: dictionary volume reference + :param connector: dictionary connector reference + :returns: The connection information + :raises: ISCSITargetAttachFailed + """ + service_info = self._get_service_target(volume) + (ip, ipp, evs, port, _fs, tgtalias, secret) = service_info + + try: + conn = self.backend.add_iscsi_conn(volume.name, _fs, port, + tgtalias, + connector['initiator']) + + except processutils.ProcessExecutionError: + msg = (_("Error attaching volume %(vol)s. 
" + "Target limit might be reached!") % {'vol': volume.id}) + LOG.error(msg) + raise exception.ISCSITargetAttachFailed(volume_id=volume.id) + + hnas_portal = ip + ':' + ipp + lu_id = six.text_type(conn['lu_id']) + fulliqn = conn['iqn'] + tgt = (hnas_portal + ',' + tgtalias + ',' + + volume.provider_location + ',' + evs + ',' + + port + ',' + lu_id) + + LOG.info(_LI("initiate: connection %(tgt)s"), {'tgt': tgt}) + + properties = {} + properties['provider_location'] = tgt + properties['target_discovered'] = False + properties['target_portal'] = hnas_portal + properties['target_iqn'] = fulliqn + properties['target_lu'] = int(lu_id) + properties['volume_id'] = volume.id + properties['auth_username'] = connector['initiator'] + + if self.config['chap_enabled']: + properties['auth_method'] = 'CHAP' + properties['auth_password'] = secret + + conn_info = {'driver_volume_type': 'iscsi', 'data': properties} + + return conn_info + + @cinder_utils.synchronized('volume_mapping') + @cinder_utils.trace + def terminate_connection(self, volume, connector, **kwargs): + """Terminate a connection to a volume. + + :param volume: dictionary volume reference + :param connector: dictionary connector reference + """ + service_info = self._get_service_target(volume) + (ip, ipp, evs, port, fs, tgtalias, secret) = service_info + lu_info = self.backend.check_lu(volume.name, fs) + + self.backend.del_iscsi_conn(evs, tgtalias, lu_info['id']) + + @cinder_utils.trace + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot. + + :param volume: dictionary volume reference + :param snapshot: dictionary snapshot reference + :returns: the provider location of the snapshot + """ + fs = self._get_service(volume) + + self.backend.create_cloned_lu(snapshot.name, fs, volume.name) + + return {'provider_location': self._get_provider_location(snapshot)} + + @cinder_utils.trace + def create_snapshot(self, snapshot): + """Creates a snapshot. + + :param snapshot: dictionary snapshot reference + :returns: the provider location of the snapshot + """ + fs = self._get_service(snapshot.volume) + + self.backend.create_cloned_lu(snapshot.volume_name, fs, snapshot.name) + + return {'provider_location': self._get_provider_location(snapshot)} + + @cinder_utils.trace + def delete_snapshot(self, snapshot): + """Deletes a snapshot. + + :param snapshot: dictionary snapshot reference + """ + fs = self._get_service(snapshot.volume) + self.backend.delete_lu(fs, snapshot.name) + + def get_volume_stats(self, refresh=False): + """Gets the volume driver stats. + + :param refresh: if refresh is True, the driver_stats is updated + :returns: the driver stats + """ + if refresh: + self.driver_stats = self._get_stats() + + return self.driver_stats + + @cinder_utils.trace def manage_existing_get_size(self, volume, existing_vol_ref): """Gets the size to manage_existing. Returns the size of volume to be managed by manage_existing. - :param volume: cinder volume to manage + :param volume: cinder volume to manage :param existing_vol_ref: existing volume to take under management + :returns: the size of the volume to be managed or raises error + :raises: ManageExistingInvalidReference """ - # Check that the reference is valid. + # Check if the reference is valid. 
if 'source-name' not in existing_vol_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_vol_ref, reason=reason) - ref_name = existing_vol_ref['source-name'] - fs_label, vol_name = self._get_info_from_vol_ref(ref_name) + fs_label, vol_name = ( + self._get_info_from_vol_ref(existing_vol_ref['source-name'])) LOG.debug("File System: %(fs_label)s " "Volume name: %(vol_name)s.", {'fs_label': fs_label, 'vol_name': vol_name}) - vol_name = "'{}'".format(vol_name) + if utils.check_already_managed_volume(vol_name): + raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name) - lu_info = self.bend.get_existing_lu_info(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - fs_label, vol_name) + lu_info = self.backend.get_existing_lu_info(vol_name, fs_label) - if fs_label in lu_info: - aux = lu_info.split('\n')[3] - size = aux.split(':')[1] - size_unit = size.split(' ')[2] - - if size_unit == 'TB': - return int(size.split(' ')[1]) * units.k - else: - return int(size.split(' ')[1]) + if lu_info != {}: + return lu_info['size'] else: raise exception.ManageExistingInvalidReference( existing_ref=existing_vol_ref, @@ -956,6 +627,7 @@ class HDSISCSIDriver(driver.ISCSIDriver): 'If your volume name contains "/", please rename it ' 'and try to manage again.')) + @cinder_utils.trace def manage_existing(self, volume, existing_vol_ref): """Manages an existing volume. @@ -966,54 +638,53 @@ class HDSISCSIDriver(driver.ISCSIDriver): e.g., openstack/vol_to_manage :param volume: cinder volume to manage - :param existing_vol_ref: driver-specific information used to identify a + :param existing_vol_ref: driver specific information used to identify a volume + :returns: the provider location of the volume managed """ - ref_name = existing_vol_ref['source-name'] - fs_label, vol_name = self._get_info_from_vol_ref(ref_name) + LOG.info(_LI("Asked to manage ISCSI volume %(vol)s, with vol " + "ref %(ref)s."), {'vol': volume.id, + 'ref': existing_vol_ref['source-name']}) - LOG.debug("Asked to manage ISCSI volume %(vol)s, with vol " - "ref %(ref)s.", {'vol': volume['id'], - 'ref': existing_vol_ref['source-name']}) + fs_label, vol_name = ( + self._get_info_from_vol_ref(existing_vol_ref['source-name'])) - self._check_pool_and_fs(volume, fs_label) + if volume.volume_type is not None: + self._check_pool_and_fs(volume, fs_label) - vol_name = "'{}'".format(vol_name) - - self.bend.rename_existing_lu(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], fs_label, - volume['name'], vol_name) + self.backend.rename_existing_lu(fs_label, vol_name, volume.name) LOG.info(_LI("Set newly managed Cinder volume name to %(name)s."), - {'name': volume['name']}) + {'name': volume.name}) - lun = self.arid + '.' + volume['name'] - - return {'provider_location': lun} + return {'provider_location': self._get_provider_location(volume)} + @cinder_utils.trace def unmanage(self, volume): """Unmanages a volume from cinder. Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. A log entry - will be made to notify the Admin that the volume is no longer being + will be made to notify the admin that the volume is no longer being managed. 
:param volume: cinder volume to unmanage """ - svc = self._get_service(volume) + fslabel = self._get_service(volume) + new_name = 'unmanage-' + volume.name + vol_path = fslabel + '/' + volume.name - new_name = 'unmanage-' + volume['name'] - vol_path = svc + '/' + volume['name'] + self.backend.rename_existing_lu(fslabel, volume.name, new_name) - self.bend.rename_existing_lu(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], svc, new_name, - volume['name']) + LOG.info(_LI("The volume with path %(old)s is no longer being managed " + "by Cinder. However, it was not deleted and can be found " + "with the new name %(cr)s on backend."), + {'old': vol_path, 'cr': new_name}) - LOG.info(_LI("Cinder ISCSI volume with current path %(path)s is " - "no longer being managed. The new name is %(unm)s."), - {'path': vol_path, 'unm': new_name}) + def _get_provider_location(self, volume): + """Gets the provider location of a given volume + + :param volume: dictionary volume reference + :returns: the provider_location related to the volume + """ + return self.backend.get_version()['mac'] + '.' + volume.name diff --git a/cinder/volume/drivers/hitachi/hnas_nfs.py b/cinder/volume/drivers/hitachi/hnas_nfs.py index c79c89a92..c393493f4 100644 --- a/cinder/volume/drivers/hitachi/hnas_nfs.py +++ b/cinder/volume/drivers/hitachi/hnas_nfs.py @@ -14,21 +14,18 @@ # under the License. """ -Volume driver for HDS HNAS NFS storage. +Volume driver for HNAS NFS storage. """ import math import os -import re -import six import socket -import time -from xml.etree import ElementTree as ETree from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units +import six from cinder import exception from cinder.i18n import _, _LE, _LI @@ -36,171 +33,86 @@ from cinder.image import image_utils from cinder import interface from cinder import utils as cutils from cinder.volume.drivers.hitachi import hnas_backend +from cinder.volume.drivers.hitachi import hnas_utils from cinder.volume.drivers import nfs from cinder.volume import utils -from cinder.volume import volume_types -HDS_HNAS_NFS_VERSION = '4.1.0' +HNAS_NFS_VERSION = '5.0.0' LOG = logging.getLogger(__name__) NFS_OPTS = [ cfg.StrOpt('hds_hnas_nfs_config_file', default='/opt/hds/hnas/cinder_nfs_conf.xml', - help='Configuration file for HDS NFS cinder plugin'), ] + help='Legacy configuration file for HNAS NFS Cinder plugin. ' + 'This is not needed if you fill all configuration on ' + 'cinder.conf', + deprecated_for_removal=True) +] CONF = cfg.CONF CONF.register_opts(NFS_OPTS) -HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'ssh_port': '22'} - - -def _xml_read(root, element, check=None): - """Read an xml element.""" - - val = root.findtext(element) - - # mandatory parameter not found - if val is None and check: - raise exception.ParameterNotFound(param=element) - - # tag not found - if val is None: - return None - - svc_tag_pattern = re.compile("svc_.$") - # tag found but empty parameter. - if not val.strip(): - if svc_tag_pattern.search(element): - return "" - raise exception.ParameterNotFound(param=element) - - LOG.debug(_LI("%(element)s: %(val)s"), - {'element': element, - 'val': val if element != 'password' else '***'}) - - return val.strip() - - -def _read_config(xml_config_file): - """Read hds driver specific xml config file. 
- - :param xml_config_file: string filename containing XML configuration - """ - - if not os.access(xml_config_file, os.R_OK): - msg = (_("Can't open config file: %s") % xml_config_file) - raise exception.NotFound(message=msg) - - try: - root = ETree.parse(xml_config_file).getroot() - except Exception: - msg = (_("Error parsing config file: %s") % xml_config_file) - raise exception.ConfigNotFound(message=msg) - - # mandatory parameters - config = {} - arg_prereqs = ['mgmt_ip0', 'username'] - for req in arg_prereqs: - config[req] = _xml_read(root, req, True) - - # optional parameters - opt_parameters = ['hnas_cmd', 'ssh_enabled', 'cluster_admin_ip0'] - for req in opt_parameters: - config[req] = _xml_read(root, req) - - if config['ssh_enabled'] == 'True': - config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', True) - config['password'] = _xml_read(root, 'password') - config['ssh_port'] = _xml_read(root, 'ssh_port') - if config['ssh_port'] is None: - config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port'] - else: - # password is mandatory when not using SSH - config['password'] = _xml_read(root, 'password', True) - - if config['hnas_cmd'] is None: - config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd'] - - config['hdp'] = {} - config['services'] = {} - - # min one needed - for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: - if _xml_read(root, svc) is None: - continue - service = {'label': svc} - - # none optional - for arg in ['volume_type', 'hdp']: - service[arg] = _xml_read(root, svc + '/' + arg, True) - config['services'][service['volume_type']] = service - config['hdp'][service['hdp']] = service['hdp'] - - # at least one service required! - if config['services'].keys() is None: - raise exception.ParameterNotFound(param="No service found") - - return config - - -def factory_bend(drv_config): - """Factory over-ride in self-tests.""" - - return hnas_backend.HnasBackend(drv_config) +HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc', 'ssh_port': '22'} @interface.volumedriver -class HDSNFSDriver(nfs.NfsDriver): +class HNASNFSDriver(nfs.NfsDriver): """Base class for Hitachi NFS driver. Executes commands relating to Volumes. - .. code-block:: none + Version history: + + .. 
code-block:: none Version 1.0.0: Initial driver version Version 2.2.0: Added support to SSH authentication Version 3.0.0: Added pool aware scheduling Version 4.0.0: Added manage/unmanage features Version 4.1.0: Fixed XML parser checks on blank options + Version 5.0.0: Remove looping in driver initialization + Code cleaning up + New communication interface between the driver and HNAS + Removed the option to use local SSC (ssh_enabled=False) + Updated to use versioned objects + Changed the class name to HNASNFSDriver + Deprecated XML config file + Added support to manage/unmanage snapshots features """ + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Hitachi_HNAS_CI" + VERSION = HNAS_NFS_VERSION def __init__(self, *args, **kwargs): - # NOTE(vish): db is set by Manager self._execute = None self.context = None self.configuration = kwargs.get('configuration', None) + service_parameters = ['volume_type', 'hdp'] + optional_parameters = ['ssc_cmd', 'cluster_admin_ip0'] + if self.configuration: + self.configuration.append_config_values( + hnas_utils.drivers_common_opts) self.configuration.append_config_values(NFS_OPTS) - self.config = _read_config( - self.configuration.hds_hnas_nfs_config_file) + self.config = {} - super(HDSNFSDriver, self).__init__(*args, **kwargs) - self.bend = factory_bend(self.config) + # Trying to get HNAS configuration from cinder.conf + self.config = hnas_utils.read_cinder_conf( + self.configuration, 'nfs') - def _array_info_get(self): - """Get array parameters.""" + # If HNAS configuration are not set on cinder.conf, tries to use + # the deprecated XML configuration file + if not self.config: + self.config = hnas_utils.read_xml_config( + self.configuration.hds_hnas_nfs_config_file, + service_parameters, + optional_parameters) - out = self.bend.get_version(self.config['hnas_cmd'], - HDS_HNAS_NFS_VERSION, - self.config['mgmt_ip0'], - self.config['username'], - self.config['password']) - - inf = out.split() - return inf[1], 'nfs_' + inf[1], inf[6] - - def _id_to_vol(self, volume_id): - """Given the volume id, retrieve the volume object from database. - - :param volume_id: string volume id - """ - - vol = self.db.volume_get(self.context, volume_id) - - return vol + super(HNASNFSDriver, self).__init__(*args, **kwargs) + self.backend = hnas_backend.HNASSSHBackend(self.config) def _get_service(self, volume): """Get service parameters. @@ -209,54 +121,56 @@ class HDSNFSDriver(nfs.NfsDriver): its type. :param volume: dictionary volume reference + :returns: Tuple containing the service parameters (label, + export path and export file system) or error if no configuration is + found. 
+ :raises: ParameterNotFound """ - - LOG.debug("_get_service: volume: %s", volume) - label = utils.extract_host(volume['host'], level='pool') + LOG.debug("_get_service: volume: %(vol)s", {'vol': volume}) + label = utils.extract_host(volume.host, level='pool') if label in self.config['services'].keys(): svc = self.config['services'][label] - LOG.info(_LI("Get service: %(lbl)s->%(svc)s"), - {'lbl': label, 'svc': svc['fslabel']}) - service = (svc['hdp'], svc['path'], svc['fslabel']) + LOG.debug("_get_service: %(lbl)s->%(svc)s", + {'lbl': label, 'svc': svc['export']['fs']}) + service = (svc['hdp'], svc['export']['path'], svc['export']['fs']) else: - LOG.info(_LI("Available services: %s"), - self.config['services'].keys()) - LOG.error(_LE("No configuration found for service: %s"), - label) + LOG.info(_LI("Available services: %(svc)s"), + {'svc': self.config['services'].keys()}) + LOG.error(_LE("No configuration found for service: %(lbl)s"), + {'lbl': label}) raise exception.ParameterNotFound(param=label) return service + @cutils.trace def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: dictionary volume reference :param new_size: int size in GB to extend + :raises: InvalidResults """ - - nfs_mount = self._get_provider_location(volume['id']) - path = self._get_volume_path(nfs_mount, volume['name']) + nfs_mount = volume.provider_location + path = self._get_volume_path(nfs_mount, volume.name) # Resize the image file on share to new size. - LOG.debug("Checking file for resize") + LOG.info(_LI("Checking file for resize.")) + + if not self._is_file_size_equal(path, new_size): + LOG.info(_LI("Resizing file to %(sz)sG"), {'sz': new_size}) + image_utils.resize_image(path, new_size) if self._is_file_size_equal(path, new_size): - return + LOG.info(_LI("LUN %(id)s extended to %(size)s GB."), + {'id': volume.id, 'size': new_size}) else: - LOG.info(_LI("Resizing file to %sG"), new_size) - image_utils.resize_image(path, new_size) - if self._is_file_size_equal(path, new_size): - LOG.info(_LI("LUN %(id)s extended to %(size)s GB."), - {'id': volume['id'], 'size': new_size}) - return - else: - raise exception.InvalidResults( - _("Resizing image file failed.")) + msg = _("Resizing image file failed.") + LOG.error(msg) + raise exception.InvalidResults(msg) def _is_file_size_equal(self, path, size): """Checks if file size at path is equal to size.""" - data = image_utils.qemu_img_info(path) virt_size = data.virtual_size / units.Gi @@ -265,174 +179,118 @@ class HDSNFSDriver(nfs.NfsDriver): else: return False + @cutils.trace def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" + """Creates a volume from a snapshot. 
- LOG.debug("create_volume_from %s", volume) - vol_size = volume['size'] - snap_size = snapshot['volume_size'] - - if vol_size != snap_size: - msg = _("Cannot create volume of size %(vol_size)s from " - "snapshot of size %(snap_size)s") - msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size} - raise exception.CinderException(msg % msg_fmt) - - self._clone_volume(snapshot['name'], - volume['name'], - snapshot['volume_id']) - share = self._get_volume_location(snapshot['volume_id']) + :param volume: volume to be created + :param snapshot: source snapshot + :returns: the provider_location of the volume created + """ + self._clone_volume(snapshot.volume, volume.name, snapshot.name) + share = snapshot.volume.provider_location return {'provider_location': share} + @cutils.trace def create_snapshot(self, snapshot): """Create a snapshot. :param snapshot: dictionary snapshot reference + :returns: the provider_location of the snapshot created """ + self._clone_volume(snapshot.volume, snapshot.name) - self._clone_volume(snapshot['volume_name'], - snapshot['name'], - snapshot['volume_id']) - share = self._get_volume_location(snapshot['volume_id']) - LOG.debug('Share: %s', share) + share = snapshot.volume.provider_location + LOG.debug('Share: %(shr)s', {'shr': share}) # returns the mount point (not path) return {'provider_location': share} + @cutils.trace def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: dictionary snapshot reference """ + nfs_mount = snapshot.volume.provider_location - nfs_mount = self._get_provider_location(snapshot['volume_id']) - - if self._volume_not_present(nfs_mount, snapshot['name']): + if self._volume_not_present(nfs_mount, snapshot.name): return True - self._execute('rm', self._get_volume_path(nfs_mount, snapshot['name']), + self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name), run_as_root=True) - def _get_volume_location(self, volume_id): - """Returns NFS mount address as :. - - :param volume_id: string volume id - """ - - nfs_server_ip = self._get_host_ip(volume_id) - export_path = self._get_export_path(volume_id) - - return nfs_server_ip + ':' + export_path - - def _get_provider_location(self, volume_id): - """Returns provider location for given volume. - - :param volume_id: string volume id - """ - - volume = self.db.volume_get(self.context, volume_id) - - # same format as _get_volume_location - return volume.provider_location - - def _get_host_ip(self, volume_id): - """Returns IP address for the given volume. - - :param volume_id: string volume id - """ - - return self._get_provider_location(volume_id).split(':')[0] - - def _get_export_path(self, volume_id): - """Returns NFS export path for the given volume. - - :param volume_id: string volume id - """ - - return self._get_provider_location(volume_id).split(':')[1] - def _volume_not_present(self, nfs_mount, volume_name): - """Check if volume exists. + """Check if volume does not exist. 
+ :param nfs_mount: string path of the nfs share :param volume_name: string volume name + :returns: boolean (true for volume not present and false otherwise) """ - try: - self._try_execute('ls', self._get_volume_path(nfs_mount, - volume_name)) + self._try_execute('ls', + self._get_volume_path(nfs_mount, volume_name)) except processutils.ProcessExecutionError: # If the volume isn't present return True return False - def _try_execute(self, *command, **kwargs): - # NOTE(vish): Volume commands can partially fail due to timing, but - # running them a second time on failure will usually - # recover nicely. - tries = 0 - while True: - try: - self._execute(*command, **kwargs) - return True - except processutils.ProcessExecutionError: - tries += 1 - if tries >= self.configuration.num_shell_tries: - raise - LOG.exception(_LE("Recovering from a failed execute. " - "Try number %s"), tries) - time.sleep(tries ** 2) - def _get_volume_path(self, nfs_share, volume_name): """Get volume path (local fs path) for given name on given nfs share. :param nfs_share string, example 172.18.194.100:/var/nfs :param volume_name string, - example volume-91ee65ec-c473-4391-8c09-162b00c68a8c + example volume-91ee65ec-c473-4391-8c09-162b00c68a8c + :returns: the local path according to the parameters """ - return os.path.join(self._get_mount_point_for_share(nfs_share), volume_name) + @cutils.trace def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. - :param volume: dictionary volume reference - :param src_vref: dictionary src_vref reference + :param volume: reference to the volume being created + :param src_vref: reference to the source volume + :returns: the provider_location of the cloned volume """ + vol_size = volume.size + src_vol_size = src_vref.size - vol_size = volume['size'] - src_vol_size = src_vref['size'] + self._clone_volume(src_vref, volume.name, src_vref.name) - if vol_size < src_vol_size: - msg = _("Cannot create clone of size %(vol_size)s from " - "volume of size %(src_vol_size)s") - msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size} - raise exception.CinderException(msg % msg_fmt) - - self._clone_volume(src_vref['name'], volume['name'], src_vref['id']) + share = src_vref.provider_location if vol_size > src_vol_size: + volume.provider_location = share self.extend_volume(volume, vol_size) - share = self._get_volume_location(src_vref['id']) - return {'provider_location': share} def get_volume_stats(self, refresh=False): """Get volume stats. - if 'refresh' is True, update the stats first. + :param refresh: if it is True, update the stats first. 
+ :returns: dictionary with the stats from HNAS + _stats['pools']={ + 'total_capacity_gb': total size of the pool, + 'free_capacity_gb': the available size, + 'allocated_capacity_gb': current allocated size, + 'QoS_support': bool to indicate if QoS is supported, + 'reserved_percentage': percentage of size reserved + } """ + LOG.info(_LI("Getting volume stats")) - _stats = super(HDSNFSDriver, self).get_volume_stats(refresh) - _stats["vendor_name"] = 'HDS' - _stats["driver_version"] = HDS_HNAS_NFS_VERSION + _stats = super(HNASNFSDriver, self).get_volume_stats(refresh) + _stats["vendor_name"] = 'Hitachi' + _stats["driver_version"] = HNAS_NFS_VERSION _stats["storage_protocol"] = 'NFS' for pool in self.pools: - capacity, free, used = self._get_capacity_info(pool['hdp']) + capacity, free, used = self._get_capacity_info(pool['fs']) pool['total_capacity_gb'] = capacity / float(units.Gi) pool['free_capacity_gb'] = free / float(units.Gi) pool['allocated_capacity_gb'] = used / float(units.Gi) @@ -441,79 +299,63 @@ class HDSNFSDriver(nfs.NfsDriver): _stats['pools'] = self.pools - LOG.info(_LI('Driver stats: %s'), _stats) + LOG.debug('Driver stats: %(stat)s', {'stat': _stats}) return _stats - def _get_nfs_info(self): - out = self.bend.get_nfs_info(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password']) - lines = out.split('\n') - - # dict based on NFS exports addresses - conf = {} - for line in lines: - if 'Export' in line: - inf = line.split() - (export, path, fslabel, hdp, ip1) = \ - inf[1], inf[3], inf[5], inf[7], inf[11] - # 9, 10, etc are IP addrs - key = ip1 + ':' + export - conf[key] = {} - conf[key]['path'] = path - conf[key]['hdp'] = hdp - conf[key]['fslabel'] = fslabel - LOG.info(_LI("nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s " - "FSID: %(hdp)s"), - {'key': key, 'path': path, - 'fslabel': fslabel, 'hdp': hdp}) - - return conf - def do_setup(self, context): """Perform internal driver setup.""" + version_info = self.backend.get_version() + LOG.info(_LI("HNAS NFS driver.")) + LOG.info(_LI("HNAS model: %(mdl)s"), {'mdl': version_info['model']}) + LOG.info(_LI("HNAS version: %(ver)s"), + {'ver': version_info['version']}) + LOG.info(_LI("HNAS hardware: %(hw)s"), + {'hw': version_info['hardware']}) + LOG.info(_LI("HNAS S/N: %(sn)s"), {'sn': version_info['serial']}) self.context = context - self._load_shares_config(getattr(self.configuration, - self.driver_prefix + - '_shares_config')) - LOG.info(_LI("Review shares: %s"), self.shares) + self._load_shares_config( + getattr(self.configuration, self.driver_prefix + '_shares_config')) + LOG.info(_LI("Review shares: %(shr)s"), {'shr': self.shares}) - nfs_info = self._get_nfs_info() + elist = self.backend.get_export_list() - LOG.debug("nfs_info: %s", nfs_info) + # Check for all configured exports + for svc_name, svc_info in self.config['services'].items(): + server_ip = svc_info['hdp'].split(':')[0] + mountpoint = svc_info['hdp'].split(':')[1] - for share in self.shares: - if share in nfs_info.keys(): - LOG.info(_LI("share: %(share)s -> %(info)s"), - {'share': share, 'info': nfs_info[share]['path']}) + # Ensure export are configured in HNAS + export_configured = False + for export in elist: + if mountpoint == export['name'] and server_ip in export['evs']: + svc_info['export'] = export + export_configured = True - for svc in self.config['services'].keys(): - if share == self.config['services'][svc]['hdp']: - self.config['services'][svc]['path'] = \ - nfs_info[share]['path'] - # don't overwrite HDP value 
- self.config['services'][svc]['fsid'] = \ - nfs_info[share]['hdp'] - self.config['services'][svc]['fslabel'] = \ - nfs_info[share]['fslabel'] - LOG.info(_LI("Save service info for" - " %(svc)s -> %(hdp)s, %(path)s"), - {'svc': svc, 'hdp': nfs_info[share]['hdp'], - 'path': nfs_info[share]['path']}) - break - if share != self.config['services'][svc]['hdp']: - LOG.error(_LE("NFS share %(share)s has no service entry:" - " %(svc)s -> %(hdp)s"), - {'share': share, 'svc': svc, - 'hdp': self.config['services'][svc]['hdp']}) - raise exception.ParameterNotFound(param=svc) - else: - LOG.info(_LI("share: %s incorrect entry"), share) + # Ensure exports are reachable + try: + out, err = self._execute('showmount', '-e', server_ip) + except processutils.ProcessExecutionError: + LOG.exception(_LE("NFS server %(srv)s not reachable!"), + {'srv': server_ip}) + raise - LOG.debug("self.config['services'] = %s", self.config['services']) + export_list = out.split('\n')[1:] + export_list.pop() + mountpoint_not_found = mountpoint not in map( + lambda x: x.split()[0], export_list) + if (len(export_list) < 1 or + mountpoint_not_found or + not export_configured): + LOG.error(_LE("Configured share %(share)s is not present " + "in %(srv)s."), + {'share': mountpoint, 'srv': server_ip}) + msg = _('Section: %(svc_name)s') % {'svc_name': svc_name} + raise exception.InvalidParameterValue(err=msg) + + LOG.debug("Loading services: %(svc)s", { + 'svc': self.config['services']}) service_list = self.config['services'].keys() for svc in service_list: @@ -521,74 +363,59 @@ class HDSNFSDriver(nfs.NfsDriver): pool = {} pool['pool_name'] = svc['volume_type'] pool['service_label'] = svc['volume_type'] - pool['hdp'] = svc['hdp'] + pool['fs'] = svc['hdp'] self.pools.append(pool) - LOG.info(_LI("Configured pools: %s"), self.pools) + LOG.debug("Configured pools: %(pool)s", {'pool': self.pools}) + LOG.info(_LI("HNAS NFS Driver loaded successfully.")) - def _clone_volume(self, volume_name, clone_name, volume_id): + def _clone_volume(self, src_vol, clone_name, src_name=None): """Clones mounted volume using the HNAS file_clone. - :param volume_name: string volume name + :param src_vol: object source volume :param clone_name: string clone name (or snapshot) - :param volume_id: string volume id + :param src_name: name of the source volume. """ - export_path = self._get_export_path(volume_id) + # when the source is a snapshot, we need to pass the source name and + # use the information of the volume that originated the snapshot to + # get the clone path.
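The comment above describes how the clone source is chosen; a small sketch of that selection and the path construction it feeds (function and sample names invented for illustration):

```python
# Mirrors the logic that follows: cloning a volume uses the volume's
# own name, while cloning from a snapshot passes the snapshot file
# name in via src_name.
def clone_paths(service_path, src_vol_name, clone_name, src_name=None):
    source = src_name if src_name else src_vol_name
    return ('%s/%s' % (service_path, source),      # source_path
            '%s/%s' % (service_path, clone_name))  # target_path

# Volume clone:
#   clone_paths('/nfs_cinder', 'volume-1234', 'volume-5678')
#     -> ('/nfs_cinder/volume-1234', '/nfs_cinder/volume-5678')
# Clone from a snapshot (src_name is the snapshot file):
#   clone_paths('/nfs_cinder', 'volume-1234', 'volume-5678', 'snapshot-9')
#     -> ('/nfs_cinder/snapshot-9', '/nfs_cinder/volume-5678')
```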
+ if not src_name: + src_name = src_vol.name + # volume-ID snapshot-ID, /cinder - LOG.info(_LI("Cloning with volume_name %(vname)s clone_name %(cname)s" - " export_path %(epath)s"), {'vname': volume_name, - 'cname': clone_name, - 'epath': export_path}) + LOG.info(_LI("Cloning with volume_name %(vname)s, clone_name %(cname)s" + " ,export_path %(epath)s"), + {'vname': src_name, 'cname': clone_name, + 'epath': src_vol.provider_location}) - source_vol = self._id_to_vol(volume_id) - # sps; added target - (_hdp, _path, _fslabel) = self._get_service(source_vol) - target_path = '%s/%s' % (_path, clone_name) - source_path = '%s/%s' % (_path, volume_name) - out = self.bend.file_clone(self.config['hnas_cmd'], - self.config['mgmt_ip0'], - self.config['username'], - self.config['password'], - _fslabel, source_path, target_path) + (fs, path, fs_label) = self._get_service(src_vol) - return out + target_path = '%s/%s' % (path, clone_name) + source_path = '%s/%s' % (path, src_name) - def get_pool(self, volume): - if not volume['volume_type']: - return 'default' - else: - metadata = {} - type_id = volume['volume_type_id'] - if type_id is not None: - metadata = volume_types.get_volume_type_extra_specs(type_id) - if not metadata.get('service_label'): - return 'default' - else: - if metadata['service_label'] not in \ - self.config['services'].keys(): - return 'default' - else: - return metadata['service_label'] + self.backend.file_clone(fs_label, source_path, target_path) + @cutils.trace def create_volume(self, volume): """Creates a volume. :param volume: volume reference + :returns: the volume provider_location """ self._ensure_shares_mounted() - (_hdp, _path, _fslabel) = self._get_service(volume) + (fs_id, path, fslabel) = self._get_service(volume) - volume['provider_location'] = _hdp + volume.provider_location = fs_id LOG.info(_LI("Volume service: %(label)s. Casted to: %(loc)s"), - {'label': _fslabel, 'loc': volume['provider_location']}) + {'label': fslabel, 'loc': volume.provider_location}) self._do_create_volume(volume) - return {'provider_location': volume['provider_location']} + return {'provider_location': fs_id} def _convert_vol_ref_share_name_to_share_ip(self, vol_ref): """Converts the share point name to an IP address. @@ -596,8 +423,10 @@ class HDSNFSDriver(nfs.NfsDriver): The volume reference may have a DNS name portion in the share name. Convert that to an IP address and then restore the entire path. - :param vol_ref: driver-specific information used to identify a volume - :returns: a volume reference where share is in IP format + :param vol_ref: driver-specific information used to identify a volume + :returns: a volume reference where share is in IP format or raises + error + :raises: e.strerror """ # First strip out share and convert to IP format. @@ -606,9 +435,9 @@ class HDSNFSDriver(nfs.NfsDriver): try: vol_ref_share_ip = cutils.resolve_hostname(share_split[0]) except socket.gaierror as e: - LOG.error(_LE('Invalid hostname %(host)s'), - {'host': share_split[0]}) - LOG.debug('error: %s', e.strerror) + LOG.exception(_LE('Invalid hostname %(host)s'), + {'host': share_split[0]}) + LOG.debug('error: %(err)s', {'err': e.strerror}) raise # Now place back into volume reference. @@ -624,7 +453,8 @@ class HDSNFSDriver(nfs.NfsDriver): if unsuccessful. 
:param vol_ref: driver-specific information used to identify a volume - :returns: NFS Share, NFS mount, volume path or raise error + :returns: NFS Share, NFS mount, volume path or raise error + :raises: ManageExistingInvalidReference """ # Check that the reference is valid. if 'source-name' not in vol_ref: @@ -649,8 +479,8 @@ class HDSNFSDriver(nfs.NfsDriver): file_path) = vol_ref_share.partition(cfg_share) if work_share == cfg_share: file_path = file_path[1:] # strip off leading path divider - LOG.debug("Found possible share %s; checking mount.", - work_share) + LOG.debug("Found possible share %(shr)s; checking mount.", + {'shr': work_share}) nfs_mount = self._get_mount_point_for_share(nfs_share) vol_full_path = os.path.join(nfs_mount, file_path) if os.path.isfile(vol_full_path): @@ -665,8 +495,10 @@ class HDSNFSDriver(nfs.NfsDriver): raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, - reason=_('Volume not found on configured storage backend.')) + reason=_('Volume/Snapshot not found on configured storage ' + 'backend.')) + @cutils.trace def manage_existing(self, volume, existing_vol_ref): """Manages an existing volume. @@ -677,38 +509,44 @@ class HDSNFSDriver(nfs.NfsDriver): e.g., 10.10.32.1:/openstack/vol_to_manage or 10.10.32.1:/openstack/some_directory/vol_to_manage - :param volume: cinder volume to manage + :param volume: cinder volume to manage :param existing_vol_ref: driver-specific information used to identify a - volume + volume + :returns: the provider location + :raises: VolumeBackendAPIException """ # Attempt to find NFS share, NFS mount, and volume path from vol_ref. - (nfs_share, nfs_mount, vol_path + (nfs_share, nfs_mount, vol_name ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) - LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s.", - {'vol': volume['id'], - 'ref': existing_vol_ref['source-name']}) + LOG.info(_LI("Asked to manage NFS volume %(vol)s, " + "with vol ref %(ref)s."), + {'vol': volume.id, + 'ref': existing_vol_ref['source-name']}) + self._check_pool_and_share(volume, nfs_share) - if vol_path == volume['name']: - LOG.debug("New Cinder volume %s name matches reference name: " - "no need to rename.", volume['name']) + + if vol_name == volume.name: + LOG.debug("New Cinder volume %(vol)s name matches reference name: " + "no need to rename.", {'vol': volume.name}) else: - src_vol = os.path.join(nfs_mount, vol_path) - dst_vol = os.path.join(nfs_mount, volume['name']) + src_vol = os.path.join(nfs_mount, vol_name) + dst_vol = os.path.join(nfs_mount, volume.name) try: - self._execute("mv", src_vol, dst_vol, run_as_root=False, - check_exit_code=True) - LOG.debug("Setting newly managed Cinder volume name to %s.", - volume['name']) + self._try_execute("mv", src_vol, dst_vol, run_as_root=False, + check_exit_code=True) + LOG.debug("Setting newly managed Cinder volume name " + "to %(vol)s.", {'vol': volume.name}) self._set_rw_permissions_for_all(dst_vol) except (OSError, processutils.ProcessExecutionError) as err: - exception_msg = (_("Failed to manage existing volume " - "%(name)s, because rename operation " - "failed: Error msg: %(msg)s."), - {'name': existing_vol_ref['source-name'], - 'msg': six.text_type(err)}) - raise exception.VolumeBackendAPIException(data=exception_msg) + msg = (_("Failed to manage existing volume " + "%(name)s, because rename operation " + "failed: Error msg: %(msg)s.") % + {'name': existing_vol_ref['source-name'], + 'msg': six.text_type(err)}) + LOG.error(msg) + raise 
exception.VolumeBackendAPIException(data=msg) return {'provider_location': nfs_share} def _check_pool_and_share(self, volume, nfs_share): @@ -718,62 +556,45 @@ class HDSNFSDriver(nfs.NfsDriver): one passed in the volume reference. Also, checks if the pool for the volume type matches the pool for the host passed. - :param volume: cinder volume reference + :param volume: cinder volume reference :param nfs_share: NFS share passed to manage + :raises: ManageExistingVolumeTypeMismatch """ - pool_from_vol_type = self.get_pool(volume) + pool_from_vol_type = hnas_utils.get_pool(self.config, volume) - pool_from_host = utils.extract_host(volume['host'], level='pool') - - if self.config['services'][pool_from_vol_type]['hdp'] != nfs_share: + pool_from_host = utils.extract_host(volume.host, level='pool') + pool = self.config['services'][pool_from_vol_type]['hdp'] + if pool != nfs_share: msg = (_("Failed to manage existing volume because the pool of " - "the volume type chosen does not match the NFS share " - "passed in the volume reference."), - {'Share passed': nfs_share, - 'Share for volume type': - self.config['services'][pool_from_vol_type]['hdp']}) + "the volume type chosen (%(pool)s) does not match the " + "NFS share passed in the volume reference (%(share)s).") + % {'share': nfs_share, 'pool': pool}) + LOG.error(msg) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if pool_from_host != pool_from_vol_type: msg = (_("Failed to manage existing volume because the pool of " - "the volume type chosen does not match the pool of " - "the host."), - {'Pool of the volume type': pool_from_vol_type, - 'Pool of the host': pool_from_host}) + "the volume type chosen (%(pool)s) does not match the " + "pool of the host %(pool_host)s") % + {'pool': pool_from_vol_type, + 'pool_host': pool_from_host}) + LOG.error(msg) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) + @cutils.trace def manage_existing_get_size(self, volume, existing_vol_ref): """Returns the size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. - :param volume: cinder volume to manage + :param volume: cinder volume to manage :param existing_vol_ref: existing volume to take under management + :returns: the size of the volume or raise error + :raises: VolumeBackendAPIException """ + return self._manage_existing_get_size(existing_vol_ref) - # Attempt to find NFS share, NFS mount, and volume path from vol_ref. - (nfs_share, nfs_mount, vol_path - ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) - - try: - LOG.debug("Asked to get size of NFS vol_ref %s.", - existing_vol_ref['source-name']) - - file_path = os.path.join(nfs_mount, vol_path) - file_size = float(cutils.get_file_size(file_path)) / units.Gi - vol_size = int(math.ceil(file_size)) - except (OSError, ValueError): - exception_message = (_("Failed to manage existing volume " - "%(name)s, because of error in getting " - "volume size."), - {'name': existing_vol_ref['source-name']}) - raise exception.VolumeBackendAPIException(data=exception_message) - - LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.", - {'ref': existing_vol_ref['source-name'], 'size': vol_size}) - - return vol_size - + @cutils.trace def unmanage(self, volume): """Removes the specified volume from Cinder management. 
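manage_existing_get_size() must report whole gigabytes, rounded up; the _manage_existing_get_size() helper it now delegates to (shown further below in this patch) implements exactly that ceiling with math.ceil. A self-contained sketch of the rule:

```python
# Round a file size in bytes up to the next whole GiB, as the driver
# does via math.ceil and oslo_utils units.Gi.
import math

Gi = 1024 ** 3  # oslo_utils.units.Gi

def managed_size_gb(size_bytes):
    return int(math.ceil(float(size_bytes) / Gi))

assert managed_size_gb(1) == 1       # any non-empty file counts as 1 GB
assert managed_size_gb(Gi) == 1      # exactly 1 GiB
assert managed_size_gb(Gi + 1) == 2  # one byte over rounds up
```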
@@ -783,8 +604,8 @@ class HDSNFSDriver(nfs.NfsDriver): :param volume: cinder volume to unmanage """ - vol_str = CONF.volume_name_template % volume['id'] - path = self._get_mount_point_for_share(volume['provider_location']) + vol_str = CONF.volume_name_template % volume.id + path = self._get_mount_point_for_share(volume.provider_location) new_str = "unmanage-" + vol_str @@ -792,12 +613,122 @@ class HDSNFSDriver(nfs.NfsDriver): new_path = os.path.join(path, new_str) try: - self._execute("mv", vol_path, new_path, - run_as_root=False, check_exit_code=True) + self._try_execute("mv", vol_path, new_path, + run_as_root=False, check_exit_code=True) - LOG.info(_LI("Cinder NFS volume with current path %(cr)s is " - "no longer being managed."), {'cr': new_path}) + LOG.info(_LI("The volume with path %(old)s is no longer being " + "managed by Cinder. However, it was not deleted " + "and can be found in the new path %(cr)s."), + {'old': vol_path, 'cr': new_path}) except (OSError, ValueError): - LOG.error(_LE("The NFS Volume %(cr)s does not exist."), - {'cr': new_path}) + LOG.exception(_LE("The NFS Volume %(cr)s does not exist."), + {'cr': new_path}) + + def _manage_existing_get_size(self, existing_ref): + # Attempt to find NFS share, NFS mount, and path from vol_ref. + (nfs_share, nfs_mount, path + ) = self._get_share_mount_and_vol_from_vol_ref(existing_ref) + + try: + LOG.debug("Asked to get size of NFS ref %(ref)s.", + {'ref': existing_ref['source-name']}) + + file_path = os.path.join(nfs_mount, path) + file_size = float(cutils.get_file_size(file_path)) / units.Gi + # Round up to next Gb + size = int(math.ceil(file_size)) + except (OSError, ValueError): + exception_message = (_("Failed to manage existing volume/snapshot " + "%(name)s, because of error in getting " + "its size."), + {'name': existing_ref['source-name']}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug("Reporting size of NFS ref %(ref)s as %(size)d GB.", + {'ref': existing_ref['source-name'], 'size': size}) + + return size + + def _check_snapshot_parent(self, volume, old_snap_name, share): + + volume_name = 'volume-' + volume.id + (fs, path, fs_label) = self._get_service(volume) + # 172.24.49.34:/nfs_cinder + + export_path = self.backend.get_export_path(share.split(':')[1], + fs_label) + volume_path = os.path.join(export_path, volume_name) + + return self.backend.check_snapshot_parent(volume_path, old_snap_name, + fs_label) + + def manage_existing_snapshot(self, snapshot, existing_ref): + # Attempt to find NFS share, NFS mount, and volume path from ref. + (nfs_share, nfs_mount, src_snapshot_name + ) = self._get_share_mount_and_vol_from_vol_ref(existing_ref) + + LOG.info(_LI("Asked to manage NFS snapshot %(snap)s for volume " + "%(vol)s, with vol ref %(ref)s."), + {'snap': snapshot.id, + 'vol': snapshot.volume_id, + 'ref': existing_ref['source-name']}) + + volume = snapshot.volume + + # Check if the snapshot belongs to the volume + real_parent = self._check_snapshot_parent(volume, src_snapshot_name, + nfs_share) + + if not real_parent: + msg = (_("This snapshot %(snap)s doesn't belong " + "to the volume parent %(vol)s.") % + {'snap': snapshot.id, 'vol': volume.id}) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=msg) + + if src_snapshot_name == snapshot.name: + LOG.debug("New Cinder snapshot %(snap)s name matches reference " + "name. 
No need to rename.", {'snap': snapshot.name}) + else: + src_snap = os.path.join(nfs_mount, src_snapshot_name) + dst_snap = os.path.join(nfs_mount, snapshot.name) + try: + self._try_execute("mv", src_snap, dst_snap, run_as_root=False, + check_exit_code=True) + LOG.info(_LI("Setting newly managed Cinder snapshot name " + "to %(snap)s."), {'snap': snapshot.name}) + self._set_rw_permissions_for_all(dst_snap) + except (OSError, processutils.ProcessExecutionError) as err: + msg = (_("Failed to manage existing snapshot " + "%(name)s, because rename operation " + "failed: Error msg: %(msg)s.") % + {'name': existing_ref['source-name'], + 'msg': six.text_type(err)}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return {'provider_location': nfs_share} + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + return self._manage_existing_get_size(existing_ref) + + def unmanage_snapshot(self, snapshot): + path = self._get_mount_point_for_share(snapshot.provider_location) + + new_name = "unmanage-" + snapshot.name + + old_path = os.path.join(path, snapshot.name) + new_path = os.path.join(path, new_name) + + try: + self._execute("mv", old_path, new_path, + run_as_root=False, check_exit_code=True) + LOG.info(_LI("The snapshot with path %(old)s is no longer being " + "managed by Cinder. However, it was not deleted and " + "can be found in the new path %(cr)s."), + {'old': old_path, 'cr': new_path}) + + except (OSError, ValueError): + LOG.exception(_LE("The NFS snapshot %(old)s does not exist."), + {'old': old_path}) diff --git a/cinder/volume/drivers/hitachi/hnas_utils.py b/cinder/volume/drivers/hitachi/hnas_utils.py new file mode 100644 index 000000000..f6355fb58 --- /dev/null +++ b/cinder/volume/drivers/hitachi/hnas_utils.py @@ -0,0 +1,339 @@ +# Copyright (c) 2016 Hitachi Data Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Shared code for HNAS drivers +""" + +import os +import re + +from oslo_config import cfg +from oslo_log import log as logging +import six +from xml.etree import ElementTree as ETree + +from cinder import exception +from cinder.i18n import _, _LW, _LE +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc', + 'chap_enabled': True, + 'ssh_port': 22} + +MAX_HNAS_ISCSI_TARGETS = 32 + +drivers_common_opts = [ + cfg.IPOpt('hnas_mgmt_ip0', + help='Management IP address of HNAS. 
This can ' + 'be any IP in the admin address on HNAS or ' + 'the SMU IP.'), + cfg.StrOpt('hnas_ssc_cmd', + default='ssc', + help='Command to communicate to HNAS.'), + cfg.StrOpt('hnas_username', + help='HNAS username.'), + cfg.StrOpt('hnas_password', + secret=True, + help='HNAS password.'), + cfg.PortOpt('hnas_ssh_port', + default=22, + help='Port to be used for SSH authentication.'), + cfg.StrOpt('hnas_ssh_private_key', + help='Path to the SSH private key used to ' + 'authenticate in HNAS SMU.'), + cfg.StrOpt('hnas_cluster_admin_ip0', + default=None, + help='The IP of the HNAS cluster admin. ' + 'Required only for HNAS multi-cluster setups.'), + cfg.StrOpt('hnas_svc0_volume_type', + help='Service 0 volume type'), + cfg.StrOpt('hnas_svc0_hdp', + help='Service 0 HDP'), + cfg.StrOpt('hnas_svc1_volume_type', + help='Service 1 volume type'), + cfg.StrOpt('hnas_svc1_hdp', + help='Service 1 HDP'), + cfg.StrOpt('hnas_svc2_volume_type', + help='Service 2 volume type'), + cfg.StrOpt('hnas_svc2_hdp', + help='Service 2 HDP'), + cfg.StrOpt('hnas_svc3_volume_type', + help='Service 3 volume type'), + cfg.StrOpt('hnas_svc3_hdp', + help='Service 3 HDP') +] + +CONF = cfg.CONF +CONF.register_opts(drivers_common_opts) + + +def _check_conf_params(config, vol_type, dv_type, idx): + """Validates if the configuration on cinder.conf is complete. + + :param config: Dictionary with the driver configurations + :param vol_type: The volume type of the current pool + :param dv_type: The type of the driver (NFS or iSCSI) + :param idx: Index of the current pool + """ + + # Validating the inputs on cinder.conf + if config['username'] is None: + msg = (_("The config parameter hnas_username " + "is not set in the cinder.conf.")) + LOG.error(msg) + raise exception.InvalidParameterValue(err=msg) + + if (config['password'] is None and + config['ssh_private_key'] is None): + msg = (_("Credentials configuration parameters " + "missing: you need to set hnas_password " + "or hnas_ssh_private_key " + "in the cinder.conf.")) + LOG.error(msg) + raise exception.InvalidParameterValue(err=msg) + + if config['mgmt_ip0'] is None: + msg = (_("The config parameter hnas_mgmt_ip0 " + "is not set in the cinder.conf.")) + LOG.error(msg) + raise exception.InvalidParameterValue(err=msg) + + if config['services'][vol_type]['hdp'] is None: + msg = (_("The config parameter hnas_svc%(idx)s_hdp is " + "not set in the cinder.conf. Note that you need to " + "have at least one pool configured.") % + {'idx': idx}) + LOG.error(msg) + raise exception.InvalidParameterValue(err=msg) + + if config['services'][vol_type]['volume_type'] is None: + msg = (_("The config parameter " + "hnas_svc%(idx)s_volume_type is not set " + "in the cinder.conf. Note that you need to " + "have at least one pool configured.") % + {'idx': idx}) + LOG.error(msg) + raise exception.InvalidParameterValue(err=msg) + + if (dv_type == 'iscsi' and + config['services'][vol_type]['iscsi_ip'] is None): + msg = (_("The config parameter " + "hnas_svc%(idx)s_iscsi_ip is not set " + "in the cinder.conf. Note that you need to " + "have at least one pool configured.") % {'idx': idx}) + LOG.error(msg) + raise exception.InvalidParameterValue(err=msg) + + +def _xml_read(root, element, check=None): + """Read an xml element. 
+ + :param root: XML object + :param element: string desired tag + :param check: string if present, throw exception if element missing + """ + + val = root.findtext(element) + + # mandatory parameter not found + if val is None and check: + LOG.error(_LE("Mandatory parameter not found: %(p)s"), {'p': element}) + raise exception.ParameterNotFound(param=element) + + # tag not found + if val is None: + return None + + svc_tag_pattern = re.compile("svc_[0-3]$") + # tag found but empty parameter. + if not val.strip(): + if svc_tag_pattern.search(element): + return "" + LOG.error(_LE("Parameter not found: %(param)s"), {'param': element}) + raise exception.ParameterNotFound(param=element) + + LOG.debug("%(element)s: %(val)s", + {'element': element, + 'val': val if element != 'password' else '***'}) + + return val.strip() + + +def read_xml_config(xml_config_file, svc_params, optional_params): + """Read Hitachi driver specific xml config file. + + :param xml_config_file: string filename containing XML configuration + :param svc_params: parameters to configure the services + ['volume_type', 'hdp', 'iscsi_ip'] + :param optional_params: parameters to configure that are not mandatory + ['ssc_cmd', 'cluster_admin_ip0', 'chap_enabled'] + """ + + if not os.access(xml_config_file, os.R_OK): + msg = (_("Can't find HNAS configurations on cinder.conf neither " + "on the path %(xml)s.") % {'xml': xml_config_file}) + LOG.error(msg) + raise exception.ConfigNotFound(message=msg) + else: + LOG.warning(_LW("This XML configuration file %(xml)s is deprecated. " + "Please, move all the configurations to the " + "cinder.conf file. If you keep both configuration " + "files, the options set on cinder.conf will be " + "used."), {'xml': xml_config_file}) + + try: + root = ETree.parse(xml_config_file).getroot() + except ETree.ParseError: + msg = (_("Error parsing config file: %(xml_config_file)s") % + {'xml_config_file': xml_config_file}) + LOG.error(msg) + raise exception.ConfigNotFound(message=msg) + + # mandatory parameters for NFS and iSCSI + config = {} + arg_prereqs = ['mgmt_ip0', 'username'] + for req in arg_prereqs: + config[req] = _xml_read(root, req, 'check') + + # optional parameters for NFS and iSCSI + for req in optional_params: + config[req] = _xml_read(root, req) + if config[req] is None and HNAS_DEFAULT_CONFIG.get(req) is not None: + config[req] = HNAS_DEFAULT_CONFIG.get(req) + + config['ssh_private_key'] = _xml_read(root, 'ssh_private_key') + config['password'] = _xml_read(root, 'password') + + if config['ssh_private_key'] is None and config['password'] is None: + msg = _("Missing authentication option (passw or private key file).") + LOG.error(msg) + raise exception.ConfigNotFound(message=msg) + + if _xml_read(root, 'ssh_port') is not None: + config['ssh_port'] = int(_xml_read(root, 'ssh_port')) + else: + config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port'] + + config['fs'] = {} + config['services'] = {} + + # min one needed + for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: + if _xml_read(root, svc) is None: + continue + service = {'label': svc} + + # none optional + for arg in svc_params: + service[arg] = _xml_read(root, svc + '/' + arg, 'check') + config['services'][service['volume_type']] = service + config['fs'][service['hdp']] = service['hdp'] + + # at least one service required! 
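read_xml_config() drives everything through findtext()-style lookups like the ones _xml_read() wraps. For reference, a self-contained example of those lookups (the XML body is invented for illustration; the element names come from the parser above), after which the at-least-one-service check continues:

```python
from xml.etree import ElementTree as ETree

sample = """<config>
  <mgmt_ip0>172.24.44.15</mgmt_ip0>
  <username>supervisor</username>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.24.49.21:/fs-cinder</hdp>
  </svc_0>
</config>"""

root = ETree.fromstring(sample)
print(root.findtext('mgmt_ip0'))           # 172.24.44.15
print(root.findtext('svc_0/volume_type'))  # default
print(root.findtext('ssc_cmd'))            # None -> driver default used
```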
+ if not config['services'].keys(): + LOG.error(_LE("No service found in xml config file")) + raise exception.ParameterNotFound(param="svc_0") + + return config + + +def get_pool(config, volume): + """Get the pool of a volume. + + :param config: dictionary containing the configuration parameters + :param volume: dictionary volume reference + :returns: the pool related to the volume + """ + + if volume.volume_type: + metadata = {} + type_id = volume.volume_type_id + if type_id is not None: + metadata = volume_types.get_volume_type_extra_specs(type_id) + if metadata.get('service_label'): + if metadata['service_label'] in config['services'].keys(): + return metadata['service_label'] + return 'default' + + +def read_cinder_conf(config_opts, dv_type): + """Reads cinder.conf + + Gets the driver specific information set on cinder.conf configuration + file. + + :param config_opts: Configuration object that contains the information + needed by HNAS driver + :param dv_type: The type of the driver (NFS or iSCSI) + :returns: Dictionary with the driver configuration + """ + + config = {} + config['services'] = {} + config['fs'] = {} + mandatory_parameters = ['username', 'password', 'mgmt_ip0'] + optional_parameters = ['ssc_cmd', 'chap_enabled', + 'ssh_port', 'cluster_admin_ip0', + 'ssh_private_key'] + + # Trying to get the mandatory parameters from cinder.conf + for opt in mandatory_parameters: + config[opt] = config_opts.safe_get('hnas_%(opt)s' % {'opt': opt}) + + # If there is at least one of the mandatory parameters in + # cinder.conf, we assume that we should use the configuration + # from this file. + # Otherwise, we use the configuration from the deprecated XML file. + for param in mandatory_parameters: + if config[param] is not None: + break + else: + return None + + # Getting the optional parameters from cinder.conf + for opt in optional_parameters: + config[opt] = config_opts.safe_get('hnas_%(opt)s' % {'opt': opt}) + + # It's possible to have up to 4 pools configured. + for i in range(0, 4): + idx = six.text_type(i) + svc_vol_type = (config_opts.safe_get( + 'hnas_svc%(idx)s_volume_type' % {'idx': idx})) + + svc_hdp = (config_opts.safe_get( + 'hnas_svc%(idx)s_hdp' % {'idx': idx})) + + # It's mandatory to have at least 1 pool configured (svc_0) + if (idx == '0' or svc_vol_type is not None or + svc_hdp is not None): + config['services'][svc_vol_type] = {} + config['fs'][svc_hdp] = svc_hdp + config['services'][svc_vol_type]['hdp'] = svc_hdp + config['services'][svc_vol_type]['volume_type'] = svc_vol_type + + if dv_type == 'iscsi': + svc_ip = (config_opts.safe_get( + 'hnas_svc%(idx)s_iscsi_ip' % {'idx': idx})) + config['services'][svc_vol_type]['iscsi_ip'] = svc_ip + + config['services'][svc_vol_type]['label'] = ( + 'svc_%(idx)s' % {'idx': idx}) + # Checking to ensure that the pools configurations are complete + _check_conf_params(config, svc_vol_type, dv_type, idx) + + return config diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py index da3aad94e..feeae61a1 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_common.py +++ b/cinder/volume/drivers/hpe/hpe_3par_common.py @@ -243,10 +243,11 @@ class HPE3PARCommon(object): characters. bug #1573647 3.0.24 - Fix terminate connection on failover 3.0.25 - Fix delete volume when online clone is active. bug #1349639 + 3.0.26 - Fix concurrent snapshot delete conflict. 
bug #1600104 """ - VERSION = "3.0.25" + VERSION = "3.0.26" stats = {} @@ -2476,8 +2477,34 @@ class HPE3PARCommon(object): "cinder: %(id)s Ex: %(msg)s"), {'id': snapshot['id'], 'msg': ex}) except hpeexceptions.HTTPConflict as ex: - LOG.error(_LE("Exception: %s"), ex) - raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) + if (ex.get_code() == 32): + # Error 32 means that the snapshot has children + # see if we have any temp snapshots + snaps = self.client.getVolumeSnapshots(snap_name) + for snap in snaps: + if snap.startswith('tss-'): + LOG.info( + _LI("Found a temporary snapshot %(name)s"), + {'name': snap}) + try: + self.client.deleteVolume(snap) + except hpeexceptions.HTTPNotFound: + # if the volume is gone, it's as good as a + # successful delete + pass + except Exception: + msg = _("Snapshot has a temporary snapshot that " + "can't be deleted at this time.") + raise exception.SnapshotIsBusy(message=msg) + + try: + self.client.deleteVolume(snap_name) + except Exception: + msg = _("Snapshot has children and cannot be deleted!") + raise exception.SnapshotIsBusy(message=msg) + else: + LOG.error(_LE("Exception: %s"), ex) + raise exception.SnapshotIsBusy(message=ex.get_description()) def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns): if wwns is not None and not isinstance(wwns, list): diff --git a/cinder/volume/drivers/hpe/hpe_3par_fc.py b/cinder/volume/drivers/hpe/hpe_3par_fc.py index c6c60bce8..c53b0c1e9 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_fc.py +++ b/cinder/volume/drivers/hpe/hpe_3par_fc.py @@ -35,10 +35,12 @@ except ImportError: hpeexceptions = None from oslo_log import log as logging +from oslo_utils.excutils import save_and_reraise_exception from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _, _LI, _LW, _LE from cinder import interface +from cinder import utils from cinder.volume import driver from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon from cinder.volume.drivers.san import san @@ -46,6 +48,9 @@ from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) +# EXISTENT_PATH error code returned from hpe3parclient +EXISTENT_PATH = 73 + @interface.volumedriver class HPE3PARFCDriver(driver.TransferVD, @@ -105,10 +110,16 @@ class HPE3PARFCDriver(driver.TransferVD, 3.0.7 - Remove metadata that tracks the instance ID. bug #1572665 3.0.8 - NSP feature, creating FC Vlun as match set instead of host sees. bug #1577993 + 3.0.9 - Handling HTTP conflict 409, host WWN/iSCSI name already used + by another host, while creating 3PAR FC Host. bug #1597454 + 3.0.10 - Added Entry point tracing """ - VERSION = "3.0.8" + VERSION = "3.0.10" + + # The name of the CI wiki page. 
+ CI_WIKI_NAME = "HPE_Storage_CI" def __init__(self, *args, **kwargs): super(HPE3PARFCDriver, self).__init__(*args, **kwargs) @@ -179,6 +190,7 @@ class HPE3PARFCDriver(driver.TransferVD, """Setup errors are already checked for in do_setup so return pass.""" pass + @utils.trace def create_volume(self, volume): common = self._login() try: @@ -186,6 +198,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_cloned_volume(self, volume, src_vref): common = self._login() try: @@ -193,6 +206,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def delete_volume(self, volume): common = self._login() try: @@ -200,6 +214,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. @@ -211,6 +226,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_snapshot(self, snapshot): common = self._login() try: @@ -218,6 +234,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def delete_snapshot(self, snapshot): common = self._login() try: @@ -225,6 +242,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Assigns the volume to a server. @@ -347,6 +365,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" @@ -435,16 +454,41 @@ class HPE3PARFCDriver(driver.TransferVD, return host_found else: persona_id = int(persona_id) - common.client.createHost(hostname, FCWwns=wwns, - optional={'domain': domain, - 'persona': persona_id}) + try: + common.client.createHost(hostname, FCWwns=wwns, + optional={'domain': domain, + 'persona': persona_id}) + except hpeexceptions.HTTPConflict as path_conflict: + msg = _LE("Create FC host caught HTTP conflict code: %s") + LOG.exception(msg, path_conflict.get_code()) + with save_and_reraise_exception(reraise=False) as ctxt: + if path_conflict.get_code() is EXISTENT_PATH: + # Handle exception : EXISTENT_PATH - host WWN/iSCSI + # name already used by another host + hosts = common.client.queryHost(wwns=wwns) + if hosts and hosts['members'] and ( + 'name' in hosts['members'][0]): + hostname = hosts['members'][0]['name'] + else: + # re rasise last caught exception + ctxt.reraise = True + else: + # re rasise last caught exception + # for other HTTP conflict + ctxt.reraise = True return hostname def _modify_3par_fibrechan_host(self, common, hostname, wwn): mod_request = {'pathOperation': common.client.HOST_EDIT_ADD, 'FCWWNs': wwn} - - common.client.modifyHost(hostname, mod_request) + try: + common.client.modifyHost(hostname, mod_request) + except hpeexceptions.HTTPConflict as path_conflict: + msg = _LE("Modify FC Host %(hostname)s caught " + "HTTP conflict code: %(code)s") + LOG.exception(msg, + {'hostname': hostname, + 'code': path_conflict.get_code()}) def _create_host(self, common, volume, connector): """Creates or modifies existing 3PAR host.""" @@ -464,8 +508,9 @@ class HPE3PARFCDriver(driver.TransferVD, domain, persona_id) host = common._get_3par_host(hostname) - - return self._add_new_wwn_to_host(common, host, connector['wwpns']) + return host + else: + return self._add_new_wwn_to_host(common, 
host, connector['wwpns']) def _add_new_wwn_to_host(self, common, host, wwns): """Add wwns to a host if one or more don't exist. @@ -505,6 +550,7 @@ class HPE3PARFCDriver(driver.TransferVD, def remove_export(self, context, volume): pass + @utils.trace def extend_volume(self, volume, new_size): common = self._login() try: @@ -512,6 +558,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_consistencygroup(self, context, group): common = self._login() try: @@ -519,6 +566,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): @@ -530,6 +578,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def delete_consistencygroup(self, context, group, volumes): common = self._login() try: @@ -537,6 +586,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): common = self._login() @@ -546,6 +596,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_cgsnapshot(self, context, cgsnapshot, snapshots): common = self._login() try: @@ -553,6 +604,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def delete_cgsnapshot(self, context, cgsnapshot, snapshots): common = self._login() try: @@ -560,6 +612,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def manage_existing(self, volume, existing_ref): common = self._login() try: @@ -567,6 +620,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def manage_existing_snapshot(self, snapshot, existing_ref): common = self._login() try: @@ -574,6 +628,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def manage_existing_get_size(self, volume, existing_ref): common = self._login() try: @@ -581,6 +636,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def manage_existing_snapshot_get_size(self, snapshot, existing_ref): common = self._login() try: @@ -589,6 +645,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def unmanage(self, volume): common = self._login() try: @@ -596,6 +653,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def unmanage_snapshot(self, snapshot): common = self._login() try: @@ -603,6 +661,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type.""" common = self._login() @@ -611,6 +670,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def migrate_volume(self, context, volume, host): if volume['status'] == 'in-use': protocol = host['capabilities']['storage_protocol'] @@ -625,6 +685,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Update the name of the migrated volume to it's new ID.""" @@ -635,6 +696,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def get_pool(self, volume): common = self._login() try: @@ 
-646,6 +708,7 @@ class HPE3PARFCDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def failover_host(self, context, volumes, secondary_id=None): """Force failover to a secondary replication target.""" common = self._login(timeout=30) diff --git a/cinder/volume/drivers/hpe/hpe_3par_iscsi.py b/cinder/volume/drivers/hpe/hpe_3par_iscsi.py index 43c3414d9..5df59c041 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_iscsi.py +++ b/cinder/volume/drivers/hpe/hpe_3par_iscsi.py @@ -40,6 +40,7 @@ from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LE, _LW from cinder import interface +from cinder import utils from cinder.volume import driver from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon from cinder.volume.drivers.san import san @@ -117,10 +118,14 @@ class HPE3PARISCSIDriver(driver.TransferVD, 3.0.10 - Remove metadata that tracks the instance ID. bug #1572665 3.0.11 - _create_3par_iscsi_host() now accepts iscsi_iqn as list only. Bug #1590180 + 3.0.12 - Added entry point tracing """ - VERSION = "3.0.11" + VERSION = "3.0.12" + + # The name of the CI wiki page. + CI_WIKI_NAME = "HPE_Storage_CI" def __init__(self, *args, **kwargs): super(HPE3PARISCSIDriver, self).__init__(*args, **kwargs) @@ -164,6 +169,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, 'san_password'] common.check_flags(self.configuration, required_flags) + @utils.trace def get_volume_stats(self, refresh=False): common = self._login() try: @@ -257,6 +263,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, """Setup errors are already checked for in do_setup so return pass.""" pass + @utils.trace def create_volume(self, volume): common = self._login() try: @@ -264,6 +271,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_cloned_volume(self, volume, src_vref): """Clone an existing volume.""" common = self._login() @@ -272,6 +280,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def delete_volume(self, volume): common = self._login() try: @@ -279,6 +288,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. @@ -290,6 +300,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_snapshot(self, snapshot): common = self._login() try: @@ -297,6 +308,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def delete_snapshot(self, snapshot): common = self._login() try: @@ -304,6 +316,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def initialize_connection(self, volume, connector): """Assigns the volume to a server. @@ -460,6 +473,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" common = self._login() @@ -665,6 +679,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, return model_update + @utils.trace def create_export(self, context, volume, connector): common = self._login() try: @@ -672,6 +687,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def ensure_export(self, context, volume): """Ensure the volume still exists on the 3PAR. 
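Nearly every entry point in these 3PAR drivers wraps its work in the same _login()/_logout() pair. A hypothetical context-manager rendering of that invariant (the drivers themselves keep the explicit try/finally; this only makes the pattern visible in isolation):

```python
import contextlib

@contextlib.contextmanager
def session(driver):
    # Mirrors: common = self._login(); try: ...; finally:
    # self._logout(common). Logout runs even if the operation raises.
    common = driver._login()
    try:
        yield common
    finally:
        driver._logout(common)

# Usage sketch:
#   with session(self) as common:
#       return common.create_snapshot(snapshot)
```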
@@ -779,6 +795,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, return current_least_used_nsp + @utils.trace def extend_volume(self, volume, new_size): common = self._login() try: @@ -786,6 +803,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_consistencygroup(self, context, group): common = self._login() try: @@ -793,6 +811,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): @@ -804,6 +823,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def delete_consistencygroup(self, context, group, volumes): common = self._login() try: @@ -811,6 +831,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): common = self._login() @@ -820,6 +841,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def create_cgsnapshot(self, context, cgsnapshot, snapshots): common = self._login() try: @@ -827,6 +849,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def delete_cgsnapshot(self, context, cgsnapshot, snapshots): common = self._login() try: @@ -834,6 +857,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def manage_existing(self, volume, existing_ref): common = self._login() try: @@ -841,6 +865,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def manage_existing_snapshot(self, snapshot, existing_ref): common = self._login() try: @@ -848,6 +873,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def manage_existing_get_size(self, volume, existing_ref): common = self._login() try: @@ -855,6 +881,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def manage_existing_snapshot_get_size(self, snapshot, existing_ref): common = self._login() try: @@ -863,6 +890,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def unmanage(self, volume): common = self._login() try: @@ -870,6 +898,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def unmanage_snapshot(self, snapshot): common = self._login() try: @@ -877,6 +906,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type.""" common = self._login() @@ -885,6 +915,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def migrate_volume(self, context, volume, host): if volume['status'] == 'in-use': protocol = host['capabilities']['storage_protocol'] @@ -899,6 +930,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Update the name of the migrated volume to it's new ID.""" @@ -909,6 +941,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + @utils.trace def get_pool(self, volume): common = self._login() try: @@ -920,6 +953,7 @@ class HPE3PARISCSIDriver(driver.TransferVD, finally: self._logout(common) + 
@utils.trace def failover_host(self, context, volumes, secondary_id=None): """Force failover to a secondary replication target.""" common = self._login(timeout=30) diff --git a/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py b/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py index f423c11c8..217f7396d 100644 --- a/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py +++ b/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py @@ -162,6 +162,8 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): VERSION = "2.0.9" + CI_WIKI_NAME = "HPE_Storage_CI" + device_stats = {} # v2 replication constants diff --git a/cinder/volume/drivers/hpe/hpe_xp_fc.py b/cinder/volume/drivers/hpe/hpe_xp_fc.py index d0e76f9f3..5b3e3fd43 100644 --- a/cinder/volume/drivers/hpe/hpe_xp_fc.py +++ b/cinder/volume/drivers/hpe/hpe_xp_fc.py @@ -19,6 +19,7 @@ Fibre channel Cinder volume driver for Hewlett Packard Enterprise storage. from oslo_utils import importutils from cinder import interface +from cinder import utils from cinder.volume import driver from cinder.volume.drivers.hpe import hpe_xp_opts as opts from cinder.zonemanager import utils as fczm_utils @@ -31,6 +32,9 @@ _DRIVER_CLASS = 'hpe_xp_horcm_fc.HPEXPHORCMFC' class HPEXPFCDriver(driver.FibreChannelDriver): """OpenStack Fibre Channel driver to enable HPE XP storage.""" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "XP_Storage_CI" + def __init__(self, *args, **kwargs): """Initialize the driver.""" super(HPEXPFCDriver, self).__init__(*args, **kwargs) @@ -45,26 +49,32 @@ class HPEXPFCDriver(driver.FibreChannelDriver): """Setup errors are already checked for in do_setup so return pass.""" pass + @utils.trace def create_volume(self, volume): """Create a volume.""" return self.common.create_volume(volume) + @utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" return self.common.create_volume_from_snapshot(volume, snapshot) + @utils.trace def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" return self.common.create_cloned_volume(volume, src_vref) + @utils.trace def delete_volume(self, volume): """Delete a volume.""" self.common.delete_volume(volume) + @utils.trace def create_snapshot(self, snapshot): """Create a snapshot.""" return self.common.create_snapshot(snapshot) + @utils.trace def delete_snapshot(self, snapshot): """Delete a snapshot.""" self.common.delete_snapshot(snapshot) @@ -72,10 +82,12 @@ class HPEXPFCDriver(driver.FibreChannelDriver): def local_path(self, volume): pass + @utils.trace def get_volume_stats(self, refresh=False): """Get volume stats.""" return self.common.get_volume_stats(refresh) + @utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume. @@ -87,6 +99,7 @@ class HPEXPFCDriver(driver.FibreChannelDriver): self.common.copy_image_to_volume( context, volume, image_service, image_id) + @utils.trace def after_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions after copyvolume data. @@ -95,6 +108,7 @@ class HPEXPFCDriver(driver.FibreChannelDriver): """ self.common.copy_volume_data(context, src_vol, dest_vol, remote) + @utils.trace def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume. 
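The @utils.trace decorator added throughout this patch logs entry into and exit from each driver call. A minimal stand-in showing the shape of such a decorator (the real implementation in cinder.utils is more elaborate; this is only a sketch):

```python
import functools
import logging

LOG = logging.getLogger(__name__)

def trace(f):
    """Log the call and the return value of the decorated function."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        LOG.debug('==> %s: call args=%r kwargs=%r',
                  f.__name__, args, kwargs)
        result = f(*args, **kwargs)
        LOG.debug('<== %s: return %r', f.__name__, result)
        return result
    return wrapper

@trace
def extend_volume(volume, new_size):  # illustrative target
    return new_size
```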
@@ -105,10 +119,12 @@ class HPEXPFCDriver(driver.FibreChannelDriver): context, backup, volume, backup_service) self.common.restore_backup(context, backup, volume, backup_service) + @utils.trace def extend_volume(self, volume, new_size): """Extend a volume.""" self.common.extend_volume(volume, new_size) + @utils.trace def manage_existing(self, volume, existing_ref): """Manage an existing HPE XP storage volume. @@ -119,6 +135,7 @@ class HPEXPFCDriver(driver.FibreChannelDriver): """ return self.common.manage_existing(volume, existing_ref) + @utils.trace def manage_existing_get_size(self, volume, existing_ref): """Return size of volume for manage_existing.""" return self.common.manage_existing_get_size(volume, existing_ref) @@ -140,11 +157,13 @@ class HPEXPFCDriver(driver.FibreChannelDriver): def remove_export(self, context, volume): pass + @utils.trace @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Attach the volume to an instance.""" return self.common.initialize_connection(volume, connector) + @utils.trace @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Detach a volume from an instance.""" diff --git a/cinder/volume/drivers/huawei/constants.py b/cinder/volume/drivers/huawei/constants.py index 140e2562c..599063b75 100644 --- a/cinder/volume/drivers/huawei/constants.py +++ b/cinder/volume/drivers/huawei/constants.py @@ -19,6 +19,8 @@ STATUS_RUNNING = '10' STATUS_VOLUME_READY = '27' STATUS_LUNCOPY_READY = '40' STATUS_QOS_ACTIVE = '2' +LUN_TYPE = '11' +SNAPSHOT_TYPE = '27' BLOCK_STORAGE_POOL_TYPE = '1' FILE_SYSTEM_POOL_TYPE = '2' @@ -61,6 +63,8 @@ THIN_LUNTYPE = 1 MAX_HOSTNAME_LENGTH = 31 MAX_VOL_DESCRIPTION = 170 PORT_NUM_PER_CONTR = 2 +PWD_EXPIRED = 3 +PWD_RESET = 4 OS_TYPE = {'Linux': '0', 'Windows': '1', diff --git a/cinder/volume/drivers/huawei/fc_zone_helper.py b/cinder/volume/drivers/huawei/fc_zone_helper.py index ff196642e..03c0943b5 100644 --- a/cinder/volume/drivers/huawei/fc_zone_helper.py +++ b/cinder/volume/drivers/huawei/fc_zone_helper.py @@ -61,7 +61,7 @@ class FCZoneHelper(object): {"portg": portg, "views": views[0]}) # In fact, there is just one view for one port group. 
lungroup = self.client.get_lungroup_by_view(views[0]) - lun_num = self.client.get_lunnum_from_lungroup(lungroup) + lun_num = self.client.get_obj_count_from_lungroup(lungroup) ports_in_portg = self.client.get_ports_by_portg(portg) LOG.debug("PortGroup %(portg)s contains ports: %(ports)s.", {"portg": portg, "ports": ports_in_portg}) @@ -133,10 +133,11 @@ class FCZoneHelper(object): 'initiators': fabric_connected_initiators}) return fabric_connected_ports, fabric_connected_initiators - def _get_lun_engine_contrs(self, engines, lun_id): + def _get_lun_engine_contrs(self, engines, lun_id, + lun_type=constants.LUN_TYPE): contrs = [] engine_id = None - lun_info = self.client.get_lun_info(lun_id) + lun_info = self.client.get_lun_info(lun_id, lun_type) lun_contr_id = lun_info['OWNINGCONTROLLER'] for engine in engines: contrs = json.loads(engine['NODELIST']) @@ -172,11 +173,13 @@ class FCZoneHelper(object): new_portg_id = self.client.create_portg(portg_name, description) return new_portg_id - def build_ini_targ_map(self, wwns, host_id, lun_id): + def build_ini_targ_map(self, wwns, host_id, lun_id, + lun_type=constants.LUN_TYPE): engines = self.client.get_all_engines() LOG.debug("Get array engines: %s", engines) - contrs, engine_id = self._get_lun_engine_contrs(engines, lun_id) + contrs, engine_id = self._get_lun_engine_contrs(engines, lun_id, + lun_type) # Check if there is already a port group in the view. # If yes and have already considered the engine, diff --git a/cinder/volume/drivers/huawei/huawei_driver.py b/cinder/volume/drivers/huawei/huawei_driver.py index 89933acc4..f682d49a3 100644 --- a/cinder/volume/drivers/huawei/huawei_driver.py +++ b/cinder/volume/drivers/huawei/huawei_driver.py @@ -74,10 +74,15 @@ CONF.register_opts(huawei_opts) snap_attrs = ('id', 'volume_id', 'volume', 'provider_location') Snapshot = collections.namedtuple('Snapshot', snap_attrs) +vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') +Volume = collections.namedtuple('Volume', vol_attrs) class HuaweiBaseDriver(driver.VolumeDriver): + # ThirdPartySytems wiki page + CI_WIKI_NAME = "Huawei_volume_CI" + def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -95,7 +100,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): def get_local_and_remote_dev_conf(self): self.loc_dev_conf = self.huawei_conf.get_local_device() - # Now just support one replication_devices. + # Now just support one replication device. replica_devs = self.huawei_conf.get_replication_devices() self.replica_dev_conf = replica_devs[0] if replica_devs else {} @@ -249,7 +254,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): opts.update(opts_value) for key, value in specs.items(): - # Get the scope, if is using scope format. + # Get the scope, if it is using scope format. scope = None key_split = key.split(':') if len(key_split) > 2 and key_split[0] != "capabilities": @@ -321,7 +326,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): def _create_base_type_volume(self, opts, volume, volume_type): """Create volume and add some base type. - Base type is the services won't conflict with the other service. + Base type is the service type which doesn't conflict with the other. """ lun_params = self._get_lun_params(volume, opts) lun_info, model_update = self._create_volume(volume, lun_params) @@ -349,8 +354,8 @@ class HuaweiBaseDriver(driver.VolumeDriver): model_update): """Add the extend type. - Extend type is the services may conflict with LUNCopy. - So add it after the those services. 
+ Extend type is the service type which may conflict with the other. + So add it after those services. """ lun_id = lun_info['ID'] if opts.get('hypermetro') == 'true': @@ -551,7 +556,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): LOG.error(_LE('Unable to rename lun %s on array.'), current_name) return {'_name_id': new_volume.name_id} - LOG.debug("Rename lun from %(current_name)s to %(original_name)s " + LOG.debug("Renamed lun from %(current_name)s to %(original_name)s " "successfully.", {'current_name': current_name, 'original_name': original_name}) @@ -1158,6 +1163,18 @@ class HuaweiBaseDriver(driver.VolumeDriver): """Remove an export for a volume.""" pass + def create_export_snapshot(self, context, snapshot, connector): + """Export a snapshot.""" + pass + + def remove_export_snapshot(self, context, snapshot): + """Remove an export for a snapshot.""" + pass + + def backup_use_temp_snapshot(self): + # This config option has a default to be False, So just return it. + return self.configuration.safe_get("backup_use_temp_snapshot") + def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): luncopy_id = self.client.create_luncopy(copy_name, src_lun, @@ -1484,14 +1501,9 @@ class HuaweiBaseDriver(driver.VolumeDriver): def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Get the size of the existing snapshot.""" snapshot_info = self._get_snapshot_info_by_ref(existing_ref) - size = (float(snapshot_info.get('USERCAPACITY')) - // constants.CAPACITY_UNIT) - remainder = (float(snapshot_info.get('USERCAPACITY')) - % constants.CAPACITY_UNIT) - if int(remainder) > 0: - msg = _("Snapshot size must be multiple of 1 GB.") - raise exception.VolumeBackendAPIException(data=msg) - return int(size) + size = int(math.ceil(snapshot_info.get('USERCAPACITY') / + constants.CAPACITY_UNIT)) + return size def unmanage_snapshot(self, snapshot): """Unmanage the specified snapshot from Cinder management.""" @@ -1756,6 +1768,37 @@ class HuaweiBaseDriver(driver.VolumeDriver): return secondary_id, volumes_update + def initialize_connection_snapshot(self, snapshot, connector, **kwargs): + """Map a snapshot to a host and return target iSCSI information.""" + # From the volume structure. + volume = Volume(id=snapshot.id, + provider_location=snapshot.provider_location, + lun_type=constants.SNAPSHOT_TYPE, + metadata=None) + + return self.initialize_connection(volume, connector) + + def terminate_connection_snapshot(self, snapshot, connector, **kwargs): + """Delete map between a snapshot and a host.""" + # From the volume structure. 
+ volume = Volume(id=snapshot.id, + provider_location=snapshot.provider_location, + lun_type=constants.SNAPSHOT_TYPE, + metadata=None) + + return self.terminate_connection(volume, connector) + + def get_lun_id_and_type(self, volume): + if hasattr(volume, 'lun_type'): + lun_id = volume.provider_location + lun_type = constants.SNAPSHOT_TYPE + else: + lun_id = self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_RAISE) + lun_type = constants.LUN_TYPE + + return lun_id, lun_type + @interface.volumedriver class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): @@ -1784,9 +1827,11 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): Hypermetro consistency group support Consistency group support Cgsnapshot support + 2.0.8 - Backup snapshot optimal path support + 2.0.9 - Support reporting disk type of pool """ - VERSION = "2.0.7" + VERSION = "2.0.9" def __init__(self, *args, **kwargs): super(HuaweiISCSIDriver, self).__init__(*args, **kwargs) @@ -1804,9 +1849,7 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): @utils.synchronized('huawei', external=True) def initialize_connection(self, volume, connector): """Map a volume to a host and return target iSCSI information.""" - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - + lun_id, lun_type = self.get_lun_id_and_type(volume) initiator_name = connector['initiator'] LOG.info(_LI( 'initiator name: %(initiator_name)s, ' @@ -1837,9 +1880,11 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): # Mapping lungroup and hostgroup to view. self.client.do_mapping(lun_id, hostgroup_id, - host_id, portgroup_id) + host_id, portgroup_id, + lun_type) - hostlun_id = self.client.get_host_lun_id(host_id, lun_id) + hostlun_id = self.client.get_host_lun_id(host_id, lun_id, + lun_type) LOG.info(_LI("initialize_connection, host lun id is: %s."), hostlun_id) @@ -1877,8 +1922,7 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): @utils.synchronized('huawei', external=True) def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_WARN) + lun_id, lun_type = self.get_lun_id_and_type(volume) initiator_name = connector['initiator'] host_name = connector['host'] lungroup_id = None @@ -1912,10 +1956,12 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): # Remove lun from lungroup. if lun_id and lungroup_id: - lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id) + lungroup_ids = self.client.get_lungroupids_by_lunid( + lun_id, lun_type) if lungroup_id in lungroup_ids: self.client.remove_lun_from_lungroup(lungroup_id, - lun_id) + lun_id, + lun_type) else: LOG.warning(_LW("LUN is not in lungroup. " "LUN ID: %(lun_id)s. " @@ -1925,7 +1971,7 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): # Remove portgroup from mapping view if no lun left in lungroup. 
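# A minimal sketch of the snapshot-attach dispatch introduced above, with
# assumed constants values (the real LUN_TYPE/SNAPSHOT_TYPE live in
# cinder/volume/drivers/huawei/constants.py); the snapshot is wrapped in
# the Volume namedtuple so the existing volume attach path can serve it:
import collections

LUN_TYPE, SNAPSHOT_TYPE = 11, 27  # illustrative values only

Volume = collections.namedtuple(
    'Volume', ('id', 'lun_type', 'provider_location', 'metadata'))

def wrap_snapshot(snapshot):
    # Present the snapshot as a volume-shaped object; lun_type marks it
    # as a snapshot for every downstream REST call.
    return Volume(id=snapshot.id,
                  provider_location=snapshot.provider_location,
                  lun_type=SNAPSHOT_TYPE,
                  metadata=None)

def lun_id_and_type(obj):
    # Simplified get_lun_id_and_type(); the real driver also verifies
    # that a bare volume still exists on the array before returning.
    if hasattr(obj, 'lun_type'):
        return obj.provider_location, SNAPSHOT_TYPE
    return obj.provider_location, LUN_TYPE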
if lungroup_id: - left_lunnum = self.client.get_lunnum_from_lungroup(lungroup_id) + left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id) if portgroup_id and view_id and (int(left_lunnum) <= 0): if self.client.is_portgroup_associated_to_view(view_id, @@ -1981,9 +2027,11 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): Hypermetro consistency group support Consistency group support Cgsnapshot support + 2.0.8 - Backup snapshot optimal path support + 2.0.9 - Support reporting disk type of pool """ - VERSION = "2.0.7" + VERSION = "2.0.9" def __init__(self, *args, **kwargs): super(HuaweiFCDriver, self).__init__(*args, **kwargs) @@ -2002,9 +2050,7 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): @utils.synchronized('huawei', external=True) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - + lun_id, lun_type = self.get_lun_id_and_type(volume) wwns = connector['wwpns'] LOG.info(_LI( 'initialize_connection, initiator: %(wwpns)s,' @@ -2027,7 +2073,8 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client) try: (tgt_port_wwns, portg_id, init_targ_map) = ( - zone_helper.build_ini_targ_map(wwns, host_id, lun_id)) + zone_helper.build_ini_targ_map(wwns, host_id, lun_id, + lun_type)) except Exception as err: self.remove_host_with_check(host_id) msg = _('build_ini_targ_map fails. %s') % err @@ -2065,8 +2112,10 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): # Add host into hostgroup. hostgroup_id = self.client.add_host_to_hostgroup(host_id) map_info = self.client.do_mapping(lun_id, hostgroup_id, - host_id, portg_id) - host_lun_id = self.client.get_host_lun_id(host_id, lun_id) + host_id, portg_id, + lun_type) + host_lun_id = self.client.get_host_lun_id(host_id, lun_id, + lun_type) # Return FC properties. fc_info = {'driver_volume_type': 'fibre_channel', @@ -2143,9 +2192,7 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_WARN) - + lun_id, lun_type = self.get_lun_id_and_type(volume) wwns = connector['wwpns'] host_name = connector['host'] @@ -2165,10 +2212,12 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): lungroup_id = self.client.find_lungroup_from_map(view_id) if lun_id and lungroup_id: - lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id) + lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id, + lun_type) if lungroup_id in lungroup_ids: self.client.remove_lun_from_lungroup(lungroup_id, - lun_id) + lun_id, + lun_type) else: LOG.warning(_LW("LUN is not in lungroup. " "LUN ID: %(lun_id)s. 
" @@ -2179,7 +2228,7 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): else: LOG.warning(_LW("Can't find lun on the array.")) if lungroup_id: - left_lunnum = self.client.get_lunnum_from_lungroup(lungroup_id) + left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id) if int(left_lunnum) > 0: fc_info = {'driver_volume_type': 'fibre_channel', 'data': {}} diff --git a/cinder/volume/drivers/huawei/hypermetro.py b/cinder/volume/drivers/huawei/hypermetro.py index 468d60bb4..535dd938b 100644 --- a/cinder/volume/drivers/huawei/hypermetro.py +++ b/cinder/volume/drivers/huawei/hypermetro.py @@ -235,7 +235,7 @@ class HuaweiHyperMetro(object): lungroup_id = self.rmt_client.find_lungroup_from_map( view_id) if lungroup_id: - left_lunnum = self.rmt_client.get_lunnum_from_lungroup( + left_lunnum = self.rmt_client.get_obj_count_from_lungroup( lungroup_id) if int(left_lunnum) > 0: diff --git a/cinder/volume/drivers/huawei/rest_client.py b/cinder/volume/drivers/huawei/rest_client.py index 84098d2c3..7bf18d298 100644 --- a/cinder/volume/drivers/huawei/rest_client.py +++ b/cinder/volume/drivers/huawei/rest_client.py @@ -131,6 +131,13 @@ class RestClient(object): self.device_id = device_id self.url = item_url + device_id self.headers['iBaseToken'] = result['data']['iBaseToken'] + if (result['data']['accountstate'] + in (constants.PWD_EXPIRED, constants.PWD_RESET)): + self.logout() + msg = _("Password has expired or has been reset, " + "please change the password.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) break if device_id is None: @@ -176,7 +183,7 @@ class RestClient(object): """Logout the session.""" url = "/sessions" if self.url: - result = self.call(url, None, "DELETE") + result = self.do_call(url, None, "DELETE") self._assert_rest_result(result, _('Logout session error.')) def _assert_rest_result(self, result, err_str): @@ -250,6 +257,9 @@ class RestClient(object): info['ID'] = pool['ID'] info['CAPACITY'] = pool.get('DATASPACE', pool['USERFREECAPACITY']) info['TOTALCAPACITY'] = pool['USERTOTALCAPACITY'] + info['TIER0CAPACITY'] = pool['TIER0CAPACITY'] + info['TIER1CAPACITY'] = pool['TIER1CAPACITY'] + info['TIER2CAPACITY'] = pool['TIER2CAPACITY'] return info @@ -414,7 +424,8 @@ class RestClient(object): return True return False - def do_mapping(self, lun_id, hostgroup_id, host_id, portgroup_id=None): + def do_mapping(self, lun_id, hostgroup_id, host_id, portgroup_id=None, + lun_type=constants.LUN_TYPE): """Add hostgroup and lungroup to mapping view.""" lungroup_name = constants.LUNGROUP_PREFIX + host_id mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id @@ -434,9 +445,10 @@ class RestClient(object): if lungroup_id is None: lungroup_id = self._create_lungroup(lungroup_name) is_associated = self._is_lun_associated_to_lungroup(lungroup_id, - lun_id) + lun_id, + lun_type) if not is_associated: - self.associate_lun_to_lungroup(lungroup_id, lun_id) + self.associate_lun_to_lungroup(lungroup_id, lun_id, lun_type) if view_id is None: view_id = self._add_mapping_view(mapping_view_name) @@ -468,7 +480,7 @@ class RestClient(object): LOG.error(_LE( 'Error occurred when adding hostgroup and lungroup to ' 'view. 
Remove lun from lungroup now.')) - self.remove_lun_from_lungroup(lungroup_id, lun_id) + self.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) return map_info @@ -602,9 +614,10 @@ class RestClient(object): return True return False - def get_host_lun_id(self, host_id, lun_id): - url = ("/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21" - "&ASSOCIATEOBJID=%s" % (host_id)) + def get_host_lun_id(self, host_id, lun_id, lun_type=constants.LUN_TYPE): + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' + url = ("/%s/associate?TYPE=%s&ASSOCIATEOBJTYPE=21" + "&ASSOCIATEOBJID=%s" % (cmd_type, lun_type, host_id)) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find host lun id error.')) @@ -692,10 +705,13 @@ class RestClient(object): return False - def _is_lun_associated_to_lungroup(self, lungroup_id, lun_id): + def _is_lun_associated_to_lungroup(self, lungroup_id, lun_id, + lun_type=constants.LUN_TYPE): """Check whether the lun is associated to the lungroup.""" - url = ("/lun/associate?TYPE=11&" - "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % lungroup_id) + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' + url = ("/%s/associate?TYPE=%s&" + "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" + % (cmd_type, lun_type, lungroup_id)) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check lungroup associate error.')) @@ -716,19 +732,21 @@ class RestClient(object): self._assert_rest_result(result, _('Associate host to hostgroup ' 'error.')) - def associate_lun_to_lungroup(self, lungroup_id, lun_id): + def associate_lun_to_lungroup(self, lungroup_id, lun_id, + lun_type=constants.LUN_TYPE): """Associate lun to lungroup.""" url = "/lungroup/associate" data = {"ID": lungroup_id, - "ASSOCIATEOBJTYPE": "11", + "ASSOCIATEOBJTYPE": lun_type, "ASSOCIATEOBJID": lun_id} result = self.call(url, data) self._assert_rest_result(result, _('Associate lun to lungroup error.')) - def remove_lun_from_lungroup(self, lungroup_id, lun_id): + def remove_lun_from_lungroup(self, lungroup_id, lun_id, + lun_type=constants.LUN_TYPE): """Remove lun from lungroup.""" - url = ("/lungroup/associate?ID=%s&ASSOCIATEOBJTYPE=11" - "&ASSOCIATEOBJID=%s" % (lungroup_id, lun_id)) + url = ("/lungroup/associate?ID=%s&ASSOCIATEOBJTYPE=%s" + "&ASSOCIATEOBJID=%s" % (lungroup_id, lun_type, lun_id)) result = self.call(url, None, 'DELETE') self._assert_rest_result( @@ -942,16 +960,25 @@ class RestClient(object): result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete mapping view error.')) - def get_lunnum_from_lungroup(self, lungroup_id): - """Check if there are still other luns associated to the lungroup.""" + def get_obj_count_from_lungroup(self, lungroup_id): + """Get all objects count associated to the lungroup.""" + lun_count = self._get_obj_count_from_lungroup_by_type( + lungroup_id, constants.LUN_TYPE) + snapshot_count = self._get_obj_count_from_lungroup_by_type( + lungroup_id, constants.SNAPSHOT_TYPE) + return int(lun_count) + int(snapshot_count) + + def _get_obj_count_from_lungroup_by_type(self, lungroup_id, + lun_type=constants.LUN_TYPE): + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' lunnum = 0 if not lungroup_id: return lunnum - url = ("/lun/count?TYPE=11&ASSOCIATEOBJTYPE=256&" - "ASSOCIATEOBJID=%s" % lungroup_id) + url = ("/%s/count?TYPE=%s&ASSOCIATEOBJTYPE=256&" + "ASSOCIATEOBJID=%s" % (cmd_type, lun_type, lungroup_id)) result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Find lun number error.')) + 
self._assert_rest_result(result, _('Find obj number error.')) if 'data' in result: lunnum = int(result['data']['COUNT']) return lunnum @@ -1004,6 +1031,22 @@ class RestClient(object): return pool_capacity + def _get_disk_type(self, pool_name, result): + """Get disk type of the pool.""" + pool_info = self.get_pool_info(pool_name, result) + if not pool_info: + return None + + pool_disk = [] + for i, x in enumerate(['ssd', 'sas', 'nl_sas']): + if pool_info['TIER%dCAPACITY' % i] != '0': + pool_disk.append(x) + + if len(pool_disk) > 1: + pool_disk = ['mix'] + + return pool_disk[0] if pool_disk else None + def get_luncopy_info(self, luncopy_id): """Get LUNcopy information.""" url = "/LUNCOPY?range=[0-1023]" @@ -1141,6 +1184,7 @@ class RestClient(object): result = self.get_all_pools() for pool_name in self.storage_pools: capacity = self._get_capacity(pool_name, result) + disk_type = self._get_disk_type(pool_name, result) pool = {} pool.update(dict( location_info=self.device_id, @@ -1160,6 +1204,9 @@ class RestClient(object): hypermetro=True, consistencygroup_support=True, )) + if disk_type: + pool['disk_type'] = disk_type + data['pools'].append(pool) return data @@ -1457,10 +1504,10 @@ class RestClient(object): return result['data']['IOCLASSID'] - def get_lungroupids_by_lunid(self, lun_id): + def get_lungroupids_by_lunid(self, lun_id, lun_type=constants.LUN_TYPE): """Get lungroup ids by lun id.""" url = ("/lungroup/associate?TYPE=256" - "&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=%s" % lun_id) + "&ASSOCIATEOBJTYPE=%s&ASSOCIATEOBJID=%s" % (lun_type, lun_id)) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lungroup id by lun id error.')) @@ -1472,8 +1519,9 @@ class RestClient(object): return lungroup_ids - def get_lun_info(self, lun_id): - url = "/lun/" + lun_id + def get_lun_info(self, lun_id, lun_type=constants.LUN_TYPE): + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' + url = ("/%s/%s" % (cmd_type, lun_id)) result = self.call(url, None, "GET") msg = _('Get volume error.') diff --git a/cinder/volume/drivers/ibm/flashsystem_common.py b/cinder/volume/drivers/ibm/flashsystem_common.py index 9aa4d6ec2..41853f7cc 100644 --- a/cinder/volume/drivers/ibm/flashsystem_common.py +++ b/cinder/volume/drivers/ibm/flashsystem_common.py @@ -64,6 +64,7 @@ CONF.register_opts(flashsystem_opts) class FlashSystemDriver(san.SanDriver, + driver.ManageableVD, driver.TransferVD, driver.ExtendVD, driver.SnapshotVD, @@ -92,9 +93,11 @@ class FlashSystemDriver(san.SanDriver, 1.0.10 - Fix bug #1585085, add host name check in _find_host_exhaustive for iSCSI 1.0.11 - Update driver to use ABC metaclasses + 1.0.12 - Update driver to support Manage/Unmanage + existing volume """ - VERSION = "1.0.11" + VERSION = "1.0.12" def __init__(self, *args, **kwargs): super(FlashSystemDriver, self).__init__(*args, **kwargs) @@ -104,6 +107,9 @@ class FlashSystemDriver(san.SanDriver, self._context = None self._system_name = None self._system_id = None + self._check_lock_interval = 5 + self._vdisk_copy_in_progress = set() + self._vdisk_copy_lock = None def _ssh(self, ssh_cmd, check_exit_code=True): try: @@ -149,6 +155,9 @@ class FlashSystemDriver(san.SanDriver, map[idx].append(t_wwpn) return map + def _check_vdisk_params(self, params): + raise NotImplementedError() + def _connector_to_hostname_prefix(self, connector): """Translate connector info to storage system host name. 
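# The pool 'disk_type' capability reported above follows directly from
# which tier capacities are non-zero; a worked example of that mapping
# (capacities are strings, as the REST API returns them):
def classify_disk_type(pool_info):
    tiers = ['ssd', 'sas', 'nl_sas']
    present = [t for i, t in enumerate(tiers)
               if pool_info['TIER%dCAPACITY' % i] != '0']
    if len(present) > 1:
        return 'mix'
    return present[0] if present else None

print(classify_disk_type({'TIER0CAPACITY': '100',
                          'TIER1CAPACITY': '0',
                          'TIER2CAPACITY': '0'}))    # -> ssd
print(classify_disk_type({'TIER0CAPACITY': '100',
                          'TIER1CAPACITY': '50',
                          'TIER2CAPACITY': '0'}))    # -> mix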
@@ -279,6 +288,9 @@ class FlashSystemDriver(san.SanDriver, self._unset_vdisk_copy_in_progress( [src_vdisk_name, dest_vdisk_name]) + def _create_host(self, connector): + raise NotImplementedError() + def _create_vdisk(self, name, size, unit, opts): """Create a new vdisk.""" @@ -390,6 +402,9 @@ class FlashSystemDriver(san.SanDriver, return attributes + def _find_host_exhaustive(self, connector, hosts): + raise NotImplementedError() + def _get_hdr_dic(self, header, row, delim): """Return CLI row data as a dictionary indexed by names from header. @@ -530,18 +545,24 @@ class FlashSystemDriver(san.SanDriver, except KeyError: self._handle_keyerror('lsnode', header) - def _get_vdisk_attributes(self, vdisk_name): + def _get_vdisk_attributes(self, vdisk_ref): """Return vdisk attributes Exception is raised if the information from system can not be parsed/matched to a single vdisk. + + :param vdisk_ref: vdisk name or vdisk id """ ssh_cmd = [ - 'svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_name] + 'svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_ref] return self._execute_command_and_parse_attributes(ssh_cmd) + def _get_vdisk_map_properties( + self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): + raise NotImplementedError() + def _get_vdiskhost_mappings(self, vdisk_name): """Return the defined storage mappings for a vdisk.""" @@ -684,6 +705,27 @@ class FlashSystemDriver(san.SanDriver, 'out': six.text_type(out), 'err': six.text_type(err)}) + def _manage_input_check(self, existing_ref): + """Verify the input of manage function.""" + # Check that the reference is valid + if 'source-name' in existing_ref: + manage_source = existing_ref['source-name'] + vdisk = self._get_vdisk_attributes(manage_source) + elif 'source-id' in existing_ref: + manage_source = existing_ref['source-id'] + vdisk = self._get_vdisk_attributes(manage_source) + else: + reason = _('Reference must contain source-id or ' + 'source-name element.') + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=reason) + if vdisk is None: + reason = (_('No vdisk with the ID specified by ref %s.') + % manage_source) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=reason) + return vdisk + @utils.synchronized('flashsystem-map', external=True) def _map_vdisk_to_host(self, vdisk_name, connector): """Create a mapping between a vdisk to a host.""" @@ -785,6 +827,26 @@ class FlashSystemDriver(san.SanDriver, LOG.debug('leave: _remove_device') + def _rename_vdisk(self, vdisk_name, new_name): + """Rename vdisk""" + # Try to rename volume only if found on the storage + vdisk_defined = self._is_vdisk_defined(vdisk_name) + if not vdisk_defined: + LOG.warning(_LW('warning: Tried to rename vdisk %s but ' + 'it does not exist.'), vdisk_name) + return + ssh_cmd = [ + 'svctask', 'chvdisk', '-name', new_name, vdisk_name] + out, err = self._ssh(ssh_cmd) + # No output should be returned from chvdisk + self._assert_ssh_return( + (not out.strip()), + '_rename_vdisk %(name)s' % {'name': vdisk_name}, + ssh_cmd, out, err) + + LOG.info(_LI('Renamed %(vdisk)s to %(newname)s .'), + {'vdisk': vdisk_name, 'newname': new_name}) + def _scan_device(self, properties): LOG.debug('enter: _scan_device') @@ -1094,3 +1156,32 @@ class FlashSystemDriver(san.SanDriver, self._update_volume_stats() return self._stats + + def manage_existing(self, volume, existing_ref): + """Manages an existing vdisk. + + Renames the vdisk to match the expected name for the volume. 
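# Hypothetical usage of the manage/unmanage support sketched in the hunks
# above; the ref dict keys ('source-name'/'source-id') come from Cinder's
# manage API, and the vdisk name below is made up:
existing_ref = {'source-name': 'legacy_vdisk_01'}
# size_gb = driver.manage_existing_get_size(volume, existing_ref)
# driver.manage_existing(volume, existing_ref)  # renames to volume-<id>
# driver.unmanage(volume)  # leaves the vdisk on the array untouched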
+ """ + LOG.debug('enter: manage_existing: volume %(vol)s ref %(ref)s.', + {'vol': volume, 'ref': existing_ref}) + vdisk = self._manage_input_check(existing_ref) + new_name = 'volume-' + volume['id'] + self._rename_vdisk(vdisk['name'], new_name) + LOG.debug('leave: manage_existing: volume %(vol)s ref %(ref)s.', + {'vol': volume, 'ref': existing_ref}) + return + + def manage_existing_get_size(self, volume, existing_ref): + """Return size of volume to be managed by manage_existing.""" + vdisk = self._manage_input_check(existing_ref) + if self._get_vdiskhost_mappings(vdisk['name']): + reason = _('The specified vdisk is mapped to a host.') + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=reason) + return int(vdisk['capacity']) / units.Gi + + def unmanage(self, volume): + """Removes the specified volume from Cinder management.""" + LOG.debug('unmanage: volume %(vol)s is no longer managed by cinder.', + {'vol': volume}) + pass diff --git a/cinder/volume/drivers/ibm/flashsystem_fc.py b/cinder/volume/drivers/ibm/flashsystem_fc.py index 461d275dd..c0038035a 100644 --- a/cinder/volume/drivers/ibm/flashsystem_fc.py +++ b/cinder/volume/drivers/ibm/flashsystem_fc.py @@ -79,9 +79,14 @@ class FlashSystemFCDriver(fscommon.FlashSystemDriver): 1.0.10 - Fix bug #1585085, add host name check in _find_host_exhaustive for iSCSI 1.0.11 - Update driver to use ABC metaclasses + 1.0.12 - Update driver to support Manage/Unmanage + existing volume """ - VERSION = "1.0.11" + VERSION = "1.0.12" + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "IBM_FlashSystem_CI" def __init__(self, *args, **kwargs): super(FlashSystemFCDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/ibm/flashsystem_iscsi.py b/cinder/volume/drivers/ibm/flashsystem_iscsi.py index a59e0f22f..dc4dd2b97 100644 --- a/cinder/volume/drivers/ibm/flashsystem_iscsi.py +++ b/cinder/volume/drivers/ibm/flashsystem_iscsi.py @@ -77,9 +77,14 @@ class FlashSystemISCSIDriver(fscommon.FlashSystemDriver): 1.0.10 - Fix bug #1585085, add host name check in _find_host_exhaustive for iSCSI 1.0.11 - Update driver to use ABC metaclasses + 1.0.12 - Update driver to support Manage/Unmanage + existing volume """ - VERSION = "1.0.11" + VERSION = "1.0.12" + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "IBM_FlashSystem_CI" def __init__(self, *args, **kwargs): super(FlashSystemISCSIDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/ibm/gpfs.py b/cinder/volume/drivers/ibm/gpfs.py index 55f1fe7f0..2b7942b1a 100644 --- a/cinder/volume/drivers/ibm/gpfs.py +++ b/cinder/volume/drivers/ibm/gpfs.py @@ -124,6 +124,9 @@ class GPFSDriver(driver.ConsistencyGroupVD, driver.ExtendVD, VERSION = "1.3.1" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "IBM_GPFS_CI" + def __init__(self, *args, **kwargs): super(GPFSDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(gpfs_opts) diff --git a/cinder/volume/drivers/ibm/xiv_ds8k.py b/cinder/volume/drivers/ibm/ibm_storage.py similarity index 64% rename from cinder/volume/drivers/ibm/xiv_ds8k.py rename to cinder/volume/drivers/ibm/ibm_storage.py index de34e4657..daad160bc 100644 --- a/cinder/volume/drivers/ibm/xiv_ds8k.py +++ b/cinder/volume/drivers/ibm/ibm_storage.py @@ -19,7 +19,8 @@ # Avishay Traeger """ -Unified Volume driver for IBM XIV and DS8K Storage Systems. +IBM Storage driver is a unified Volume driver for IBM XIV, Spectrum Accelerate, +FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems. 
""" from oslo_config import cfg @@ -27,22 +28,21 @@ from oslo_log import log as logging from oslo_utils import importutils from cinder import exception -from cinder import interface from cinder.volume import driver from cinder.volume.drivers.san import san -xiv_ds8k_opts = [ +driver_opts = [ cfg.StrOpt( - 'xiv_ds8k_proxy', - default='xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy', + 'proxy', + default='storage.proxy.IBMStorageProxy', help='Proxy driver that connects to the IBM Storage Array'), cfg.StrOpt( - 'xiv_ds8k_connection_type', + 'connection_type', default='iscsi', choices=['fibre_channel', 'iscsi'], help='Connection type to the IBM Storage Array'), cfg.StrOpt( - 'xiv_chap', + 'chap', default='disabled', choices=['disabled', 'enabled'], help='CHAP authentication mode, effective only for iscsi' @@ -54,47 +54,51 @@ xiv_ds8k_opts = [ ] CONF = cfg.CONF -CONF.register_opts(xiv_ds8k_opts) +CONF.register_opts(driver_opts) LOG = logging.getLogger(__name__) -@interface.volumedriver -class XIVDS8KDriver(san.SanDriver, - driver.ManageableVD, - driver.ExtendVD, - driver.SnapshotVD, - driver.MigrateVD, - driver.ConsistencyGroupVD, - driver.CloneableImageVD, - driver.TransferVD): - """Unified IBM XIV and DS8K volume driver.""" +class IBMStorageDriver(san.SanDriver, + driver.ManageableVD, + driver.ExtendVD, + driver.SnapshotVD, + driver.MigrateVD, + driver.ConsistencyGroupVD, + driver.CloneableImageVD, + driver.TransferVD): + """IBM Storage driver + + IBM Storage driver is a unified Volume driver for IBM XIV, Spectrum + Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage + systems. + """ + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "IBM_XIV-DS8K_CI" def __init__(self, *args, **kwargs): """Initialize the driver.""" - super(XIVDS8KDriver, self).__init__(*args, **kwargs) + super(IBMStorageDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(xiv_ds8k_opts) + self.configuration.append_config_values(driver_opts) - proxy = importutils.import_class(self.configuration.xiv_ds8k_proxy) + proxy = importutils.import_class(self.configuration.proxy) active_backend_id = kwargs.get('active_backend_id', None) - # NOTE: All Array specific configurations are prefixed with: - # "xiv_ds8k_array_" - # These additional flags should be specified in the cinder.conf + # Driver additional flags should be specified in the cinder.conf # preferably in each backend configuration. 
- self.xiv_ds8k_proxy = proxy( + self.proxy = proxy( { - "xiv_ds8k_user": self.configuration.san_login, - "xiv_ds8k_pass": self.configuration.san_password, - "xiv_ds8k_address": self.configuration.san_ip, - "xiv_ds8k_vol_pool": self.configuration.san_clustername, - "xiv_ds8k_connection_type": - self.configuration.xiv_ds8k_connection_type, - "xiv_chap": self.configuration.xiv_chap, + "user": self.configuration.san_login, + "password": self.configuration.san_password, + "address": self.configuration.san_ip, + "vol_pool": self.configuration.san_clustername, + "connection_type": self.configuration.connection_type, + "chap": self.configuration.chap, "management_ips": self.configuration.management_ips }, LOG, @@ -103,81 +107,81 @@ class XIVDS8KDriver(san.SanDriver, active_backend_id=active_backend_id) def do_setup(self, context): - """Setup and verify IBM XIV and DS8K Storage connection.""" + """Setup and verify connection to IBM Storage.""" - self.xiv_ds8k_proxy.setup(context) + self.proxy.setup(context) def ensure_export(self, context, volume): """Ensure an export.""" - return self.xiv_ds8k_proxy.ensure_export(context, volume) + return self.proxy.ensure_export(context, volume) def create_export(self, context, volume, connector): """Create an export.""" - return self.xiv_ds8k_proxy.create_export(context, volume) + return self.proxy.create_export(context, volume) def create_volume(self, volume): - """Create a volume on the IBM XIV and DS8K Storage system.""" + """Create a volume on the IBM Storage system.""" - return self.xiv_ds8k_proxy.create_volume(volume) + return self.proxy.create_volume(volume) def delete_volume(self, volume): - """Delete a volume on the IBM XIV and DS8K Storage system.""" + """Delete a volume on the IBM Storage system.""" - self.xiv_ds8k_proxy.delete_volume(volume) + self.proxy.delete_volume(volume) def remove_export(self, context, volume): """Disconnect a volume from an attached instance.""" - return self.xiv_ds8k_proxy.remove_export(context, volume) + return self.proxy.remove_export(context, volume) def initialize_connection(self, volume, connector): """Map the created volume.""" - return self.xiv_ds8k_proxy.initialize_connection(volume, connector) + return self.proxy.initialize_connection(volume, connector) def terminate_connection(self, volume, connector, **kwargs): """Terminate a connection to a volume.""" - return self.xiv_ds8k_proxy.terminate_connection(volume, connector) + return self.proxy.terminate_connection(volume, connector) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" - return self.xiv_ds8k_proxy.create_volume_from_snapshot( + return self.proxy.create_volume_from_snapshot( volume, snapshot) def create_snapshot(self, snapshot): """Create a snapshot.""" - return self.xiv_ds8k_proxy.create_snapshot(snapshot) + return self.proxy.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Delete a snapshot.""" - return self.xiv_ds8k_proxy.delete_snapshot(snapshot) + return self.proxy.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): """Get volume stats.""" - return self.xiv_ds8k_proxy.get_volume_stats(refresh) + return self.proxy.get_volume_stats(refresh) def create_cloned_volume(self, tgt_volume, src_volume): """Create Cloned Volume.""" - return self.xiv_ds8k_proxy.create_cloned_volume(tgt_volume, src_volume) + return self.proxy.create_cloned_volume(tgt_volume, src_volume) def extend_volume(self, volume, new_size): """Extend Created Volume.""" - self.xiv_ds8k_proxy.extend_volume(volume, 
new_size) + self.proxy.extend_volume(volume, new_size) def migrate_volume(self, context, volume, host): """Migrate the volume to the specified host.""" - return self.xiv_ds8k_proxy.migrate_volume(context, volume, host) + return self.proxy.migrate_volume(context, volume, host) def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. @@ -187,8 +191,9 @@ class XIVDS8KDriver(san.SanDriver, be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. - In the case of XIV, the existing_ref consists of a single field named - 'existing_ref' representing the name of the volume on the storage. + In the case of XIV family and FlashSystem A9000 family, the + existing_ref consists of a single field named 'existing_ref' + representing the name of the volume on the storage. There are two ways to do this: @@ -209,72 +214,72 @@ class XIVDS8KDriver(san.SanDriver, object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. """ - return self.xiv_ds8k_proxy.manage_volume(volume, existing_ref) + return self.proxy.manage_volume(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing.""" - return self.xiv_ds8k_proxy.manage_volume_get_size(volume, existing_ref) + return self.proxy.manage_volume_get_size(volume, existing_ref) def unmanage(self, volume): """Removes the specified volume from Cinder management.""" - return self.xiv_ds8k_proxy.unmanage_volume(volume) + return self.proxy.unmanage_volume(volume) def freeze_backend(self, context): """Notify the backend that it's frozen. """ - return self.xiv_ds8k_proxy.freeze_backend(context) + return self.proxy.freeze_backend(context) def thaw_backend(self, context): """Notify the backend that it's unfrozen/thawed. """ - return self.xiv_ds8k_proxy.thaw_backend(context) + return self.proxy.thaw_backend(context) def failover_host(self, context, volumes, secondary_id=None): """Failover a backend to a secondary replication target. 
""" - return self.xiv_ds8k_proxy.failover_host( + return self.proxy.failover_host( context, volumes, secondary_id) def get_replication_status(self, context, volume): """Return replication status.""" - return self.xiv_ds8k_proxy.get_replication_status(context, volume) + return self.proxy.get_replication_status(context, volume) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" - return self.xiv_ds8k_proxy.retype(ctxt, volume, new_type, diff, host) + return self.proxy.retype(ctxt, volume, new_type, diff, host) def create_consistencygroup(self, context, group): """Creates a consistency group.""" - return self.xiv_ds8k_proxy.create_consistencygroup(context, group) + return self.proxy.create_consistencygroup(context, group) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" - return self.xiv_ds8k_proxy.delete_consistencygroup( + return self.proxy.delete_consistencygroup( context, group, volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a consistency group snapshot.""" - return self.xiv_ds8k_proxy.create_cgsnapshot( + return self.proxy.create_cgsnapshot( context, cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a consistency group snapshot.""" - return self.xiv_ds8k_proxy.delete_cgsnapshot( + return self.proxy.delete_cgsnapshot( context, cgsnapshot, snapshots) def update_consistencygroup(self, context, group, add_volumes, remove_volumes): """Adds or removes volume(s) to/from an existing consistency group.""" - return self.xiv_ds8k_proxy.update_consistencygroup( + return self.proxy.update_consistencygroup( context, group, add_volumes, remove_volumes) def create_consistencygroup_from_src( @@ -282,6 +287,6 @@ class XIVDS8KDriver(san.SanDriver, source_cg=None, source_vols=None): """Creates a consistencygroup from source.""" - return self.xiv_ds8k_proxy.create_consistencygroup_from_src( + return self.proxy.create_consistencygroup_from_src( context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py index d2791cd35..6a51667f1 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py @@ -255,7 +255,7 @@ class StorwizeSSH(object): If vdisk already mapped and multihostmap is True, use the force flag. 
""" ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host, - '-scsi', lun, vdisk] + '-scsi', lun, '"%s"' % vdisk] if multihostmap: ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force') try: @@ -335,11 +335,12 @@ class StorwizeSSH(object): return self.run_ssh_assert_no_output(ssh_cmd) def rmvdiskhostmap(self, host, vdisk): - ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host, vdisk] + ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host, + '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsvdiskhostmap(self, vdisk): - ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk] + ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=True) def lshostvdiskmap(self, host): @@ -360,17 +361,18 @@ class StorwizeSSH(object): ssh_cmd = ['svctask', 'rmvdisk'] if force: ssh_cmd += ['-force'] - ssh_cmd += [vdisk] + ssh_cmd += ['"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsvdisk(self, vdisk): """Return vdisk attributes or None if it doesn't exist.""" - ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk] + ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', + '"%s"' % vdisk] out, err = self._ssh(ssh_cmd, check_exit_code=False) - if not len(err): + if not err: return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', with_header=False)[0] - if err.startswith('CMMVC5754E'): + if 'CMMVC5754E' in err: return None msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % @@ -390,22 +392,22 @@ class StorwizeSSH(object): return self.run_ssh_info(ssh_cmd, with_header=True) def chvdisk(self, vdisk, params): - ssh_cmd = ['svctask', 'chvdisk'] + params + [vdisk] + ssh_cmd = ['svctask', 'chvdisk'] + params + ['"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def movevdisk(self, vdisk, iogrp): - ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, vdisk] + ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def expandvdisksize(self, vdisk, amount): ssh_cmd = ( ['svctask', 'expandvdisksize', '-size', six.text_type(amount), - '-unit', 'gb', vdisk]) + '-unit', 'gb', '"%s"' % vdisk]) self.run_ssh_assert_no_output(ssh_cmd) def mkfcmap(self, source, target, full_copy, copy_rate, consistgrp=None): - ssh_cmd = ['svctask', 'mkfcmap', '-source', source, '-target', - target, '-autodelete'] + ssh_cmd = ['svctask', 'mkfcmap', '-source', '"%s"' % source, '-target', + '"%s"' % target, '-autodelete'] if not full_copy: ssh_cmd.extend(['-copyrate', '0']) else: @@ -469,7 +471,8 @@ class StorwizeSSH(object): self.run_ssh_assert_no_output(ssh_cmd) def lsvdiskfcmappings(self, vdisk): - ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!', vdisk] + ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!', + '"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=True) def lsfcmap(self, fc_map_id): @@ -493,7 +496,7 @@ class StorwizeSSH(object): def addvdiskcopy(self, vdisk, dest_pool, params): ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp', - '"%s"' % dest_pool, vdisk]) + '"%s"' % dest_pool, '"%s"' % vdisk]) return self.run_ssh_check_created(ssh_cmd) def lsvdiskcopy(self, vdisk, copy_id=None): @@ -502,24 +505,25 @@ class StorwizeSSH(object): if copy_id: ssh_cmd += ['-copy', copy_id] with_header = False - ssh_cmd += [vdisk] + ssh_cmd += ['"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=with_header) def lsvdisksyncprogress(self, vdisk, copy_id): ssh_cmd = 
['svcinfo', 'lsvdisksyncprogress', '-delim', '!', - '-copy', copy_id, vdisk] + '-copy', copy_id, '"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=True)[0] def rmvdiskcopy(self, vdisk, copy_id): - ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, vdisk] + ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def addvdiskaccess(self, vdisk, iogrp): - ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp, vdisk] + ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp, + '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def rmvdiskaccess(self, vdisk, iogrp): - ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, vdisk] + ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsportfc(self, node_id): @@ -535,6 +539,7 @@ class StorwizeHelpers(object): # 'default': to indicate the value, when the parameter is disabled. # 'param': to indicate the corresponding parameter in the command. # 'type': to indicate the type of this value. + WAIT_TIME = 5 svc_qos_keys = {'IOThrottling': {'default': '0', 'param': 'rate', 'type': int}} @@ -716,9 +721,11 @@ class StorwizeHelpers(object): wwpn_info['remote_wwpn'].lower() == wwpn.lower()): host_name = wwpn_info['name'] + break except KeyError: self.handle_keyerror('lsfabric', wwpn_info) - + if host_name: + break if host_name: LOG.debug('Leave: get_host_from_connector: host %s.', host_name) return host_name @@ -745,8 +752,10 @@ class StorwizeHelpers(object): for name in host_list: try: resp = self.ssh.lshost(host=name) - except processutils.ProcessExecutionError as ex: - if 'CMMVC5754E' in ex.stderr: + except exception.VolumeBackendAPIException as ex: + LOG.debug("Exception message: %s" % ex.msg) + if 'CMMVC5754E' in ex.msg: + LOG.debug("CMMVC5754E found in CLI exception.") # CMMVC5754E: The specified object does not exist # The host has been deleted while walking the list. 
# This is a result of a host change on the SVC that @@ -1126,6 +1135,7 @@ class StorwizeHelpers(object): return params def create_vdisk(self, name, size, units, pool, opts): + name = '"%s"' % name LOG.debug('Enter: create_vdisk: vdisk %s.', name) params = self._get_vdisk_create_params(opts) self.ssh.mkvdisk(name, size, units, pool, opts, params) @@ -1178,8 +1188,7 @@ class StorwizeHelpers(object): def _prepare_fc_map(self, fc_map_id, timeout): self.ssh.prestartfcmap(fc_map_id) mapping_ready = False - wait_time = 5 - max_retries = (timeout // wait_time) + 1 + max_retries = (timeout // self.WAIT_TIME) + 1 for try_number in range(1, max_retries): mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id) if (mapping_attrs is None or @@ -1198,7 +1207,7 @@ class StorwizeHelpers(object): 'attr': mapping_attrs}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - greenthread.sleep(wait_time) + greenthread.sleep(self.WAIT_TIME) if not mapping_ready: msg = (_('Mapping %(id)s prepare failed to complete within the' @@ -1888,6 +1897,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, METRO = 'metro' VALID_REP_TYPES = (GLOBAL, METRO) FAILBACK_VALUE = 'default' + DEFAULT_GR_SLEEP = random.randint(20, 500) / 100.0 def __init__(self, *args, **kwargs): super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs) @@ -2131,7 +2141,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, except Exception as e: LOG.error(_LE('Error has occurred: %s'), e) last_exception = e - greenthread.sleep(random.randint(20, 500) / 100.0) + greenthread.sleep(self.DEFAULT_GR_SLEEP) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, @@ -2246,9 +2256,14 @@ class StorwizeSVCCommonDriver(san.SanDriver, self._helpers.delete_vdisk(snapshot['name'], False) def create_volume_from_snapshot(self, volume, snapshot): - if volume['size'] != snapshot['volume_size']: - msg = (_('create_volume_from_snapshot: Source and destination ' - 'size differ.')) + if snapshot['volume_size'] > volume['size']: + msg = (_("create_volume_from_snapshot: snapshot %(snapshot_name)s " + "size is %(snapshot_size)dGB and doesn't fit in target " + "volume %(volume_name)s of size %(volume_size)dGB.") % + {'snapshot_name': snapshot['name'], + 'snapshot_size': snapshot['volume_size'], + 'volume_name': volume['name'], + 'volume_size': volume['size']}) LOG.error(msg) raise exception.InvalidInput(message=msg) @@ -2259,6 +2274,17 @@ class StorwizeSVCCommonDriver(san.SanDriver, self._helpers.create_copy(snapshot['name'], volume['name'], snapshot['id'], self.configuration, opts, True, pool=pool) + # The volume size is equal to the snapshot size in most + # cases. But in some scenarios the new volume size + # may be bigger than the source volume size. + # SVC does not support FlashCopy between two volumes + # of different sizes, so use the snapshot size to + # create the volume first and then extend it to + # the target size. + if volume['size'] > snapshot['volume_size']: + # Extend the newly created target volume to the expected size. 
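# The relaxed check above accepts any target at least as large as the
# snapshot: FlashCopy needs equal sizes, so the copy happens at snapshot
# size and the vdisk is extended afterwards. A sketch of the arithmetic:
def plan_create_from_snapshot(volume_size_gb, snapshot_size_gb):
    if snapshot_size_gb > volume_size_gb:
        raise ValueError("snapshot does not fit in the target volume")
    return {'copy_at_gb': snapshot_size_gb,
            'extend_by_gb': volume_size_gb - snapshot_size_gb}

print(plan_create_from_snapshot(15, 10))
# -> {'copy_at_gb': 10, 'extend_by_gb': 5}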
+ self._extend_volume_op(volume, volume['size'], + snapshot['volume_size']) if opts['qos']: self._helpers.add_vdisk_qos(volume['name'], opts['qos']) diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py index f13d831f1..95e6befa0 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py @@ -92,6 +92,9 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): VERSION = "2.1.1" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "IBM_STORWIZE_CI" + def __init__(self, *args, **kwargs): super(StorwizeSVCFCDriver, self).__init__(*args, **kwargs) self.protocol = 'FC' diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py index 5cc7c0691..819f07159 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py @@ -92,6 +92,9 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): VERSION = "2.1.1" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "IBM_STORWIZE_CI" + def __init__(self, *args, **kwargs): super(StorwizeSVCISCSIDriver, self).__init__(*args, **kwargs) self.protocol = 'iSCSI' diff --git a/cinder/volume/drivers/infortrend/infortrend_fc_cli.py b/cinder/volume/drivers/infortrend/infortrend_fc_cli.py index 6fda5dfca..afa48fcaa 100644 --- a/cinder/volume/drivers/infortrend/infortrend_fc_cli.py +++ b/cinder/volume/drivers/infortrend/infortrend_fc_cli.py @@ -37,6 +37,10 @@ class InfortrendCLIFCDriver(driver.FibreChannelDriver): 1.0.1 - Support DS4000 """ + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Infortrend_Storage_CI" + VERSION = common_cli.InfortrendCommon.VERSION + def __init__(self, *args, **kwargs): super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs) self.common = common_cli.InfortrendCommon( diff --git a/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py b/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py index 461776cef..3d3a74fc6 100644 --- a/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py +++ b/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py @@ -35,6 +35,10 @@ class InfortrendCLIISCSIDriver(driver.ISCSIDriver): 1.0.1 - Support DS4000 """ + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Infortrend_Storage_CI" + VERSION = common_cli.InfortrendCommon.VERSION + def __init__(self, *args, **kwargs): super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs) self.common = common_cli.InfortrendCommon( diff --git a/cinder/volume/drivers/kaminario/kaminario_common.py b/cinder/volume/drivers/kaminario/kaminario_common.py index 5945c8a3a..df951b853 100644 --- a/cinder/volume/drivers/kaminario/kaminario_common.py +++ b/cinder/volume/drivers/kaminario/kaminario_common.py @@ -14,23 +14,35 @@ # under the License. 
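# Hoisting the sleep literals into WAIT_TIME and DEFAULT_GR_SLEEP in the
# Storwize hunks above makes the delays patchable, presumably so tests
# can zero them out; a minimal sketch of that benefit:
import time

class Helpers(object):
    WAIT_TIME = 5

    def wait_one_interval(self):
        time.sleep(self.WAIT_TIME)

# In a unit test: Helpers.WAIT_TIME = 0  # retry loops run without sleeping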
"""Volume driver for Kaminario K2 all-flash arrays.""" +import math import re -import six +import threading +import time +import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import units from oslo_utils import versionutils +import requests +import six import cinder from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _, _LE, _LW, _LI +from cinder import objects +from cinder.objects import fields from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import utils as vol_utils +krest = importutils.try_import("krest") + K2_MIN_VERSION = '2.2.0' +K2_LOCK_PREFIX = 'Kaminario' +MAX_K2_RETRY = 5 +K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER LOG = logging.getLogger(__name__) kaminario1_opts = [ @@ -38,7 +50,11 @@ kaminario1_opts = [ default='K2-nodedup', help="If volume-type name contains this substring " "nodedup volume will be created, otherwise " - "dedup volume wil be created.")] + "dedup volume wil be created.", + deprecated_for_removal=True, + deprecated_reason="This option is deprecated in favour of " + "'kaminario:thin_prov_type' in extra-specs " + "and will be removed in the next release.")] kaminario2_opts = [ cfg.BoolOpt('auto_calc_max_oversubscription_ratio', default=False, @@ -48,6 +64,43 @@ kaminario2_opts = [ CONF = cfg.CONF CONF.register_opts(kaminario1_opts) +K2HTTPError = requests.exceptions.HTTPError +K2_RETRY_ERRORS = ("MC_ERR_BUSY", "MC_ERR_BUSY_SPECIFIC", + "MC_ERR_INPROGRESS", "MC_ERR_START_TIMEOUT") + +if krest: + class KrestWrap(krest.EndPoint): + def __init__(self, *args, **kwargs): + self.krestlock = threading.Lock() + super(KrestWrap, self).__init__(*args, **kwargs) + + def _should_retry(self, err_code, err_msg): + if err_code == 400: + for er in K2_RETRY_ERRORS: + if er in err_msg: + LOG.debug("Retry ERROR: %d with status %s", + err_code, err_msg) + return True + return False + + @utils.retry(exception.KaminarioRetryableException, + retries=MAX_K2_RETRY) + def _request(self, method, *args, **kwargs): + try: + LOG.debug("running through the _request wrapper...") + self.krestlock.acquire() + return super(KrestWrap, self)._request(method, + *args, **kwargs) + except K2HTTPError as err: + err_code = err.response.status_code + err_msg = err.response.text + if self._should_retry(err_code, err_msg): + raise exception.KaminarioRetryableException( + reason=six.text_type(err_msg)) + raise + finally: + self.krestlock.release() + def kaminario_logger(func): """Return a function wrapper. 
@@ -71,28 +124,43 @@ def kaminario_logger(func): return func_wrapper +class Replication(object): + def __init__(self, config, *args, **kwargs): + self.backend_id = config.get('backend_id') + self.login = config.get('login') + self.password = config.get('password') + self.rpo = config.get('rpo') + + class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): VENDOR = "Kaminario" - VERSION = "1.0" stats = {} def __init__(self, *args, **kwargs): super(KaminarioCinderDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(kaminario2_opts) + self.replica = None self._protocol = None + k2_lock_sfx = self.configuration.safe_get('volume_backend_name') or '' + self.k2_lock_name = "%s-%s" % (K2_LOCK_PREFIX, k2_lock_sfx) def check_for_setup_error(self): - if self.krest is None: + if krest is None: msg = _("Unable to import 'krest' python module.") LOG.error(msg) raise exception.KaminarioCinderDriverException(reason=msg) else: conf = self.configuration - self.client = self.krest.EndPoint(conf.san_ip, - conf.san_login, - conf.san_password, - ssl_validate=False) + self.client = KrestWrap(conf.san_ip, + conf.san_login, + conf.san_password, + ssl_validate=False) + if self.replica: + self.target = KrestWrap(self.replica.backend_id, + self.replica.login, + self.replica.password, + ssl_validate=False) v_rs = self.client.search("system/state") if hasattr(v_rs, 'hits') and v_rs.total != 0: ver = v_rs.hits[0].rest_api_version @@ -117,11 +185,19 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): if not getattr(self.configuration, attr, None): raise exception.InvalidInput(reason=_('%s is not set.') % attr) + replica = self.configuration.safe_get('replication_device') + if replica and isinstance(replica, list): + replica_ops = ['backend_id', 'login', 'password', 'rpo'] + for attr in replica_ops: + if attr not in replica[0]: + msg = _('replication_device %s is not set.') % attr + raise exception.InvalidInput(reason=msg) + self.replica = Replication(replica[0]) + @kaminario_logger def do_setup(self, context): super(KaminarioCinderDriver, self).do_setup(context) self._check_ops() - self.krest = importutils.try_import("krest") @kaminario_logger def create_volume(self, volume): @@ -132,10 +208,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): """ vg_name = self.get_volume_group_name(volume.id) vol_name = self.get_volume_name(volume.id) - if CONF.kaminario_nodedup_substring in volume.volume_type.name: - prov_type = False - else: - prov_type = True + prov_type = self._get_is_dedup(volume.get('volume_type')) try: LOG.debug("Creating volume group with name: %(name)s, " "quota: unlimited and dedup_support: %(dedup)s", @@ -146,9 +219,9 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.debug("Creating volume with name: %(name)s, size: %(size)s " "GB, volume_group: %(vg)s", {'name': vol_name, 'size': volume.size, 'vg': vg_name}) - self.client.new("volumes", name=vol_name, - size=volume.size * units.Mi, - volume_group=vg).save() + vol = self.client.new("volumes", name=vol_name, + size=volume.size * units.Mi, + volume_group=vg).save() except Exception as ex: vg_rs = self.client.search("volume_groups", name=vg_name) if vg_rs.total != 0: @@ -158,6 +231,325 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) + if self._get_is_replica(volume.volume_type) and self.replica: + 
self._create_volume_replica(volume, vg, vol, self.replica.rpo) + + @kaminario_logger + def _create_volume_replica(self, volume, vg, vol, rpo): + """Volume replica creation in K2 needs session and remote volume. + + - create a session + - create a volume in the volume group + + """ + session_name = self.get_session_name(volume.id) + rsession_name = self.get_rep_name(session_name) + + rvg_name = self.get_rep_name(vg.name) + rvol_name = self.get_rep_name(vol.name) + + k2peer_rs = self.client.search("replication/peer_k2arrays", + mgmt_host=self.replica.backend_id) + if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: + k2peer = k2peer_rs.hits[0] + else: + msg = _("Unable to find K2peer in source K2:") + LOG.error(msg) + raise exception.KaminarioCinderDriverException(reason=msg) + try: + LOG.debug("Creating source session with name: %(sname)s and " + " target session name: %(tname)s", + {'sname': session_name, 'tname': rsession_name}) + src_ssn = self.client.new("replication/sessions") + src_ssn.replication_peer_k2array = k2peer + src_ssn.auto_configure_peer_volumes = "False" + src_ssn.local_volume_group = vg + src_ssn.replication_peer_volume_group_name = rvg_name + src_ssn.remote_replication_session_name = rsession_name + src_ssn.name = session_name + src_ssn.rpo = rpo + src_ssn.save() + LOG.debug("Creating remote volume with name: %s", + rvol_name) + self.client.new("replication/peer_volumes", + local_volume=vol, + name=rvol_name, + replication_session=src_ssn).save() + src_ssn.state = "in_sync" + src_ssn.save() + except Exception as ex: + LOG.exception(_LE("Replication for the volume %s has " + "failed."), vol.name) + self._delete_by_ref(self.client, "replication/sessions", + session_name, 'session') + self._delete_by_ref(self.target, "replication/sessions", + rsession_name, 'remote session') + self._delete_by_ref(self.target, "volumes", + rvol_name, 'remote volume') + self._delete_by_ref(self.client, "volumes", vol.name, "volume") + self._delete_by_ref(self.target, "volume_groups", + rvg_name, "remote vg") + self._delete_by_ref(self.client, "volume_groups", vg.name, "vg") + raise exception.KaminarioCinderDriverException( + reason=six.text_type(ex.message)) + + @kaminario_logger + def _create_failover_volume_replica(self, volume, vg_name, vol_name): + """Volume replica creation in K2 needs session and remote volume. 
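# The except-branches above unwind partially created replication objects
# through repeated _delete_by_ref calls. A generic sketch of that
# cleanup pattern, assuming krest-style results with .hits and .delete():
def delete_by_ref(device, url, name):
    for hit in device.search(url, name=name).hits:
        hit.delete()

def unwind(device, created):
    # created: (url, name) tuples recorded in creation order
    for url, name in reversed(created):
        delete_by_ref(device, url, name)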
+ + - create a session + - create a volume in the volume group + + """ + session_name = self.get_session_name(volume.id) + rsession_name = self.get_rep_name(session_name) + + rvg_name = self.get_rep_name(vg_name) + rvol_name = self.get_rep_name(vol_name) + rvg = self.target.search("volume_groups", name=rvg_name).hits[0] + rvol = self.target.search("volumes", name=rvol_name).hits[0] + k2peer_rs = self.target.search("replication/peer_k2arrays", + mgmt_host=self.configuration.san_ip) + if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: + k2peer = k2peer_rs.hits[0] + else: + msg = _("Unable to find K2peer in source K2:") + LOG.error(msg) + raise exception.KaminarioCinderDriverException(reason=msg) + try: + LOG.debug("Creating source session with name: %(sname)s and " + " target session name: %(tname)s", + {'sname': rsession_name, 'tname': session_name}) + tgt_ssn = self.target.new("replication/sessions") + tgt_ssn.replication_peer_k2array = k2peer + tgt_ssn.auto_configure_peer_volumes = "False" + tgt_ssn.local_volume_group = rvg + tgt_ssn.replication_peer_volume_group_name = vg_name + tgt_ssn.remote_replication_session_name = session_name + tgt_ssn.name = rsession_name + tgt_ssn.rpo = self.replica.rpo + tgt_ssn.save() + LOG.debug("Creating remote volume with name: %s", + rvol_name) + self.target.new("replication/peer_volumes", + local_volume=rvol, + name=vol_name, + replication_session=tgt_ssn).save() + tgt_ssn.state = "in_sync" + tgt_ssn.save() + except Exception as ex: + LOG.exception(_LE("Replication for the volume %s has " + "failed."), rvol_name) + self._delete_by_ref(self.target, "replication/sessions", + rsession_name, 'session') + self._delete_by_ref(self.client, "replication/sessions", + session_name, 'remote session') + self._delete_by_ref(self.client, "volumes", vol_name, "volume") + self._delete_by_ref(self.client, "volume_groups", vg_name, "vg") + raise exception.KaminarioCinderDriverException( + reason=six.text_type(ex.message)) + + def _delete_by_ref(self, device, url, name, msg): + rs = device.search(url, name=name) + for result in rs.hits: + result.delete() + LOG.debug("Deleting %(msg)s: %(name)s", {'msg': msg, 'name': name}) + + @kaminario_logger + def _failover_volume(self, volume): + """Promoting a secondary volume to primary volume.""" + session_name = self.get_session_name(volume.id) + rsession_name = self.get_rep_name(session_name) + tgt_ssn = self.target.search("replication/sessions", + name=rsession_name).hits[0] + if tgt_ssn.state == 'in_sync': + tgt_ssn.state = 'failed_over' + tgt_ssn.save() + LOG.debug("The target session: %s state is " + "changed to failed_over ", rsession_name) + + @kaminario_logger + def failover_host(self, context, volumes, secondary_id=None): + """Failover to replication target.""" + volume_updates = [] + back_end_ip = None + svc_host = vol_utils.extract_host(self.host, 'backend') + service = objects.Service.get_by_args(context, svc_host, + 'cinder-volume') + + if secondary_id and secondary_id != self.replica.backend_id: + LOG.error(_LE("Kaminario driver received failover_host " + "request, But backend is non replicated device")) + raise exception.UnableToFailOver(reason=_("Failover requested " + "on non replicated " + "backend.")) + + if (service.active_backend_id and + service.active_backend_id != self.configuration.san_ip): + self.snap_updates = [] + rep_volumes = [] + # update status for non-replicated primary volumes + for v in volumes: + vol_name = self.get_volume_name(v['id']) + vol = self.client.search("volumes", name=vol_name) + 
+                if v.replication_status != K2_REP_FAILED_OVER and vol.total:
+                    status = 'available'
+                    if v.volume_attachment:
+                        map_rs = self.client.search("mappings",
+                                                    volume=vol.hits[0])
+                        status = 'in-use'
+                        if map_rs.total:
+                            map_rs.hits[0].delete()
+                    volume_updates.append({'volume_id': v['id'],
+                                           'updates':
+                                           {'status': status}})
+                else:
+                    rep_volumes.append(v)
+
+            # In-sync from secondary array to primary array
+            for v in rep_volumes:
+                vol_name = self.get_volume_name(v['id'])
+                vol = self.client.search("volumes", name=vol_name)
+                rvol_name = self.get_rep_name(vol_name)
+                rvol = self.target.search("volumes", name=rvol_name)
+                session_name = self.get_session_name(v['id'])
+                rsession_name = self.get_rep_name(session_name)
+                ssn = self.target.search("replication/sessions",
+                                         name=rsession_name)
+                if ssn.total:
+                    tgt_ssn = ssn.hits[0]
+                ssn = self.client.search("replication/sessions",
+                                         name=session_name)
+                if ssn.total:
+                    src_ssn = ssn.hits[0]
+
+                if (tgt_ssn.state == 'failed_over' and
+                   tgt_ssn.current_role == 'target' and vol.total and src_ssn):
+                    map_rs = self.client.search("mappings", volume=vol.hits[0])
+                    if map_rs.total:
+                        map_rs.hits[0].delete()
+                    tgt_ssn.state = 'in_sync'
+                    tgt_ssn.save()
+                    self._check_for_status(src_ssn, 'in_sync')
+                if (rvol.total and src_ssn.state == 'in_sync' and
+                        src_ssn.current_role == 'target'):
+                    gen_no = self._create_volume_replica_user_snap(self.target,
+                                                                   tgt_ssn)
+                    self.snap_updates.append({'tgt_ssn': tgt_ssn,
+                                              'gno': gen_no,
+                                              'stime': time.time()})
+                LOG.debug("The target session: %s state is "
+                          "changed to in sync", rsession_name)
+
+            self._is_user_snap_sync_finished()
+
+            # Delete secondary volume mappings and create snapshot
+            for v in rep_volumes:
+                vol_name = self.get_volume_name(v['id'])
+                vol = self.client.search("volumes", name=vol_name)
+                rvol_name = self.get_rep_name(vol_name)
+                rvol = self.target.search("volumes", name=rvol_name)
+                session_name = self.get_session_name(v['id'])
+                rsession_name = self.get_rep_name(session_name)
+                ssn = self.target.search("replication/sessions",
+                                         name=rsession_name)
+                if ssn.total:
+                    tgt_ssn = ssn.hits[0]
+                ssn = self.client.search("replication/sessions",
+                                         name=session_name)
+                if ssn.total:
+                    src_ssn = ssn.hits[0]
+                if (rvol.total and src_ssn.state == 'in_sync' and
+                        src_ssn.current_role == 'target'):
+                    map_rs = self.target.search("mappings",
+                                                volume=rvol.hits[0])
+                    if map_rs.total:
+                        map_rs.hits[0].delete()
+                    gen_no = self._create_volume_replica_user_snap(self.target,
+                                                                   tgt_ssn)
+                    self.snap_updates.append({'tgt_ssn': tgt_ssn,
+                                              'gno': gen_no,
+                                              'stime': time.time()})
+            self._is_user_snap_sync_finished()
+            # changing source sessions to failed-over
+            for v in rep_volumes:
+                vol_name = self.get_volume_name(v['id'])
+                vol = self.client.search("volumes", name=vol_name)
+                rvol_name = self.get_rep_name(vol_name)
+                rvol = self.target.search("volumes", name=rvol_name)
+                session_name = self.get_session_name(v['id'])
+                rsession_name = self.get_rep_name(session_name)
+                ssn = self.target.search("replication/sessions",
+                                         name=rsession_name)
+                if ssn.total:
+                    tgt_ssn = ssn.hits[0]
+                ssn = self.client.search("replication/sessions",
+                                         name=session_name)
+                if ssn.total:
+                    src_ssn = ssn.hits[0]
+                if (rvol.total and src_ssn.state == 'in_sync' and
+                        src_ssn.current_role == 'target'):
+                    src_ssn.state = 'failed_over'
+                    src_ssn.save()
+                    self._check_for_status(tgt_ssn, 'suspended')
+                    LOG.debug("The target session: %s state is "
+                              "changed to failed over", session_name)
+
+                    src_ssn.state = 'in_sync'
+                    src_ssn.save()
+                    LOG.debug("The target
session: %s state is " + "changed to in sync", session_name) + rep_status = fields.ReplicationStatus.DISABLED + volume_updates.append({'volume_id': v['id'], + 'updates': + {'replication_status': rep_status}}) + + back_end_ip = self.configuration.san_ip + else: + """Failover to replication target.""" + for v in volumes: + vol_name = self.get_volume_name(v['id']) + rv = self.get_rep_name(vol_name) + if self.target.search("volumes", name=rv).total: + self._failover_volume(v) + volume_updates.append( + {'volume_id': v['id'], + 'updates': + {'replication_status': K2_REP_FAILED_OVER}}) + else: + volume_updates.append({'volume_id': v['id'], + 'updates': {'status': 'error', }}) + back_end_ip = self.replica.backend_id + return back_end_ip, volume_updates + + def _create_volume_replica_user_snap(self, k2, sess): + snap = k2.new("snapshots") + snap.is_application_consistent = "False" + snap.replication_session = sess + snap.save() + return snap.generation_number + + def _is_user_snap_sync_finished(self): + # waiting for user snapshot to be synced + while len(self.snap_updates) > 0: + for l in self.snap_updates: + sess = l.get('tgt_ssn') + gno = l.get('gno') + stime = l.get('stime') + sess.refresh() + if (sess.generation_number == gno and + sess.current_snapshot_progress == 100 + and sess.current_snapshot_id is None): + if time.time() - stime > 300: + gen_no = self._create_volume_replica_user_snap( + self.target, + sess) + self.snap_updates.append({'tgt_ssn': sess, + 'gno': gen_no, + 'stime': time.time()}) + self.snap_updates.remove(l) + eventlet.sleep(1) + @kaminario_logger def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot. @@ -272,6 +664,9 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): vg_name = self.get_volume_group_name(volume.id) vol_name = self.get_volume_name(volume.id) try: + if self._get_is_replica(volume.volume_type) and self.replica: + self._delete_volume_replica(volume, vg_name, vol_name) + LOG.debug("Searching and deleting volume: %s in K2.", vol_name) vol_rs = self.client.search("volumes", name=vol_name) if vol_rs.total != 0: @@ -285,6 +680,66 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) + @kaminario_logger + def _delete_volume_replica(self, volume, vg_name, vol_name): + rvg_name = self.get_rep_name(vg_name) + rvol_name = self.get_rep_name(vol_name) + session_name = self.get_session_name(volume.id) + rsession_name = self.get_rep_name(session_name) + src_ssn = self.client.search('replication/sessions', + name=session_name).hits[0] + tgt_ssn = self.target.search('replication/sessions', + name=rsession_name).hits[0] + src_ssn.state = 'suspended' + src_ssn.save() + self._check_for_status(tgt_ssn, 'suspended') + src_ssn.state = 'idle' + src_ssn.save() + self._check_for_status(tgt_ssn, 'idle') + tgt_ssn.delete() + src_ssn.delete() + + LOG.debug("Searching and deleting snapshots for volume groups:" + "%(vg1)s, %(vg2)s in K2.", {'vg1': vg_name, 'vg2': rvg_name}) + vg = self.client.search('volume_groups', name=vg_name).hits + rvg = self.target.search('volume_groups', name=rvg_name).hits + snaps = self.client.search('snapshots', volume_group=vg).hits + for s in snaps: + s.delete() + rsnaps = self.target.search('snapshots', volume_group=rvg).hits + for s in rsnaps: + s.delete() + + self._delete_by_ref(self.target, "volumes", rvol_name, 'remote volume') + self._delete_by_ref(self.target, "volume_groups", + rvg_name, "remote vg") + + 
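
The _delete_volume_replica teardown above is strictly ordered: the source session is set to 'suspended' and then to 'idle', with _check_for_status polling the target session after each transition, and only then are the sessions and the remote volume artifacts deleted. _check_for_status itself polls with no upper bound; the following is a minimal sketch of the same polling pattern with a bounded wait, where the standalone function and its timeout argument are assumptions for illustration, not part of this patch:

    import time

    import eventlet


    def wait_for_state(session, desired, timeout=300):
        # Poll a K2 REST session object until it reports the desired state,
        # yielding the greenthread between polls as the driver does; give up
        # after `timeout` seconds (assumed behavior, unlike the driver's
        # unbounded _check_for_status loop).
        deadline = time.time() + timeout
        while session.state != desired:
            if time.time() > deadline:
                raise RuntimeError("waited %ss for state %r, still %r"
                                   % (timeout, desired, session.state))
            session.refresh()
            eventlet.sleep(1)

The one-second eventlet sleep matches the polling cadence the driver already uses between state checks.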
@kaminario_logger + def _delete_failover_volume_replica(self, volume, vg_name, vol_name): + rvg_name = self.get_rep_name(vg_name) + rvol_name = self.get_rep_name(vol_name) + session_name = self.get_session_name(volume.id) + rsession_name = self.get_rep_name(session_name) + tgt_ssn = self.target.search('replication/sessions', + name=rsession_name).hits[0] + tgt_ssn.state = 'idle' + tgt_ssn.save() + tgt_ssn.delete() + + LOG.debug("Searching and deleting snapshots for target volume group " + "and target volume: %(vol)s, %(vg)s in K2.", + {'vol': rvol_name, 'vg': rvg_name}) + rvg = self.target.search('volume_groups', name=rvg_name).hits + rsnaps = self.target.search('snapshots', volume_group=rvg).hits + for s in rsnaps: + s.delete() + + @kaminario_logger + def _check_for_status(self, obj, status): + while obj.state != status: + obj.refresh() + eventlet.sleep(1) + @kaminario_logger def get_volume_stats(self, refresh=False): if refresh: @@ -319,7 +774,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.debug("Creating a snapshot: %(snap)s from vg: %(vg)s", {'snap': snap_name, 'vg': vg_name}) self.client.new("snapshots", short_name=snap_name, - source=vg, retention_policy=rpolicy).save() + source=vg, retention_policy=rpolicy, + is_auto_deleteable=False).save() except Exception as ex: LOG.exception(_LE("Creation of snapshot: %s failed."), snap_name) raise exception.KaminarioCinderDriverException( @@ -375,7 +831,10 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): 'total_volumes': total_volumes, 'thick_provisioning_support': False, 'provisioned_capacity_gb': provisioned_vol / units.Mi, - 'max_oversubscription_ratio': ratio} + 'max_oversubscription_ratio': ratio, + 'kaminario:thin_prov_type': 'dedup/nodedup', + 'replication_enabled': True, + 'kaminario:replication': True} @kaminario_logger def get_initiator_host_name(self, connector): @@ -385,7 +844,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): All other characters are replaced with '_'. 
        Total characters in initiator host name: 32
        """
-        return re.sub('[^0-9a-zA-Z-_]', '_', connector['host'])[:32]
+        return re.sub('[^0-9a-zA-Z-_]', '_', connector.get('host', ''))[:32]
 
     @kaminario_logger
     def get_volume_group_name(self, vid):
@@ -397,6 +856,11 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
         """Return the volume name."""
         return "cv-{0}".format(vid)
 
+    @kaminario_logger
+    def get_session_name(self, vid):
+        """Return the session name."""
+        return "ssn-{0}".format(vid)
+
     @kaminario_logger
     def get_snap_name(self, sid):
         """Return the snapshot name."""
@@ -407,6 +871,11 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
         """Return the view name."""
         return "cview-{0}".format(vid)
 
+    @kaminario_logger
+    def get_rep_name(self, name):
+        """Return the corresponding replication names."""
+        return "r{0}".format(name)
+
     @kaminario_logger
     def _delete_host_by_name(self, name):
         """Deleting host by name."""
@@ -430,6 +899,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
     @kaminario_logger
     def _get_volume_object(self, volume):
         vol_name = self.get_volume_name(volume.id)
+        if volume.replication_status == K2_REP_FAILED_OVER:
+            vol_name = self.get_rep_name(vol_name)
         LOG.debug("Searching volume : %s in K2.", vol_name)
         vol_rs = self.client.search("volumes", name=vol_name)
         if not hasattr(vol_rs, 'hits') or vol_rs.total == 0:
@@ -454,11 +925,13 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
         pass
 
     @kaminario_logger
-    def terminate_connection(self, volume, connector, **kwargs):
+    def terminate_connection(self, volume, connector):
         """Terminate connection of volume from host."""
         # Get volume object
         if type(volume).__name__ != 'RestObject':
             vol_name = self.get_volume_name(volume.id)
+            if volume.replication_status == K2_REP_FAILED_OVER:
+                vol_name = self.get_rep_name(vol_name)
             LOG.debug("Searching volume: %s in K2.", vol_name)
             volume_rs = self.client.search("volumes", name=vol_name)
             if hasattr(volume_rs, "hits") and volume_rs.total != 0:
@@ -511,3 +984,158 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
 
     def _get_host_object(self, connector):
         pass
+
+    def _get_is_dedup(self, vol_type):
+        if vol_type:
+            specs_val = vol_type.get('extra_specs', {}).get(
+                'kaminario:thin_prov_type')
+            if specs_val == 'nodedup':
+                return False
+            elif CONF.kaminario_nodedup_substring in vol_type.get('name'):
+                LOG.info(_LI("'kaminario_nodedup_substring' option is "
+                             "deprecated in favour of 'kaminario:thin_prov_"
+                             "type' in extra-specs and will be removed in "
+                             "the 10.0.0 release."))
+                return False
+            else:
+                return True
+        else:
+            return True
+
+    def _get_is_replica(self, vol_type):
+        replica = False
+        if vol_type and vol_type.get('extra_specs'):
+            specs = vol_type.get('extra_specs')
+            if (specs.get('kaminario:replication') == 'enabled' and
+                    self.replica):
+                replica = True
+        return replica
+
+    def _get_replica_status(self, vg_name):
+        vg_rs = self.client.search("volume_groups", name=vg_name)
+        if vg_rs.total:
+            vg = vg_rs.hits[0]
+            if self.client.search("replication/sessions",
+                                  local_volume_group=vg).total:
+                return True
+        return False
+
+    def manage_existing(self, volume, existing_ref):
+        vol_name = existing_ref['source-name']
+        new_name = self.get_volume_name(volume.id)
+        vg_new_name = self.get_volume_group_name(volume.id)
+        vg_name = None
+        is_dedup = self._get_is_dedup(volume.get('volume_type'))
+        try:
+            LOG.debug("Searching volume: %s in K2.", vol_name)
+            vol = self.client.search("volumes", name=vol_name).hits[0]
+            vg = vol.volume_group
+            vg_replica = self._get_replica_status(vg.name)
+            vol_map = False
+            if self.client.search("mappings", volume=vol).total != 0:
+                vol_map = True
+            if is_dedup != vg.is_dedup or vg_replica or vol_map:
+                raise exception.ManageExistingInvalidReference(
+                    existing_ref=existing_ref,
+                    reason=_('Manage volume type invalid.'))
+            vol.name = new_name
+            vg_name = vg.name
+            LOG.debug("Manage new volume name: %s", new_name)
+            vg.name = vg_new_name
+            LOG.debug("Manage volume group name: %s", vg_new_name)
+            vg.save()
+            LOG.debug("Manage volume: %s in K2.", vol_name)
+            vol.save()
+        except Exception as ex:
+            vg_rs = self.client.search("volume_groups", name=vg_new_name)
+            if hasattr(vg_rs, 'hits') and vg_rs.total != 0:
+                vg = vg_rs.hits[0]
+                if vg_name and vg.name == vg_new_name:
+                    vg.name = vg_name
+                    LOG.debug("Updating vg new name to old name: %s", vg_name)
+                    vg.save()
+            LOG.exception(_LE("Manage volume: %s failed."), vol_name)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=six.text_type(ex.message))
+
+    def manage_existing_get_size(self, volume, existing_ref):
+        vol_name = existing_ref['source-name']
+        v_rs = self.client.search("volumes", name=vol_name)
+        if hasattr(v_rs, 'hits') and v_rs.total != 0:
+            vol = v_rs.hits[0]
+            size = vol.size / units.Mi
+            return math.ceil(size)
+        else:
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=_('Unable to get size of the volume to manage.'))
+
+    def after_volume_copy(self, ctxt, volume, new_volume, remote=None):
+        self.delete_volume(volume)
+        vg_name_old = self.get_volume_group_name(volume.id)
+        vol_name_old = self.get_volume_name(volume.id)
+        vg_name_new = self.get_volume_group_name(new_volume.id)
+        vol_name_new = self.get_volume_name(new_volume.id)
+        vg_new = self.client.search("volume_groups", name=vg_name_new).hits[0]
+        vg_new.name = vg_name_old
+        vg_new.save()
+        vol_new = self.client.search("volumes", name=vol_name_new).hits[0]
+        vol_new.name = vol_name_old
+        vol_new.save()
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        old_type = volume.get('volume_type')
+        vg_name = self.get_volume_group_name(volume.id)
+        vol_name = self.get_volume_name(volume.id)
+        vol_rs = self.client.search("volumes", name=vol_name)
+        if vol_rs.total:
+            vol = vol_rs.hits[0]
+            vmap = self.client.search("mappings", volume=vol).total
+        old_rep_type = self._get_replica_status(vg_name)
+        new_rep_type = self._get_is_replica(new_type)
+        new_prov_type = self._get_is_dedup(new_type)
+        old_prov_type = self._get_is_dedup(old_type)
+        # Changing dedup<->nodedup together with adding or removing
+        # replication is complex in K2, since K2 has no API to change
+        # dedup<->nodedup.
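+        # The cases handled below are therefore:
+        #   - same provisioning, replication newly enabled  -> add replica
+        #   - same provisioning, replication newly disabled -> delete replica
+        #   - same provisioning, replication off both ways  -> needs migration
+        #   - provisioning change (dedup<->nodedup)         -> unsupported here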
+        if new_prov_type == old_prov_type:
+            if not old_rep_type and new_rep_type:
+                self._add_replication(volume)
+                return True
+            elif old_rep_type and not new_rep_type:
+                self._delete_replication(volume)
+                return True
+            elif not new_rep_type and not old_rep_type:
+                msg = ("Use '--migration-policy on-demand' to change 'dedup "
+                       "without replication'<->'nodedup without replication'.")
+                if vol_rs.total and vmap:
+                    msg = "Detach the volume and {0}".format(msg)
+                LOG.debug(msg)
+                return False
+        else:
+            LOG.error(_LE('Changing from type %(type1)s to type %(type2)s '
+                          'is not supported directly in K2.'),
+                      {'type1': old_type, 'type2': new_type})
+            return False
+
+    def _add_replication(self, volume):
+        vg_name = self.get_volume_group_name(volume.id)
+        vol_name = self.get_volume_name(volume.id)
+        if volume.replication_status == K2_REP_FAILED_OVER:
+            self._create_failover_volume_replica(volume, vg_name, vol_name)
+        else:
+            LOG.debug("Searching volume group with name: %(name)s",
+                      {'name': vg_name})
+            vg = self.client.search("volume_groups", name=vg_name).hits[0]
+            LOG.debug("Searching volume with name: %(name)s",
+                      {'name': vol_name})
+            vol = self.client.search("volumes", name=vol_name).hits[0]
+            self._create_volume_replica(volume, vg, vol, self.replica.rpo)
+
+    def _delete_replication(self, volume):
+        vg_name = self.get_volume_group_name(volume.id)
+        vol_name = self.get_volume_name(volume.id)
+        if volume.replication_status == K2_REP_FAILED_OVER:
+            self._delete_failover_volume_replica(volume, vg_name, vol_name)
+        else:
+            self._delete_volume_replica(volume, vg_name, vol_name)
diff --git a/cinder/volume/drivers/kaminario/kaminario_fc.py b/cinder/volume/drivers/kaminario/kaminario_fc.py
index e8df19e66..7b0389f0e 100644
--- a/cinder/volume/drivers/kaminario/kaminario_fc.py
+++ b/cinder/volume/drivers/kaminario/kaminario_fc.py
@@ -17,17 +17,33 @@
 import six
 
 from oslo_log import log as logging
 
+from cinder import coordination
 from cinder import exception
 from cinder.i18n import _, _LE
+from cinder.objects import fields
 from cinder.volume.drivers.kaminario import kaminario_common as common
 from cinder.zonemanager import utils as fczm_utils
 
+K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
 LOG = logging.getLogger(__name__)
 
 kaminario_logger = common.kaminario_logger
 
 
 class KaminarioFCDriver(common.KaminarioCinderDriver):
-    """Kaminario K2 FC Volume Driver."""
+    """Kaminario K2 FC Volume Driver.
+
+    Version history:
+        1.0 - Initial driver
+        1.1 - Added manage/unmanage and extra-specs support for nodedup
+        1.2 - Added replication support
+        1.3 - Added retype support
+        1.4 - Added replication failback support
+    """
+
+    VERSION = '1.4'
+
+    # ThirdPartySystems wiki page name
+    CI_WIKI_NAME = "Kaminario_K2_CI"
 
     @kaminario_logger
     def __init__(self, *args, **kwargs):
@@ -37,20 +53,30 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
 
     @fczm_utils.AddFCZone
     @kaminario_logger
+    @coordination.synchronized('{self.k2_lock_name}')
     def initialize_connection(self, volume, connector):
         """Attach K2 volume to host."""
         # Check wwpns in host connector.
-        if not connector['wwpns']:
+        if not connector.get('wwpns'):
            msg = _("No wwpns found in host connector.")
            LOG.error(msg)
            raise exception.KaminarioCinderDriverException(reason=msg)
+        # To support replication failback
+        temp_client = None
+        if (hasattr(volume, 'replication_status') and
+                volume.replication_status == K2_REP_FAILED_OVER):
+            temp_client = self.client
+            self.client = self.target
         # Get target wwpns.
- target_wwpns = self.get_target_info() + target_wwpns = self.get_target_info(volume) # Map volume. lun = self.k2_initialize_connection(volume, connector) # Create initiator-target mapping. target_wwpns, init_target_map = self._build_initiator_target_map( connector, target_wwpns) + # To support replication failback + if temp_client: + self.client = temp_client # Return target volume information. return {'driver_volume_type': 'fibre_channel', 'data': {"target_discovered": True, @@ -59,7 +85,15 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): "initiator_target_map": init_target_map}} @fczm_utils.RemoveFCZone + @kaminario_logger + @coordination.synchronized('{self.k2_lock_name}') def terminate_connection(self, volume, connector, **kwargs): + # To support replication failback + temp_client = None + if (hasattr(volume, 'replication_status') and + volume.replication_status == K2_REP_FAILED_OVER): + temp_client = self.client + self.client = self.target super(KaminarioFCDriver, self).terminate_connection(volume, connector) properties = {"driver_volume_type": "fibre_channel", "data": {}} host_name = self.get_initiator_host_name(connector) @@ -68,15 +102,18 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): # is not attached to any volume if host_rs.total == 0: # Get target wwpns. - target_wwpns = self.get_target_info() + target_wwpns = self.get_target_info(volume) target_wwpns, init_target_map = self._build_initiator_target_map( connector, target_wwpns) properties["data"] = {"target_wwn": target_wwpns, "initiator_target_map": init_target_map} + # To support replication failback + if temp_client: + self.client = temp_client return properties @kaminario_logger - def get_target_info(self): + def get_target_info(self, volume): LOG.debug("Searching target wwpns in K2.") fc_ports_rs = self.client.search("system/fc_ports") target_wwpns = [] @@ -140,7 +177,7 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): if self.lookup_service is not None: # use FC san lookup. dev_map = self.lookup_service.get_device_mapping_from_network( - connector['wwpns'], + connector.get('wwpns'), all_target_wwns) for fabric_name in dev_map: @@ -154,7 +191,7 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): init_targ_map[initiator])) target_wwns = list(set(target_wwns)) else: - initiator_wwns = connector['wwpns'] + initiator_wwns = connector.get('wwpns', []) target_wwns = all_target_wwns for initiator in initiator_wwns: diff --git a/cinder/volume/drivers/kaminario/kaminario_iscsi.py b/cinder/volume/drivers/kaminario/kaminario_iscsi.py index 39003a329..549a9e986 100644 --- a/cinder/volume/drivers/kaminario/kaminario_iscsi.py +++ b/cinder/volume/drivers/kaminario/kaminario_iscsi.py @@ -17,19 +17,35 @@ import six from oslo_log import log as logging +from cinder import coordination from cinder import exception from cinder.i18n import _, _LE from cinder import interface +from cinder.objects import fields from cinder.volume.drivers.kaminario import kaminario_common as common ISCSI_TCP_PORT = "3260" +K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER LOG = logging.getLogger(__name__) kaminario_logger = common.kaminario_logger @interface.volumedriver class KaminarioISCSIDriver(common.KaminarioCinderDriver): - """Kaminario K2 iSCSI Volume Driver.""" + """Kaminario K2 iSCSI Volume Driver. 
+ + Version history: + 1.0 - Initial driver + 1.1 - Added manage/unmanage and extra-specs support for nodedup + 1.2 - Added replication support + 1.3 - Added retype support + 1.4 - Added replication failback support + """ + + VERSION = '1.4' + + # ThirdPartySystems wiki page name + CI_WIKI_NAME = "Kaminario_K2_CI" @kaminario_logger def __init__(self, *args, **kwargs): @@ -37,12 +53,22 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver): self._protocol = 'iSCSI' @kaminario_logger + @coordination.synchronized('{self.k2_lock_name}') def initialize_connection(self, volume, connector): """Attach K2 volume to host.""" + # To support replication failback + temp_client = None + if (hasattr(volume, 'replication_status') and + volume.replication_status == K2_REP_FAILED_OVER): + temp_client = self.client + self.client = self.target # Get target_portal and target iqn. - iscsi_portal, target_iqn = self.get_target_info() + iscsi_portal, target_iqn = self.get_target_info(volume) # Map volume. lun = self.k2_initialize_connection(volume, connector) + # To support replication failback + if temp_client: + self.client = temp_client # Return target volume information. return {"driver_volume_type": "iscsi", "data": {"target_iqn": target_iqn, @@ -51,7 +77,22 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver): "target_discovered": True}} @kaminario_logger - def get_target_info(self): + @coordination.synchronized('{self.k2_lock_name}') + def terminate_connection(self, volume, connector, **kwargs): + # To support replication failback + temp_client = None + if (hasattr(volume, 'replication_status') and + volume.replication_status == K2_REP_FAILED_OVER): + temp_client = self.client + self.client = self.target + super(KaminarioISCSIDriver, self).terminate_connection(volume, + connector) + # To support replication failback + if temp_client: + self.client = temp_client + + @kaminario_logger + def get_target_info(self, volume): LOG.debug("Searching first iscsi port ip without wan in K2.") iscsi_ip_rs = self.client.search("system/net_ips", wan_port="") iscsi_ip = target_iqn = None diff --git a/cinder/volume/drivers/lenovo/lenovo_fc.py b/cinder/volume/drivers/lenovo/lenovo_fc.py index ba24af36c..7e8bc42de 100644 --- a/cinder/volume/drivers/lenovo/lenovo_fc.py +++ b/cinder/volume/drivers/lenovo/lenovo_fc.py @@ -30,6 +30,9 @@ class LenovoFCDriver(dothill_fc.DotHillFCDriver): VERSION = "1.0" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Vedams-LenovoStorage_FCISCSI_CI" + def __init__(self, *args, **kwargs): super(LenovoFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(lenovo_common.common_opts) diff --git a/cinder/volume/drivers/lenovo/lenovo_iscsi.py b/cinder/volume/drivers/lenovo/lenovo_iscsi.py index 77b7cbec4..647dabd9b 100644 --- a/cinder/volume/drivers/lenovo/lenovo_iscsi.py +++ b/cinder/volume/drivers/lenovo/lenovo_iscsi.py @@ -30,6 +30,9 @@ class LenovoISCSIDriver(dothill_iscsi.DotHillISCSIDriver): VERSION = "1.0" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Vedams-LenovoStorage_FCISCSI_CI" + def __init__(self, *args, **kwargs): super(LenovoISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(lenovo_common.common_opts) diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py index c7141646a..3fa806004 100644 --- a/cinder/volume/drivers/lvm.py +++ b/cinder/volume/drivers/lvm.py @@ -82,6 +82,9 @@ class LVMVolumeDriver(driver.VolumeDriver): VERSION = '3.0.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = 
"Cinder_Jenkins" + def __init__(self, vg_obj=None, *args, **kwargs): # Parent sets db, host, _execute and base config super(LVMVolumeDriver, self).__init__(*args, **kwargs) @@ -178,6 +181,12 @@ class LVMVolumeDriver(driver.VolumeDriver): return snapshot_name return '_' + snapshot_name + def _unescape_snapshot(self, snapshot_name): + # Undo snapshot name change done by _escape_snapshot() + if not snapshot_name.startswith('_snapshot'): + return snapshot_name + return snapshot_name[1:] + def _create_volume(self, name, size, lvm_type, mirror_count, vg=None): vg_ref = self.vg if vg is not None: @@ -586,7 +595,8 @@ class LVMVolumeDriver(driver.VolumeDriver): lv_name = existing_ref['source-name'] self.vg.get_volume(lv_name) - if volutils.check_already_managed_volume(lv_name): + vol_id = volutils.extract_id_from_volume_name(lv_name) + if volutils.check_already_managed_volume(vol_id): raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name) # Attempt to rename the LV to match the OpenStack internal name. @@ -654,6 +664,61 @@ class LVMVolumeDriver(driver.VolumeDriver): existing_ref = {"source-name": existing_ref} return self.manage_existing(snapshot_temp, existing_ref) + def _get_manageable_resource_info(self, cinder_resources, resource_type, + marker, limit, offset, sort_keys, + sort_dirs): + entries = [] + lvs = self.vg.get_volumes() + cinder_ids = [resource['id'] for resource in cinder_resources] + + for lv in lvs: + is_snap = self.vg.lv_is_snapshot(lv['name']) + if ((resource_type == 'volume' and is_snap) or + (resource_type == 'snapshot' and not is_snap)): + continue + + if resource_type == 'volume': + potential_id = volutils.extract_id_from_volume_name(lv['name']) + else: + unescape = self._unescape_snapshot(lv['name']) + potential_id = volutils.extract_id_from_snapshot_name(unescape) + lv_info = {'reference': {'source-name': lv['name']}, + 'size': int(math.ceil(float(lv['size']))), + 'cinder_id': None, + 'extra_info': None} + + if potential_id in cinder_ids: + lv_info['safe_to_manage'] = False + lv_info['reason_not_safe'] = 'already managed' + lv_info['cinder_id'] = potential_id + elif self.vg.lv_is_open(lv['name']): + lv_info['safe_to_manage'] = False + lv_info['reason_not_safe'] = '%s in use' % resource_type + else: + lv_info['safe_to_manage'] = True + lv_info['reason_not_safe'] = None + + if resource_type == 'snapshot': + origin = self.vg.lv_get_origin(lv['name']) + lv_info['source_reference'] = {'source-name': origin} + + entries.append(lv_info) + + return volutils.paginate_entries_list(entries, marker, limit, offset, + sort_keys, sort_dirs) + + def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, + sort_keys, sort_dirs): + return self._get_manageable_resource_info(cinder_volumes, 'volume', + marker, limit, + offset, sort_keys, sort_dirs) + + def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, + sort_keys, sort_dirs): + return self._get_manageable_resource_info(cinder_snapshots, 'snapshot', + marker, limit, + offset, sort_keys, sort_dirs) + def retype(self, context, volume, new_type, diff, host): """Retypes a volume, allow QoS and extra_specs change.""" diff --git a/cinder/volume/drivers/netapp/dataontap/block_7mode.py b/cinder/volume/drivers/netapp/dataontap/block_7mode.py index 0c184c1db..dc8fed078 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_7mode.py +++ b/cinder/volume/drivers/netapp/dataontap/block_7mode.py @@ -194,8 +194,12 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary): def 
_clone_lun(self, name, new_name, space_reserved=None,
                    qos_policy_group_name=None, src_block=0, dest_block=0,
-                  block_count=0, source_snapshot=None):
-        """Clone LUN with the given handle to the new name."""
+                  block_count=0, source_snapshot=None, is_snapshot=False):
+        """Clone LUN with the given handle to the new name.
+
+        :param is_snapshot: Not used; present for method signature consistency
+        """
+
         if not space_reserved:
             space_reserved = self.lun_space_reservation
         if qos_policy_group_name is not None:
@@ -297,6 +301,7 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
             pool = dict()
             pool['pool_name'] = volume_name
             pool['QoS_support'] = False
+            pool['multiattach'] = True
             pool['reserved_percentage'] = (
                 self.reserved_percentage)
             pool['max_over_subscription_ratio'] = (
diff --git a/cinder/volume/drivers/netapp/dataontap/block_base.py b/cinder/volume/drivers/netapp/dataontap/block_base.py
index 48066d214..d9d1b3952 100644
--- a/cinder/volume/drivers/netapp/dataontap/block_base.py
+++ b/cinder/volume/drivers/netapp/dataontap/block_base.py
@@ -32,6 +32,7 @@ import uuid
 
 from oslo_log import log as logging
 from oslo_log import versionutils
+from oslo_service import loopingcall
 from oslo_utils import excutils
 from oslo_utils import units
 import six
@@ -46,6 +47,7 @@ from cinder.volume import utils as volume_utils
 from cinder.zonemanager import utils as fczm_utils
 
 LOG = logging.getLogger(__name__)
+HOUSEKEEPING_INTERVAL_SECONDS = 600  # ten minutes
 
 
 class NetAppLun(object):
@@ -103,6 +105,8 @@ class NetAppBlockStorageLibrary(object):
         self.lun_space_reservation = 'true'
         self.lookup_service = fczm_utils.create_lookup_service()
         self.app_version = kwargs.get("app_version", "unknown")
+        self.host = kwargs.get('host')
+        self.backend_name = self.host.split('@')[1]
 
         self.configuration = kwargs['configuration']
         self.configuration.append_config_values(na_opts.netapp_connection_opts)
@@ -167,6 +171,21 @@ class NetAppBlockStorageLibrary(object):
             self._extract_and_populate_luns(lun_list)
         LOG.debug("Success getting list of LUNs from server.")
 
+        self._start_periodic_tasks()
+
+    def _start_periodic_tasks(self):
+        """Start recurring tasks common to all Data ONTAP block drivers."""
+
+        # Start the task that runs other housekeeping tasks, such as deletion
+        # of previously soft-deleted storage artifacts.
+        housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
+            self._handle_housekeeping_tasks)
+        housekeeping_periodic_task.start(
+            interval=HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)
+
+    def _handle_housekeeping_tasks(self):
+        """Handle various cleanup activities."""
+
     def get_pool(self, volume):
         """Return pool name where volume resides.
@@ -275,7 +294,8 @@ class NetAppBlockStorageLibrary(object): vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] lun = self._get_lun_from_table(vol_name) - self._clone_lun(lun.name, snapshot_name, space_reserved='false') + self._clone_lun(lun.name, snapshot_name, space_reserved='false', + is_snapshot=True) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" @@ -453,7 +473,7 @@ class NetAppBlockStorageLibrary(object): def _clone_lun(self, name, new_name, space_reserved='true', qos_policy_group_name=None, src_block=0, dest_block=0, - block_count=0, source_snapshot=None): + block_count=0, source_snapshot=None, is_snapshot=False): """Clone LUN with the given name to the new name.""" raise NotImplementedError() @@ -1061,32 +1081,11 @@ class NetAppBlockStorageLibrary(object): source_snapshot=cgsnapshot['id']) for flexvol in flexvols: - self._handle_busy_snapshot(flexvol, cgsnapshot['id']) + self.zapi_client.wait_for_busy_snapshot(flexvol, cgsnapshot['id']) self.zapi_client.delete_snapshot(flexvol, cgsnapshot['id']) return None, None - @utils.retry(exception.SnapshotIsBusy) - def _handle_busy_snapshot(self, flexvol, snapshot_name): - """Checks for and handles a busy snapshot. - - If a snapshot is not busy, take no action. If a snapshot is busy for - reasons other than a clone dependency, raise immediately. Otherwise, - since we always start a clone split operation after cloning a share, - wait up to a minute for a clone dependency to clear before giving up. - """ - snapshot = self.zapi_client.get_snapshot(flexvol, snapshot_name) - if not snapshot['busy']: - LOG.info(_LI("Backing consistency group snapshot %s " - "available for deletion"), snapshot_name) - return - else: - LOG.debug('Snapshot %(snap)s for vol %(vol)s is busy, waiting ' - 'for volume clone dependency to clear.', - {'snap': snapshot_name, 'vol': flexvol}) - - raise exception.SnapshotIsBusy(snapshot_name=snapshot_name) - def delete_cgsnapshot(self, cgsnapshot, snapshots): """Delete LUNs backing each snapshot in the cgsnapshot. 
diff --git a/cinder/volume/drivers/netapp/dataontap/block_cmode.py b/cinder/volume/drivers/netapp/dataontap/block_cmode.py index b0a32f0ee..b97739b04 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/block_cmode.py @@ -33,20 +33,21 @@ from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap import block_base -from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities +from cinder.volume.drivers.netapp.dataontap.utils import data_motion +from cinder.volume.drivers.netapp.dataontap.utils import utils as cmode_utils from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) -QOS_CLEANUP_INTERVAL_SECONDS = 60 SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly @six.add_metaclass(utils.TraceWrapperMetaclass) -class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): +class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary, + data_motion.DataMotionMixin): """NetApp block storage library for Data ONTAP (Cluster-mode).""" REQUIRED_CMODE_FLAGS = ['netapp_vserver'] @@ -57,27 +58,42 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): **kwargs) self.configuration.append_config_values(na_opts.netapp_cluster_opts) self.driver_mode = 'cluster' + self.failed_over_backend_name = kwargs.get('active_backend_id') + self.failed_over = self.failed_over_backend_name is not None + self.replication_enabled = ( + True if self.get_replication_backend_names( + self.configuration) else False) def do_setup(self, context): super(NetAppBlockStorageCmodeLibrary, self).do_setup(context) na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) - self.vserver = self.configuration.netapp_vserver - - self.zapi_client = client_cmode.Client( - transport_type=self.configuration.netapp_transport_type, - username=self.configuration.netapp_login, - password=self.configuration.netapp_password, - hostname=self.configuration.netapp_server_hostname, - port=self.configuration.netapp_server_port, - vserver=self.vserver) + # cDOT API client + self.zapi_client = cmode_utils.get_client_for_backend( + self.failed_over_backend_name or self.backend_name) + self.vserver = self.zapi_client.vserver + # Performance monitoring library self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) + + # Storage service catalog self.ssc_library = capabilities.CapabilitiesLibrary( self.driver_protocol, self.vserver, self.zapi_client, self.configuration) + def _update_zapi_client(self, backend_name): + """Set cDOT API client for the specified config backend stanza name.""" + + self.zapi_client = cmode_utils.get_client_for_backend(backend_name) + self.vserver = self.zapi_client.vserver + self.ssc_library._update_for_failover(self.zapi_client, + self._get_flexvol_to_pool_map()) + ssc = self.ssc_library.get_ssc() + self.perf_library._update_for_failover(self.zapi_client, ssc) + # Clear LUN table cache + self.lun_table = {} + def check_for_setup_error(self): """Check that the driver is working and can communicate.""" self.ssc_library.check_api_permissions() @@ -89,9 +105,9 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): raise exception.NetAppDriverException(msg) 
super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error() - self._start_periodic_tasks() def _start_periodic_tasks(self): + """Start recurring tasks for NetApp cDOT block drivers.""" # Note(cknight): Run the task once in the current thread to prevent a # race with the first invocation of _update_volume_stats. @@ -104,12 +120,32 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): interval=SSC_UPDATE_INTERVAL_SECONDS, initial_delay=SSC_UPDATE_INTERVAL_SECONDS) - # Start the task that harvests soft-deleted QoS policy groups. - harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall( - self.zapi_client.remove_unused_qos_policy_groups) - harvest_qos_periodic_task.start( - interval=QOS_CLEANUP_INTERVAL_SECONDS, - initial_delay=QOS_CLEANUP_INTERVAL_SECONDS) + super(NetAppBlockStorageCmodeLibrary, self)._start_periodic_tasks() + + def _handle_housekeeping_tasks(self): + """Handle various cleanup activities.""" + (super(NetAppBlockStorageCmodeLibrary, self). + _handle_housekeeping_tasks()) + + # Harvest soft-deleted QoS policy groups + self.zapi_client.remove_unused_qos_policy_groups() + + active_backend = self.failed_over_backend_name or self.backend_name + + LOG.debug("Current service state: Replication enabled: %(" + "replication)s. Failed-Over: %(failed)s. Active Backend " + "ID: %(active)s", + { + 'replication': self.replication_enabled, + 'failed': self.failed_over, + 'active': active_backend, + }) + + # Create pool mirrors if whole-backend replication configured + if self.replication_enabled and not self.failed_over: + self.ensure_snapmirrors( + self.configuration, self.backend_name, + self.ssc_library.get_ssc_flexvol_names()) def _create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None): @@ -118,8 +154,9 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): self.zapi_client.create_lun( volume_name, lun_name, size, metadata, qos_policy_group_name) - def _create_lun_handle(self, metadata): + def _create_lun_handle(self, metadata, vserver=None): """Returns LUN handle based on filer type.""" + vserver = vserver or self.vserver return '%s:%s' % (self.vserver, metadata['Path']) def _find_mapped_lun_igroup(self, path, initiator_list): @@ -138,7 +175,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): def _clone_lun(self, name, new_name, space_reserved=None, qos_policy_group_name=None, src_block=0, dest_block=0, - block_count=0, source_snapshot=None): + block_count=0, source_snapshot=None, is_snapshot=False): """Clone LUN with the given handle to the new name.""" if not space_reserved: space_reserved = self.lun_space_reservation @@ -149,7 +186,8 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): qos_policy_group_name=qos_policy_group_name, src_block=src_block, dest_block=dest_block, block_count=block_count, - source_snapshot=source_snapshot) + source_snapshot=source_snapshot, + is_snapshot=is_snapshot) LOG.debug("Cloned LUN with new name %s", new_name) lun = self.zapi_client.get_lun_by_args(vserver=self.vserver, @@ -185,7 +223,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): def _update_volume_stats(self, filter_function=None, goodness_function=None): - """Retrieve stats info from vserver.""" + """Retrieve backend stats.""" LOG.debug('Updating volume stats') data = {} @@ -198,6 +236,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): filter_function=filter_function, 
goodness_function=goodness_function) data['sparse_copy_volume'] = True + data.update(self.get_replication_backend_stats(self.configuration)) self.zapi_client.provide_ems(self, self.driver_name, self.app_version) self._stats = data @@ -216,8 +255,13 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): if not ssc: return pools + # Get up-to-date node utilization metrics just once self.perf_library.update_performance_cache(ssc) + # Get up-to-date aggregate capacities just once + aggregates = self.ssc_library.get_ssc_aggregates() + aggr_capacities = self.zapi_client.get_aggregate_capacities(aggregates) + for ssc_vol_name, ssc_vol_info in ssc.items(): pool = dict() @@ -227,6 +271,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): # Add driver capabilities and config info pool['QoS_support'] = True + pool['multiattach'] = True pool['consistencygroup_support'] = True pool['reserved_percentage'] = self.reserved_percentage pool['max_over_subscription_ratio'] = ( @@ -245,6 +290,11 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): pool['provisioned_capacity_gb'] = round( pool['total_capacity_gb'] - pool['free_capacity_gb'], 2) + aggregate_name = ssc_vol_info.get('netapp_aggregate') + aggr_capacity = aggr_capacities.get(aggregate_name, {}) + pool['netapp_aggregate_used_percent'] = aggr_capacity.get( + 'percent-used', 0) + # Add utilization data utilization = self.perf_library.get_node_utilization_for_pool( ssc_vol_name) @@ -357,3 +407,8 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): qos_policy_group_info = None self._mark_qos_policy_group_for_deletion(qos_policy_group_info) super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume) + + def failover_host(self, context, volumes, secondary_id=None): + """Failover a backend to a secondary replication target.""" + + return self._failover_host(volumes, secondary_id=secondary_id) diff --git a/cinder/volume/drivers/netapp/dataontap/client/api.py b/cinder/volume/drivers/netapp/dataontap/client/api.py index 8ce048cdd..a4d2c2d6b 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/api.py +++ b/cinder/volume/drivers/netapp/dataontap/client/api.py @@ -37,10 +37,18 @@ from cinder import utils LOG = logging.getLogger(__name__) +EAPIERROR = '13001' EAPIPRIVILEGE = '13003' EAPINOTFOUND = '13005' -ESIS_CLONE_NOT_LICENSED = '14956' ESNAPSHOTNOTALLOWED = '13023' +ESIS_CLONE_NOT_LICENSED = '14956' +EOBJECTNOTFOUND = '15661' +ESOURCE_IS_DIFFERENT = '17105' +ERELATION_EXISTS = '17122' +ERELATION_NOT_QUIESCED = '17127' +ENOTRANSFER_IN_PROGRESS = '17130' +EANOTHER_OP_ACTIVE = '17131' +ETRANSFER_IN_PROGRESS = '17137' class NaServer(object): diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py b/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py index 3aef869d1..90f7cbae0 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py @@ -326,14 +326,19 @@ class Client(client_base.Client): raise exception.NotFound(_('No storage path found for export path %s') % (export_path)) - def clone_file(self, src_path, dest_path): + def clone_file(self, src_path, dest_path, source_snapshot=None): LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s", {'src_path': src_path, 'dest_path': dest_path}) + zapi_args = { + 'source-path': src_path, + 'destination-path': dest_path, + 'no-snap': 'true', + } + if source_snapshot: + zapi_args['snapshot-name'] = 
source_snapshot + clone_start = netapp_api.NaElement.create_node_with_children( - 'clone-start', - **{'source-path': src_path, - 'destination-path': dest_path, - 'no-snap': 'true'}) + 'clone-start', **zapi_args) result = self.connection.invoke_successfully(clone_start, enable_tunneling=True) clone_id_el = result.get_child_by_name('clone-id') diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_base.py b/cinder/volume/drivers/netapp/dataontap/client/client_base.py index 7435d1292..858fc3e2f 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_base.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_base.py @@ -73,6 +73,11 @@ class Client(object): minor = res.get_child_content('minor-version') return major, minor + def _strip_xml_namespace(self, string): + if string.startswith('{') and '}' in string: + return string.split('}', 1)[1] + return string + def check_is_naelement(self, elem): """Checks if object is instance of NaElement.""" if not isinstance(elem, netapp_api.NaElement): @@ -428,3 +433,27 @@ class Client(object): def _commit_cg_snapshot(self, cg_id): snapshot_commit = {'cg-id': cg_id} self.send_request('cg-commit', snapshot_commit) + + def get_snapshot(self, volume_name, snapshot_name): + """Gets a single snapshot.""" + raise NotImplementedError() + + @utils.retry(exception.SnapshotIsBusy) + def wait_for_busy_snapshot(self, flexvol, snapshot_name): + """Checks for and handles a busy snapshot. + + If a snapshot is busy, for reasons other than cloning, an exception is + raised immediately. Otherwise, wait for a period of time for the clone + dependency to finish before giving up. If the snapshot is not busy then + no action is taken and the method exits. + """ + snapshot = self.get_snapshot(flexvol, snapshot_name) + if not snapshot['busy']: + LOG.debug("Backing consistency group snapshot %s available for " + "deletion.", snapshot_name) + return + else: + LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting " + "for volume clone dependency to clear.", + {"snap": snapshot_name, "vol": flexvol}) + raise exception.SnapshotIsBusy(snapshot_name=snapshot_name) diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py index b83c52ba7..1e9464aa8 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py @@ -20,10 +20,11 @@ import math import re from oslo_log import log as logging +from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _, _LW, _LE from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base @@ -59,13 +60,19 @@ class Client(client_base.Client): ontapi_1_20 = ontapi_version >= (1, 20) ontapi_1_2x = (1, 20) <= ontapi_version < (1, 30) ontapi_1_30 = ontapi_version >= (1, 30) + ontapi_1_100 = ontapi_version >= (1, 100) + self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20) self.features.add_feature('USER_CAPABILITY_LIST', supported=ontapi_1_20) self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x) self.features.add_feature('FAST_CLONE_DELETE', supported=ontapi_1_30) self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', supported=ontapi_1_30) + self.features.add_feature('ADVANCED_DISK_PARTITIONING', + supported=ontapi_1_30) + self.features.add_feature('BACKUP_CLONE_PARAM', 
supported=ontapi_1_100) + self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontapi_1_30) def _invoke_vserver_api(self, na_element, vserver): server = copy.copy(self.connection) @@ -375,7 +382,7 @@ class Client(client_base.Client): def clone_lun(self, volume, name, new_name, space_reserved='true', qos_policy_group_name=None, src_block=0, dest_block=0, - block_count=0, source_snapshot=None): + block_count=0, source_snapshot=None, is_snapshot=False): # zAPI can only handle 2^24 blocks per range bc_limit = 2 ** 24 # 8GB # zAPI can only handle 32 block ranges per call @@ -400,6 +407,8 @@ class Client(client_base.Client): } if source_snapshot: zapi_args['snapshot-name'] = source_snapshot + if is_snapshot and self.features.BACKUP_CLONE_PARAM: + zapi_args['is-backup'] = 'true' clone_create = netapp_api.NaElement.create_node_with_children( 'clone-create', **zapi_args) if qos_policy_group_name is not None: @@ -624,16 +633,26 @@ class Client(client_base.Client): "%(junction)s ") % msg_fmt) def clone_file(self, flex_vol, src_path, dest_path, vserver, - dest_exists=False): + dest_exists=False, source_snapshot=None, + is_snapshot=False): """Clones file on vserver.""" LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, " - "dest %(dest_path)s, vserver %(vserver)s", + "dest %(dest_path)s, vserver %(vserver)s," + "source_snapshot %(source_snapshot)s", {'volume': flex_vol, 'src_path': src_path, - 'dest_path': dest_path, 'vserver': vserver}) + 'dest_path': dest_path, 'vserver': vserver, + 'source_snapshot': source_snapshot}) + zapi_args = { + 'volume': flex_vol, + 'source-path': src_path, + 'destination-path': dest_path, + } + if is_snapshot and self.features.BACKUP_CLONE_PARAM: + zapi_args['is-backup'] = 'true' + if source_snapshot: + zapi_args['snapshot-name'] = source_snapshot clone_create = netapp_api.NaElement.create_node_with_children( - 'clone-create', - **{'volume': flex_vol, 'source-path': src_path, - 'destination-path': dest_path}) + 'clone-create', **zapi_args) major, minor = self.connection.get_api_version() if major == 1 and minor >= 20 and dest_exists: clone_create.add_new_child('destination-exists', 'true') @@ -877,6 +896,7 @@ class Client(client_base.Client): 'owning-vserver-name': None, 'junction-path': None, 'containing-aggregate-name': None, + 'type': None, }, 'volume-mirror-attributes': { 'is-data-protection-mirror': None, @@ -885,10 +905,18 @@ class Client(client_base.Client): 'volume-space-attributes': { 'is-space-guarantee-enabled': None, 'space-guarantee': None, + 'percentage-snapshot-reserve': None, + 'size': None, }, 'volume-qos-attributes': { 'policy-group-name': None, - } + }, + 'volume-snapshot-attributes': { + 'snapshot-policy': None, + }, + 'volume-language-attributes': { + 'language-code': None, + }, }, }, } @@ -911,6 +939,10 @@ class Client(client_base.Client): 'volume-space-attributes') or netapp_api.NaElement('none') volume_qos_attributes = volume_attributes.get_child_by_name( 'volume-qos-attributes') or netapp_api.NaElement('none') + volume_snapshot_attributes = volume_attributes.get_child_by_name( + 'volume-snapshot-attributes') or netapp_api.NaElement('none') + volume_language_attributes = volume_attributes.get_child_by_name( + 'volume-language-attributes') or netapp_api.NaElement('none') volume = { 'name': volume_id_attributes.get_child_content('name'), @@ -920,13 +952,22 @@ class Client(client_base.Client): 'junction-path'), 'aggregate': volume_id_attributes.get_child_content( 'containing-aggregate-name'), + 'type': 
volume_id_attributes.get_child_content('type'), 'space-guarantee-enabled': strutils.bool_from_string( volume_space_attributes.get_child_content( 'is-space-guarantee-enabled')), 'space-guarantee': volume_space_attributes.get_child_content( 'space-guarantee'), + 'percentage-snapshot-reserve': ( + volume_space_attributes.get_child_content( + 'percentage-snapshot-reserve')), + 'size': volume_space_attributes.get_child_content('size'), 'qos-policy-group': volume_qos_attributes.get_child_content( - 'policy-group-name') + 'policy-group-name'), + 'snapshot-policy': volume_snapshot_attributes.get_child_content( + 'snapshot-policy'), + 'language': volume_language_attributes.get_child_content( + 'language-code'), } return volume @@ -951,8 +992,8 @@ class Client(client_base.Client): try: result = self.send_iter_request('sis-get-iter', api_args) except netapp_api.NaApiError: - msg = _('Failed to get dedupe info for volume %s.') - LOG.exception(msg % flexvol_name) + msg = _LE('Failed to get dedupe info for volume %s.') + LOG.exception(msg, flexvol_name) return {'compression': False, 'dedupe': False} if self._get_record_count(result) != 1: @@ -993,8 +1034,8 @@ class Client(client_base.Client): try: result = self.send_iter_request('snapmirror-get-iter', api_args) except netapp_api.NaApiError: - msg = _('Failed to get SnapMirror info for volume %s.') - LOG.exception(msg % flexvol_name) + msg = _LE('Failed to get SnapMirror info for volume %s.') + LOG.exception(msg, flexvol_name) return False if not self._has_records(result): @@ -1002,6 +1043,106 @@ class Client(client_base.Client): return True + def create_flexvol(self, flexvol_name, aggregate_name, size_gb, + space_guarantee_type=None, snapshot_policy=None, + language=None, dedupe_enabled=False, + compression_enabled=False, snapshot_reserve=None, + volume_type='rw'): + + """Creates a volume.""" + api_args = { + 'containing-aggr-name': aggregate_name, + 'size': six.text_type(size_gb) + 'g', + 'volume': flexvol_name, + 'volume-type': volume_type, + } + if volume_type == 'dp': + snapshot_policy = None + else: + api_args['junction-path'] = '/%s' % flexvol_name + if snapshot_policy is not None: + api_args['snapshot-policy'] = snapshot_policy + if space_guarantee_type: + api_args['space-reserve'] = space_guarantee_type + if language is not None: + api_args['language-code'] = language + if snapshot_reserve is not None: + api_args['percentage-snapshot-reserve'] = six.text_type( + snapshot_reserve) + self.send_request('volume-create', api_args) + + # cDOT compression requires that deduplication be enabled. 
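+        # A compression-only request therefore still enables dedupe first,
+        # so compression is always layered on top of deduplication.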
+ if dedupe_enabled or compression_enabled: + self.enable_flexvol_dedupe(flexvol_name) + if compression_enabled: + self.enable_flexvol_compression(flexvol_name) + + def flexvol_exists(self, volume_name): + """Checks if a flexvol exists on the storage array.""" + LOG.debug('Checking if volume %s exists', volume_name) + + api_args = { + 'query': { + 'volume-attributes': { + 'volume-id-attributes': { + 'name': volume_name, + }, + }, + }, + 'desired-attributes': { + 'volume-attributes': { + 'volume-id-attributes': { + 'name': None, + }, + }, + }, + } + result = self.send_iter_request('volume-get-iter', api_args) + return self._has_records(result) + + def rename_flexvol(self, orig_flexvol_name, new_flexvol_name): + """Set flexvol name.""" + api_args = { + 'volume': orig_flexvol_name, + 'new-volume-name': new_flexvol_name, + } + self.send_request('volume-rename', api_args) + + def mount_flexvol(self, flexvol_name, junction_path=None): + """Mounts a volume on a junction path.""" + api_args = { + 'volume-name': flexvol_name, + 'junction-path': (junction_path if junction_path + else '/%s' % flexvol_name) + } + self.send_request('volume-mount', api_args) + + def enable_flexvol_dedupe(self, flexvol_name): + """Enable deduplication on volume.""" + api_args = {'path': '/vol/%s' % flexvol_name} + self.send_request('sis-enable', api_args) + + def disable_flexvol_dedupe(self, flexvol_name): + """Disable deduplication on volume.""" + api_args = {'path': '/vol/%s' % flexvol_name} + self.send_request('sis-disable', api_args) + + def enable_flexvol_compression(self, flexvol_name): + """Enable compression on volume.""" + api_args = { + 'path': '/vol/%s' % flexvol_name, + 'enable-compression': 'true' + } + self.send_request('sis-set-config', api_args) + + def disable_flexvol_compression(self, flexvol_name): + """Disable compression on volume.""" + api_args = { + 'path': '/vol/%s' % flexvol_name, + 'enable-compression': 'false' + } + self.send_request('sis-set-config', api_args) + @utils.trace_method def delete_file(self, path_to_file): """Delete file at path.""" @@ -1082,6 +1223,7 @@ class Client(client_base.Client): 'aggregate-name': None, 'aggr-raid-attributes': { 'raid-type': None, + 'is-hybrid': None, }, }, } @@ -1090,8 +1232,8 @@ class Client(client_base.Client): aggrs = self._get_aggregates(aggregate_names=[aggregate_name], desired_attributes=desired_attributes) except netapp_api.NaApiError: - msg = _('Failed to get info for aggregate %s.') - LOG.exception(msg % aggregate_name) + msg = _LE('Failed to get info for aggregate %s.') + LOG.exception(msg, aggregate_name) return {} if len(aggrs) < 1: @@ -1104,25 +1246,50 @@ class Client(client_base.Client): aggregate = { 'name': aggr_attributes.get_child_content('aggregate-name'), 'raid-type': aggr_raid_attrs.get_child_content('raid-type'), + 'is-hybrid': strutils.bool_from_string( + aggr_raid_attrs.get_child_content('is-hybrid')), } return aggregate - def get_aggregate_disk_type(self, aggregate_name): - """Get the disk type of an aggregate.""" + def get_aggregate_disk_types(self, aggregate_name): + """Get the disk type(s) of an aggregate.""" - # Note(cknight): Only get 1 disk, since apart from hybrid - # aggregates all disks must be the same type. 
- api_args = { - 'max-records': 1, - 'query': { - 'storage-disk-info': { - 'disk-raid-info': { - 'disk-aggregate-info': { + disk_types = set() + disk_types.update(self._get_aggregate_disk_types(aggregate_name)) + if self.features.ADVANCED_DISK_PARTITIONING: + disk_types.update(self._get_aggregate_disk_types(aggregate_name, + shared=True)) + + return list(disk_types) if disk_types else None + + def _get_aggregate_disk_types(self, aggregate_name, shared=False): + """Get the disk type(s) of an aggregate (may be a list).""" + + disk_types = set() + + if shared: + disk_raid_info = { + 'disk-shared-info': { + 'aggregate-list': { + 'shared-aggregate-info': { 'aggregate-name': aggregate_name, }, }, }, + } + else: + disk_raid_info = { + 'disk-aggregate-info': { + 'aggregate-name': aggregate_name, + }, + } + + api_args = { + 'query': { + 'storage-disk-info': { + 'disk-raid-info': disk_raid_info, + }, }, 'desired-attributes': { 'storage-disk-info': { @@ -1132,29 +1299,82 @@ class Client(client_base.Client): }, }, } - try: - result = self.send_request('storage-disk-get-iter', api_args, - enable_tunneling=False) - except netapp_api.NaApiError: - msg = _('Failed to get disk info for aggregate %s.') - LOG.exception(msg % aggregate_name) - return 'unknown' - if self._get_record_count(result) != 1: - return 'unknown' + try: + result = self.send_iter_request( + 'storage-disk-get-iter', api_args, enable_tunneling=False) + except netapp_api.NaApiError: + msg = _LE('Failed to get disk info for aggregate %s.') + LOG.exception(msg, aggregate_name) + return disk_types attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for storage_disk_info in attributes_list.get_children(): - disk_raid_info = storage_disk_info.get_child_by_name( - 'disk-raid-info') or netapp_api.NaElement('none') - disk_type = disk_raid_info.get_child_content( - 'effective-disk-type') or 'unknown' - return disk_type + disk_raid_info = storage_disk_info.get_child_by_name( + 'disk-raid-info') or netapp_api.NaElement('none') + disk_type = disk_raid_info.get_child_content( + 'effective-disk-type') + if disk_type: + disk_types.add(disk_type) - return 'unknown' + return disk_types + + def get_aggregate_capacities(self, aggregate_names): + """Gets capacity info for multiple aggregates.""" + + if not isinstance(aggregate_names, list): + return {} + + aggregates = {} + for aggregate_name in aggregate_names: + aggregates[aggregate_name] = self.get_aggregate_capacity( + aggregate_name) + + return aggregates + + def get_aggregate_capacity(self, aggregate_name): + """Gets capacity info for an aggregate.""" + + desired_attributes = { + 'aggr-attributes': { + 'aggr-space-attributes': { + 'percent-used-capacity': None, + 'size-available': None, + 'size-total': None, + }, + }, + } + + try: + aggrs = self._get_aggregates(aggregate_names=[aggregate_name], + desired_attributes=desired_attributes) + except netapp_api.NaApiError: + msg = _LE('Failed to get info for aggregate %s.') + LOG.exception(msg, aggregate_name) + return {} + + if len(aggrs) < 1: + return {} + + aggr_attributes = aggrs[0] + aggr_space_attributes = aggr_attributes.get_child_by_name( + 'aggr-space-attributes') or netapp_api.NaElement('none') + + percent_used = int(aggr_space_attributes.get_child_content( + 'percent-used-capacity')) + size_available = float(aggr_space_attributes.get_child_content( + 'size-available')) + size_total = float( + aggr_space_attributes.get_child_content('size-total')) + + return { + 'percent-used': percent_used, + 
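The shared-disk branch above exists because aggregates built from partitioned (ADP) disks report membership under disk-shared-info rather than disk-aggregate-info, so both queries run and their results are merged. A toy illustration of that merge, with invented effective-disk-type values:

root_disk_types = {'SAS'}                 # plain disk-aggregate-info query
partitioned_disk_types = {'SAS', 'SSD'}   # disk-shared-info (ADP) query

all_disk_types = root_disk_types | partitioned_disk_types
print(sorted(all_disk_types))             # ['SAS', 'SSD'] on a hybrid aggregate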
'size-available': size_available, + 'size-total': size_total, + } def get_performance_instance_uuids(self, object_name, node_name): """Get UUIDs of performance instances for a cluster node.""" @@ -1308,3 +1528,492 @@ class Client(client_base.Client): 'volume %(vol)s.') msg_args = {'snap': snapshot_name, 'vol': volume_name} raise exception.VolumeBackendAPIException(data=msg % msg_args) + + def create_cluster_peer(self, addresses, username=None, password=None, + passphrase=None): + """Creates a cluster peer relationship.""" + + api_args = { + 'peer-addresses': [ + {'remote-inet-address': address} for address in addresses + ], + } + if username: + api_args['user-name'] = username + if password: + api_args['password'] = password + if passphrase: + api_args['passphrase'] = passphrase + + self.send_request('cluster-peer-create', api_args) + + def get_cluster_peers(self, remote_cluster_name=None): + """Gets one or more cluster peer relationships.""" + + api_args = {} + if remote_cluster_name: + api_args['query'] = { + 'cluster-peer-info': { + 'remote-cluster-name': remote_cluster_name, + } + } + + result = self.send_iter_request('cluster-peer-get-iter', api_args) + if not self._has_records(result): + return [] + + cluster_peers = [] + + for cluster_peer_info in result.get_child_by_name( + 'attributes-list').get_children(): + + cluster_peer = { + 'active-addresses': [], + 'peer-addresses': [] + } + + active_addresses = cluster_peer_info.get_child_by_name( + 'active-addresses') or netapp_api.NaElement('none') + for address in active_addresses.get_children(): + cluster_peer['active-addresses'].append(address.get_content()) + + peer_addresses = cluster_peer_info.get_child_by_name( + 'peer-addresses') or netapp_api.NaElement('none') + for address in peer_addresses.get_children(): + cluster_peer['peer-addresses'].append(address.get_content()) + + cluster_peer['availability'] = cluster_peer_info.get_child_content( + 'availability') + cluster_peer['cluster-name'] = cluster_peer_info.get_child_content( + 'cluster-name') + cluster_peer['cluster-uuid'] = cluster_peer_info.get_child_content( + 'cluster-uuid') + cluster_peer['remote-cluster-name'] = ( + cluster_peer_info.get_child_content('remote-cluster-name')) + cluster_peer['serial-number'] = ( + cluster_peer_info.get_child_content('serial-number')) + cluster_peer['timeout'] = cluster_peer_info.get_child_content( + 'timeout') + + cluster_peers.append(cluster_peer) + + return cluster_peers + + def delete_cluster_peer(self, cluster_name): + """Deletes a cluster peer relationship.""" + + api_args = {'cluster-name': cluster_name} + self.send_request('cluster-peer-delete', api_args) + + def get_cluster_peer_policy(self): + """Gets the cluster peering policy configuration.""" + + if not self.features.CLUSTER_PEER_POLICY: + return {} + + result = self.send_request('cluster-peer-policy-get') + + attributes = result.get_child_by_name( + 'attributes') or netapp_api.NaElement('none') + cluster_peer_policy = attributes.get_child_by_name( + 'cluster-peer-policy') or netapp_api.NaElement('none') + + policy = { + 'is-unauthenticated-access-permitted': + cluster_peer_policy.get_child_content( + 'is-unauthenticated-access-permitted'), + 'passphrase-minimum-length': + cluster_peer_policy.get_child_content( + 'passphrase-minimum-length'), + } + + if policy['is-unauthenticated-access-permitted'] is not None: + policy['is-unauthenticated-access-permitted'] = ( + strutils.bool_from_string( + policy['is-unauthenticated-access-permitted'])) + if 
policy['passphrase-minimum-length'] is not None: + policy['passphrase-minimum-length'] = int( + policy['passphrase-minimum-length']) + + return policy + + def set_cluster_peer_policy(self, is_unauthenticated_access_permitted=None, + passphrase_minimum_length=None): + """Modifies the cluster peering policy configuration.""" + + if not self.features.CLUSTER_PEER_POLICY: + return + + if (is_unauthenticated_access_permitted is None and + passphrase_minimum_length is None): + return + + api_args = {} + if is_unauthenticated_access_permitted is not None: + api_args['is-unauthenticated-access-permitted'] = ( + 'true' if strutils.bool_from_string( + is_unauthenticated_access_permitted) else 'false') + if passphrase_minimum_length is not None: + api_args['passphrase-minlength'] = six.text_type( + passphrase_minimum_length) + + self.send_request('cluster-peer-policy-modify', api_args) + + def create_vserver_peer(self, vserver_name, peer_vserver_name): + """Creates a Vserver peer relationship for SnapMirrors.""" + api_args = { + 'vserver': vserver_name, + 'peer-vserver': peer_vserver_name, + 'applications': [ + {'vserver-peer-application': 'snapmirror'}, + ], + } + self.send_request('vserver-peer-create', api_args) + + def delete_vserver_peer(self, vserver_name, peer_vserver_name): + """Deletes a Vserver peer relationship.""" + + api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name} + self.send_request('vserver-peer-delete', api_args) + + def accept_vserver_peer(self, vserver_name, peer_vserver_name): + """Accepts a pending Vserver peer relationship.""" + + api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name} + self.send_request('vserver-peer-accept', api_args) + + def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None): + """Gets one or more Vserver peer relationships.""" + + api_args = None + if vserver_name or peer_vserver_name: + api_args = {'query': {'vserver-peer-info': {}}} + if vserver_name: + api_args['query']['vserver-peer-info']['vserver'] = ( + vserver_name) + if peer_vserver_name: + api_args['query']['vserver-peer-info']['peer-vserver'] = ( + peer_vserver_name) + + result = self.send_iter_request('vserver-peer-get-iter', api_args) + if not self._has_records(result): + return [] + + vserver_peers = [] + + for vserver_peer_info in result.get_child_by_name( + 'attributes-list').get_children(): + + vserver_peer = { + 'vserver': vserver_peer_info.get_child_content('vserver'), + 'peer-vserver': + vserver_peer_info.get_child_content('peer-vserver'), + 'peer-state': + vserver_peer_info.get_child_content('peer-state'), + 'peer-cluster': + vserver_peer_info.get_child_content('peer-cluster'), + } + vserver_peers.append(vserver_peer) + + return vserver_peers + + def _ensure_snapmirror_v2(self): + """Verify support for SnapMirror control plane v2.""" + if not self.features.SNAPMIRROR_V2: + msg = _('SnapMirror features require Data ONTAP 8.2 or later.') + raise exception.NetAppDriverException(msg) + + def create_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume, + schedule=None, policy=None, + relationship_type='data_protection'): + """Creates a SnapMirror relationship (cDOT 8.2 or later only).""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + 'relationship-type': relationship_type, + } + if schedule: + api_args['schedule'] = schedule + if policy: + 
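Taken together, the peering methods above support a setup flow like the following sketch. Both client parameters are hypothetical Client instances, one per cluster, and the address list would name the source cluster's intercluster LIFs.

def peer_for_snapmirror(src_client, dest_client, src_addresses,
                        src_vserver, dest_vserver):
    # Sketch only: cluster peering first, then Vserver peering.
    dest_client.create_cluster_peer(src_addresses)
    # The Vserver relationship is requested from one side...
    src_client.create_vserver_peer(src_vserver, dest_vserver)
    # ...and accepted from the peer side.
    dest_client.accept_vserver_peer(dest_vserver, src_vserver)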
api_args['policy'] = policy + + try: + self.send_request('snapmirror-create', api_args) + except netapp_api.NaApiError as e: + if e.code != netapp_api.ERELATION_EXISTS: + raise + + def initialize_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume, + source_snapshot=None, transfer_priority=None): + """Initializes a SnapMirror relationship (cDOT 8.2 or later only).""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + } + if source_snapshot: + api_args['source-snapshot'] = source_snapshot + if transfer_priority: + api_args['transfer-priority'] = transfer_priority + + result = self.send_request('snapmirror-initialize', api_args) + + result_info = {} + result_info['operation-id'] = result.get_child_content( + 'result-operation-id') + result_info['status'] = result.get_child_content('result-status') + result_info['jobid'] = result.get_child_content('result-jobid') + result_info['error-code'] = result.get_child_content( + 'result-error-code') + result_info['error-message'] = result.get_child_content( + 'result-error-message') + + return result_info + + def release_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume, + relationship_info_only=False): + """Removes a SnapMirror relationship on the source endpoint.""" + self._ensure_snapmirror_v2() + + api_args = { + 'query': { + 'snapmirror-destination-info': { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + 'relationship-info-only': ('true' if relationship_info_only + else 'false'), + } + } + } + self.send_request('snapmirror-release-iter', api_args) + + def quiesce_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume): + """Disables future transfers to a SnapMirror destination.""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + } + self.send_request('snapmirror-quiesce', api_args) + + def abort_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume, + clear_checkpoint=False): + """Stops ongoing transfers for a SnapMirror relationship.""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + 'clear-checkpoint': 'true' if clear_checkpoint else 'false', + } + try: + self.send_request('snapmirror-abort', api_args) + except netapp_api.NaApiError as e: + if e.code != netapp_api.ENOTRANSFER_IN_PROGRESS: + raise + + def break_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume): + """Breaks a data protection SnapMirror relationship.""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + } + self.send_request('snapmirror-break', api_args) + + def modify_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume, + schedule=None, policy=None, tries=None, + max_transfer_rate=None): + """Modifies a SnapMirror 
relationship.""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + } + if schedule: + api_args['schedule'] = schedule + if policy: + api_args['policy'] = policy + if tries is not None: + api_args['tries'] = tries + if max_transfer_rate is not None: + api_args['max-transfer-rate'] = max_transfer_rate + + self.send_request('snapmirror-modify', api_args) + + def delete_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume): + """Destroys a SnapMirror relationship.""" + self._ensure_snapmirror_v2() + + api_args = { + 'query': { + 'snapmirror-info': { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + } + } + } + self.send_request('snapmirror-destroy-iter', api_args) + + def update_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume): + """Schedules a SnapMirror update.""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + } + try: + self.send_request('snapmirror-update', api_args) + except netapp_api.NaApiError as e: + if (e.code != netapp_api.ETRANSFER_IN_PROGRESS and + e.code != netapp_api.EANOTHER_OP_ACTIVE): + raise + + def resume_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume): + """Resume a SnapMirror relationship if it is quiesced.""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + } + try: + self.send_request('snapmirror-resume', api_args) + except netapp_api.NaApiError as e: + if e.code != netapp_api.ERELATION_NOT_QUIESCED: + raise + + def resync_snapmirror(self, source_vserver, source_volume, + destination_vserver, destination_volume): + """Resync a SnapMirror relationship.""" + self._ensure_snapmirror_v2() + + api_args = { + 'source-volume': source_volume, + 'source-vserver': source_vserver, + 'destination-volume': destination_volume, + 'destination-vserver': destination_vserver, + } + self.send_request('snapmirror-resync', api_args) + + def _get_snapmirrors(self, source_vserver=None, source_volume=None, + destination_vserver=None, destination_volume=None, + desired_attributes=None): + + query = None + if (source_vserver or source_volume or destination_vserver or + destination_volume): + query = {'snapmirror-info': {}} + if source_volume: + query['snapmirror-info']['source-volume'] = source_volume + if destination_volume: + query['snapmirror-info']['destination-volume'] = ( + destination_volume) + if source_vserver: + query['snapmirror-info']['source-vserver'] = source_vserver + if destination_vserver: + query['snapmirror-info']['destination-vserver'] = ( + destination_vserver) + + api_args = {} + if query: + api_args['query'] = query + if desired_attributes: + api_args['desired-attributes'] = desired_attributes + + result = self.send_iter_request('snapmirror-get-iter', api_args) + if not self._has_records(result): + return [] + else: + return result.get_child_by_name('attributes-list').get_children() + + def get_snapmirrors(self, source_vserver, source_volume, + destination_vserver, 
destination_volume, + desired_attributes=None): + """Gets one or more SnapMirror relationships. + + Either the source or destination info may be omitted. + Desired attributes should be a flat list of attribute names. + """ + self._ensure_snapmirror_v2() + + if desired_attributes is not None: + desired_attributes = { + 'snapmirror-info': {attr: None for attr in desired_attributes}, + } + + result = self._get_snapmirrors( + source_vserver=source_vserver, + source_volume=source_volume, + destination_vserver=destination_vserver, + destination_volume=destination_volume, + desired_attributes=desired_attributes) + + snapmirrors = [] + + for snapmirror_info in result: + snapmirror = {} + for child in snapmirror_info.get_children(): + name = self._strip_xml_namespace(child.get_name()) + snapmirror[name] = child.get_content() + snapmirrors.append(snapmirror) + + return snapmirrors + + def get_provisioning_options_from_flexvol(self, flexvol_name): + """Get a dict of provisioning options matching existing flexvol.""" + + flexvol_info = self.get_flexvol(flexvol_name=flexvol_name) + dedupe_info = self.get_flexvol_dedupe_info(flexvol_name) + + provisioning_opts = { + 'aggregate': flexvol_info['aggregate'], + # space-guarantee can be 'none', 'file', 'volume' + 'space_guarantee_type': flexvol_info.get('space-guarantee'), + 'snapshot_policy': flexvol_info['snapshot-policy'], + 'language': flexvol_info['language'], + 'dedupe_enabled': dedupe_info['dedupe'], + 'compression_enabled': dedupe_info['compression'], + 'snapshot_reserve': flexvol_info['percentage-snapshot-reserve'], + 'volume_type': flexvol_info['type'], + 'size': int(math.ceil(float(flexvol_info['size']) / units.Gi)), + } + + return provisioning_opts diff --git a/cinder/volume/drivers/netapp/dataontap/fc_7mode.py b/cinder/volume/drivers/netapp/dataontap/fc_7mode.py index bd5113c86..8a67ccc99 100644 --- a/cinder/volume/drivers/netapp/dataontap/fc_7mode.py +++ b/cinder/volume/drivers/netapp/dataontap/fc_7mode.py @@ -33,6 +33,10 @@ class NetApp7modeFibreChannelDriver(driver.BaseVD, DRIVER_NAME = 'NetApp_FibreChannel_7mode_direct' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "NetApp_CI" + VERSION = block_7mode.NetAppBlockStorage7modeLibrary.VERSION + def __init__(self, *args, **kwargs): super(NetApp7modeFibreChannelDriver, self).__init__(*args, **kwargs) self.library = block_7mode.NetAppBlockStorage7modeLibrary( @@ -129,3 +133,6 @@ class NetApp7modeFibreChannelDriver(driver.BaseVD, return self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, source_cg=source_cg, source_vols=source_vols) + + def failover_host(self, context, volumes, secondary_id=None): + raise NotImplementedError() diff --git a/cinder/volume/drivers/netapp/dataontap/fc_cmode.py b/cinder/volume/drivers/netapp/dataontap/fc_cmode.py index 391f42223..1c070cbf1 100644 --- a/cinder/volume/drivers/netapp/dataontap/fc_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/fc_cmode.py @@ -33,6 +33,10 @@ class NetAppCmodeFibreChannelDriver(driver.BaseVD, DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "NetApp_CI" + VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION + def __init__(self, *args, **kwargs): super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs) self.library = block_cmode.NetAppBlockStorageCmodeLibrary( @@ -129,3 +133,7 @@ class NetAppCmodeFibreChannelDriver(driver.BaseVD, return self.library.create_consistencygroup_from_src( group, volumes, 
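The protocol-driver changes that follow all apply one pattern: 7-mode drivers declare replication failover unsupported, while cDOT drivers delegate to the shared library. A condensed sketch of that split; class names here are illustrative, not from the patch.

class SevenModeDriverSketch(object):
    def failover_host(self, context, volumes, secondary_id=None):
        raise NotImplementedError()     # no replication on 7-mode

class CmodeDriverSketch(object):
    def __init__(self, library):
        self.library = library          # shared block/NFS cDOT library

    def failover_host(self, context, volumes, secondary_id=None):
        # Every cDOT protocol driver funnels into one implementation.
        return self.library.failover_host(
            context, volumes, secondary_id=secondary_id)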
cgsnapshot=cgsnapshot, snapshots=snapshots, source_cg=source_cg, source_vols=source_vols) + + def failover_host(self, context, volumes, secondary_id=None): + return self.library.failover_host( + context, volumes, secondary_id=secondary_id) diff --git a/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py b/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py index f523cb5fe..9ba51d838 100644 --- a/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py +++ b/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py @@ -32,6 +32,10 @@ class NetApp7modeISCSIDriver(driver.BaseVD, DRIVER_NAME = 'NetApp_iSCSI_7mode_direct' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "NetApp_CI" + VERSION = block_7mode.NetAppBlockStorage7modeLibrary.VERSION + def __init__(self, *args, **kwargs): super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs) self.library = block_7mode.NetAppBlockStorage7modeLibrary( @@ -126,3 +130,6 @@ class NetApp7modeISCSIDriver(driver.BaseVD, return self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, source_cg=source_cg, source_vols=source_vols) + + def failover_host(self, context, volumes, secondary_id=None): + raise NotImplementedError() diff --git a/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py b/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py index 29e8d25d9..e2cd7a2eb 100644 --- a/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py @@ -32,6 +32,10 @@ class NetAppCmodeISCSIDriver(driver.BaseVD, DRIVER_NAME = 'NetApp_iSCSI_Cluster_direct' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "NetApp_CI" + VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION + def __init__(self, *args, **kwargs): super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs) self.library = block_cmode.NetAppBlockStorageCmodeLibrary( @@ -126,3 +130,7 @@ class NetAppCmodeISCSIDriver(driver.BaseVD, return self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, source_cg=source_cg, source_vols=source_vols) + + def failover_host(self, context, volumes, secondary_id=None): + return self.library.failover_host( + context, volumes, secondary_id=secondary_id) diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py b/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py index a388b9279..98974d023 100644 --- a/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py +++ b/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py @@ -35,6 +35,7 @@ from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils +from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) @@ -45,6 +46,9 @@ LOG = logging.getLogger(__name__) class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver): """NetApp NFS driver for Data ONTAP (7-mode).""" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "NetApp_CI" + def __init__(self, *args, **kwargs): super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(na_opts.netapp_7mode_opts) @@ -79,14 +83,18 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver): super(NetApp7modeNfsDriver, self).check_for_setup_error() def _clone_backing_file_for_volume(self, volume_name, clone_name, - volume_id, share=None): - """Clone backing file for Cinder volume.""" + 
volume_id, share=None,
+                                      is_snapshot=False,
+                                      source_snapshot=None):
+        """Clone backing file for Cinder volume.
+
+        :param is_snapshot: Not used, present for method signature consistency
+        """
         (_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
         storage_path = self.zapi_client.get_actual_path_for_export(export_path)
         target_path = '%s/%s' % (storage_path, clone_name)
         self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
-                                    target_path)
+                                    target_path, source_snapshot)
 
     def _update_volume_stats(self):
         """Retrieve stats info from vserver."""
@@ -124,6 +132,7 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
             pool = dict()
             pool['pool_name'] = nfs_share
             pool['QoS_support'] = False
+            pool['multiattach'] = True
             pool.update(capacity)
 
             thick = not self.configuration.nfs_sparsed_volumes
@@ -134,6 +143,7 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
             pool['utilization'] = na_utils.round_down(utilization, '0.01')
             pool['filter_function'] = filter_function
             pool['goodness_function'] = goodness_function
+            pool['consistencygroup_support'] = True
 
             pools.append(pool)
 
@@ -212,3 +222,25 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
         """Set QoS policy on backend from volume type information."""
         # 7-mode DOT does not support QoS.
         return
+
+    def _get_backing_flexvol_names(self, hosts):
+        """Returns a set of flexvol names."""
+        flexvols = set()
+        for host in hosts:
+            pool_name = volume_utils.extract_host(host, level='pool')
+            flexvol_name = pool_name.rsplit('/', 1)[1]
+            flexvols.add(flexvol_name)
+        return flexvols
+
+    @utils.trace_method
+    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Delete files backing each snapshot in the cgsnapshot.
+
+        :return: An implicit update of snapshot models that the manager will
+                 interpret and subsequently set the model state to deleted.
+ """ + for snapshot in snapshots: + self._delete_file(snapshot['volume_id'], snapshot['name']) + LOG.debug("Snapshot %s deletion successful", snapshot['name']) + + return None, None diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_base.py b/cinder/volume/drivers/netapp/dataontap/nfs_base.py index f626a086c..ac7919e43 100644 --- a/cinder/volume/drivers/netapp/dataontap/nfs_base.py +++ b/cinder/volume/drivers/netapp/dataontap/nfs_base.py @@ -32,6 +32,7 @@ import time from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging +from oslo_service import loopingcall from oslo_utils import units import six from six.moves import urllib @@ -49,6 +50,7 @@ from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF +HOUSEKEEPING_INTERVAL_SECONDS = 600 # ten minutes @six.add_metaclass(utils.TraceWrapperWithABCMetaclass) @@ -60,6 +62,10 @@ class NetAppNfsDriver(driver.ManageableVD, # do not increment this as it may be used in volume type definitions VERSION = "1.0.0" + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "NetApp_CI" + REQUIRED_FLAGS = ['netapp_login', 'netapp_password', 'netapp_server_hostname'] DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70' @@ -76,6 +82,7 @@ class NetAppNfsDriver(driver.ManageableVD, self.configuration.append_config_values(na_opts.netapp_transport_opts) self.configuration.append_config_values(na_opts.netapp_img_cache_opts) self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts) + self.backend_name = self.host.split('@')[1] def do_setup(self, context): super(NetAppNfsDriver, self).do_setup(context) @@ -86,6 +93,20 @@ class NetAppNfsDriver(driver.ManageableVD, def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" super(NetAppNfsDriver, self).check_for_setup_error() + self._start_periodic_tasks() + + def _start_periodic_tasks(self): + """Start recurring tasks common to all Data ONTAP NFS drivers.""" + + # Start the task that runs other housekeeping tasks, such as deletion + # of previously soft-deleted storage artifacts. + housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall( + self._handle_housekeeping_tasks) + housekeeping_periodic_task.start( + interval=HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0) + + def _handle_housekeeping_tasks(self): + """Handle various cleanup activities.""" def get_pool(self, volume): """Return pool name where volume resides. 
@@ -213,17 +234,24 @@
         """Creates a snapshot."""
         self._clone_backing_file_for_volume(snapshot['volume_name'],
                                             snapshot['name'],
-                                            snapshot['volume_id'])
+                                            snapshot['volume_id'],
+                                            is_snapshot=True)
 
     def delete_snapshot(self, snapshot):
         """Deletes a snapshot."""
-        nfs_mount = self._get_provider_location(snapshot.volume_id)
+        self._delete_file(snapshot.volume_id, snapshot.name)
 
-        if self._volume_not_present(nfs_mount, snapshot.name):
-            return True
+    def _delete_file(self, file_id, file_name):
+        nfs_share = self._get_provider_location(file_id)
 
-        self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
-                      run_as_root=self._execute_as_root)
+        if self._volume_not_present(nfs_share, file_name):
+            LOG.debug('File %(file_name)s not found when attempting to delete '
+                      'from share %(share)s',
+                      {'file_name': file_name, 'share': nfs_share})
+            return
+
+        path = self._get_volume_path(nfs_share, file_name)
+        self._delete(path)
 
     def _get_volume_location(self, volume_id):
         """Returns NFS mount address as <nfs_server_ip>:<export_path>."""
@@ -232,10 +260,16 @@ class NetAppNfsDriver(driver.ManageableVD,
         return nfs_server_ip + ':' + export_path
 
     def _clone_backing_file_for_volume(self, volume_name, clone_name,
-                                       volume_id, share=None):
+                                       volume_id, share=None,
+                                       is_snapshot=False,
+                                       source_snapshot=None):
         """Clone backing file for Cinder volume."""
         raise NotImplementedError()
 
+    def _get_backing_flexvol_names(self, hosts):
+        """Returns a set of flexvol names."""
+        raise NotImplementedError()
+
     def _get_provider_location(self, volume_id):
         """Returns provider location for given volume."""
         volume = self.db.volume_get(self._context, volume_id)
@@ -982,3 +1016,139 @@ class NetAppNfsDriver(driver.ManageableVD,
         vol_path = os.path.join(volume['provider_location'], vol_str)
         LOG.info(_LI("Cinder NFS volume with current path \"%(cr)s\" is "
                      "no longer being managed."), {'cr': vol_path})
+
+    @utils.trace_method
+    def create_consistencygroup(self, context, group):
+        """Driver entry point for creating a consistency group.
+
+        ONTAP does not maintain an actual CG construct. As a result, no
+        communication to the backend is necessary for consistency group
+        creation.
+
+        :return: Hard-coded model update for consistency group model.
+        """
+        model_update = {'status': 'available'}
+        return model_update
+
+    @utils.trace_method
+    def delete_consistencygroup(self, context, group, volumes):
+        """Driver entry point for deleting a consistency group.
+
+        :return: Updated consistency group model and list of volume models
+                 for the volumes that were deleted.
+        """
+        model_update = {'status': 'deleted'}
+        volumes_model_update = []
+        for volume in volumes:
+            try:
+                self._delete_file(volume['id'], volume['name'])
+                volumes_model_update.append(
+                    {'id': volume['id'], 'status': 'deleted'})
+            except Exception:
+                volumes_model_update.append(
+                    {'id': volume['id'], 'status': 'error_deleting'})
+                LOG.exception(_LE("Volume %(vol)s in the consistency group "
+                                  "could not be deleted."), {'vol': volume})
+        return model_update, volumes_model_update
+
+    @utils.trace_method
+    def update_consistencygroup(self, context, group, add_volumes=None,
+                                remove_volumes=None):
+        """Driver entry point for updating a consistency group.
+
+        Since no actual CG construct is ever created in ONTAP, it is not
+        necessary to update any metadata on the backend. Since this is a
+        NO-OP, there is guaranteed to be no change in any of the volumes'
+        statuses.
+ """ + return None, None, None + + @utils.trace_method + def create_cgsnapshot(self, context, cgsnapshot, snapshots): + """Creates a Cinder cgsnapshot object. + + The Cinder cgsnapshot object is created by making use of an ONTAP CG + snapshot in order to provide write-order consistency for a set of + backing flexvols. First, a list of the flexvols backing the given + Cinder volumes in the CG is determined. An ONTAP CG snapshot of the + flexvols creates a write-order consistent snapshot of each backing + flexvol. For each Cinder volume in the CG, it is then necessary to + clone its volume from the ONTAP CG snapshot. The naming convention + used to create the clones indicates the clone's role as a Cinder + snapshot and its inclusion in a Cinder CG snapshot. The ONTAP CG + snapshots, of each backing flexvol, are deleted after the cloning + operation is completed. + + :return: An implicit update for the cgsnapshot and snapshot models that + is then used by the manager to set the models to available. + """ + + hosts = [snapshot['volume']['host'] for snapshot in snapshots] + flexvols = self._get_backing_flexvol_names(hosts) + + # Create snapshot for backing flexvol + self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id']) + + # Start clone process for snapshot files + for snapshot in snapshots: + self._clone_backing_file_for_volume( + snapshot['volume']['name'], snapshot['name'], + snapshot['volume']['id'], source_snapshot=cgsnapshot['id']) + + # Delete backing flexvol snapshots + for flexvol_name in flexvols: + self.zapi_client.wait_for_busy_snapshot( + flexvol_name, cgsnapshot['id']) + self.zapi_client.delete_snapshot(flexvol_name, cgsnapshot['id']) + + return None, None + + @utils.trace_method + def delete_cgsnapshot(self, context, cgsnapshot, snapshots): + """Delete files backing each snapshot in the cgsnapshot.""" + raise NotImplementedError() + + @utils.trace_method + def create_consistencygroup_from_src(self, context, group, volumes, + cgsnapshot=None, snapshots=None, + source_cg=None, source_vols=None): + """Creates a CG from a either a cgsnapshot or group of cinder vols. + + :return: An implicit update for the volumes model that is + interpreted by the manager as a successful operation. 
+ """ + LOG.debug("VOLUMES %s ", [dict(vol) for vol in volumes]) + model_update = None + + if cgsnapshot: + vols = zip(volumes, snapshots) + + for volume, snapshot in vols: + self.create_volume_from_snapshot(volume, snapshot) + + elif source_cg and source_vols: + hosts = [source_vol['host'] for source_vol in source_vols] + flexvols = self._get_backing_flexvol_names(hosts) + + # Create snapshot for backing flexvol + snapshot_name = 'snapshot-temp-' + source_cg['id'] + self.zapi_client.create_cg_snapshot(flexvols, snapshot_name) + + # Start clone process for new volumes + vols = zip(volumes, source_vols) + for volume, source_vol in vols: + self._clone_backing_file_for_volume( + source_vol['name'], volume['name'], + source_vol['id'], source_snapshot=snapshot_name) + + # Delete backing flexvol snapshots + for flexvol_name in flexvols: + self.zapi_client.wait_for_busy_snapshot( + flexvol_name, snapshot_name) + self.zapi_client.delete_snapshot(flexvol_name, snapshot_name) + else: + LOG.error(_LE("Unexpected set of parameters received when " + "creating consistency group from source.")) + model_update = {} + model_update['status'] = 'error' + + return model_update, None diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py index 87514ff6e..6e5471e06 100644 --- a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py @@ -34,23 +34,24 @@ from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder import interface from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities +from cinder.volume.drivers.netapp.dataontap.utils import data_motion +from cinder.volume.drivers.netapp.dataontap.utils import utils as cmode_utils from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) -QOS_CLEANUP_INTERVAL_SECONDS = 60 SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly @interface.volumedriver @six.add_metaclass(utils.TraceWrapperWithABCMetaclass) -class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): +class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, + data_motion.DataMotionMixin): """NetApp NFS driver for Data ONTAP (Cluster-mode).""" REQUIRED_CMODE_FLAGS = ['netapp_vserver'] @@ -58,34 +59,48 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): def __init__(self, *args, **kwargs): super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(na_opts.netapp_cluster_opts) + self.failed_over_backend_name = kwargs.get('active_backend_id') + self.failed_over = self.failed_over_backend_name is not None + self.replication_enabled = ( + True if self.get_replication_backend_names( + self.configuration) else False) def do_setup(self, context): """Do the customized set up on client for cluster mode.""" super(NetAppCmodeNfsDriver, self).do_setup(context) na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) - self.vserver = self.configuration.netapp_vserver - - self.zapi_client = client_cmode.Client( - transport_type=self.configuration.netapp_transport_type, - username=self.configuration.netapp_login, - 
password=self.configuration.netapp_password, - hostname=self.configuration.netapp_server_hostname, - port=self.configuration.netapp_server_port, - vserver=self.vserver) + # cDOT API client + self.zapi_client = cmode_utils.get_client_for_backend( + self.failed_over_backend_name or self.backend_name) + self.vserver = self.zapi_client.vserver + # Performance monitoring library self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) + + # Storage service catalog self.ssc_library = capabilities.CapabilitiesLibrary( 'nfs', self.vserver, self.zapi_client, self.configuration) + def _update_zapi_client(self, backend_name): + """Set cDOT API client for the specified config backend stanza name.""" + + self.zapi_client = cmode_utils.get_client_for_backend(backend_name) + self.vserver = self.zapi_client.vserver + self.ssc_library._update_for_failover(self.zapi_client, + self._get_flexvol_to_pool_map()) + ssc = self.ssc_library.get_ssc() + self.perf_library._update_for_failover(self.zapi_client, ssc) + + @utils.trace_method def check_for_setup_error(self): """Check that the driver is working and can communicate.""" super(NetAppCmodeNfsDriver, self).check_for_setup_error() self.ssc_library.check_api_permissions() - self._start_periodic_tasks() def _start_periodic_tasks(self): + """Start recurring tasks for NetApp cDOT NFS driver.""" # Note(cknight): Run the task once in the current thread to prevent a # race with the first invocation of _update_volume_stats. @@ -98,12 +113,31 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): interval=SSC_UPDATE_INTERVAL_SECONDS, initial_delay=SSC_UPDATE_INTERVAL_SECONDS) - # Start the task that harvests soft-deleted QoS policy groups. - harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall( - self.zapi_client.remove_unused_qos_policy_groups) - harvest_qos_periodic_task.start( - interval=QOS_CLEANUP_INTERVAL_SECONDS, - initial_delay=QOS_CLEANUP_INTERVAL_SECONDS) + super(NetAppCmodeNfsDriver, self)._start_periodic_tasks() + + def _handle_housekeeping_tasks(self): + """Handle various cleanup activities.""" + super(NetAppCmodeNfsDriver, self)._handle_housekeeping_tasks() + + # Harvest soft-deleted QoS policy groups + self.zapi_client.remove_unused_qos_policy_groups() + + active_backend = self.failed_over_backend_name or self.backend_name + + LOG.debug("Current service state: Replication enabled: %(" + "replication)s. Failed-Over: %(failed)s. 
Active Backend " + "ID: %(active)s", + { + 'replication': self.replication_enabled, + 'failed': self.failed_over, + 'active': active_backend, + }) + + # Create pool mirrors if whole-backend replication configured + if self.replication_enabled and not self.failed_over: + self.ensure_snapmirrors( + self.configuration, self.backend_name, + self.ssc_library.get_ssc_flexvol_names()) def _do_qos_for_volume(self, volume, extra_specs, cleanup=True): try: @@ -135,11 +169,13 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): target_path) def _clone_backing_file_for_volume(self, volume_name, clone_name, - volume_id, share=None): + volume_id, share=None, + is_snapshot=False, + source_snapshot=None): """Clone backing file for Cinder volume.""" (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share) self.zapi_client.clone_file(exp_volume, volume_name, clone_name, - vserver) + vserver, is_snapshot=is_snapshot) def _get_vserver_and_exp_vol(self, volume_id=None, share=None): """Gets the vserver and export volume for share.""" @@ -165,6 +201,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function()) data['sparse_copy_volume'] = True + data.update(self.get_replication_backend_stats(self.configuration)) self._spawn_clean_cache_job() self.zapi_client.provide_ems(self, netapp_backend, self._app_version) @@ -185,8 +222,13 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): if not ssc: return pools + # Get up-to-date node utilization metrics just once self.perf_library.update_performance_cache(ssc) + # Get up-to-date aggregate capacities just once + aggregates = self.ssc_library.get_ssc_aggregates() + aggr_capacities = self.zapi_client.get_aggregate_capacities(aggregates) + for ssc_vol_name, ssc_vol_info in ssc.items(): pool = dict() @@ -196,12 +238,19 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): # Add driver capabilities and config info pool['QoS_support'] = True + pool['consistencygroup_support'] = True + pool['multiattach'] = True # Add up-to-date capacity info nfs_share = ssc_vol_info['pool_name'] capacity = self._get_share_capacity_info(nfs_share) pool.update(capacity) + aggregate_name = ssc_vol_info.get('netapp_aggregate') + aggr_capacity = aggr_capacities.get(aggregate_name, {}) + pool['netapp_aggregate_used_percent'] = aggr_capacity.get( + 'percent-used', 0) + # Add utilization data utilization = self.perf_library.get_node_utilization_for_pool( ssc_vol_name) @@ -320,6 +369,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): return ssc_vol_name return None + @utils.trace_method def delete_volume(self, volume): """Deletes a logical volume.""" self._delete_backing_file_for_volume(volume) @@ -337,9 +387,9 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): """Deletes file on nfs share that backs a cinder volume.""" try: LOG.debug('Deleting backing file for volume %s.', volume['id']) - self._delete_volume_on_filer(volume) + self._delete_file(volume['id'], volume['name']) except Exception: - LOG.exception(_LE('Could not do delete of volume %s on filer, ' + LOG.exception(_LE('Could not delete volume %s on backend, ' 'falling back to exec of "rm" command.'), volume['id']) try: @@ -348,43 +398,35 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): LOG.exception(_LE('Exec of "rm" command on backing file for ' '%s was unsuccessful.'), volume['id']) - def _delete_volume_on_filer(self, volume): - (_vserver, flexvol) = self._get_export_ip_path(volume_id=volume['id']) - 
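The pool-reporting change above joins per-flexvol SSC data with a single batch of aggregate capacities. A sketch of that join, with invented values shaped like the get_aggregate_capacities() output:

aggr_capacities = {'aggr1': {'percent-used': 45,
                             'size-available': 1.0e12,
                             'size-total': 2.0e12}}
ssc = {'vol1': {'netapp_aggregate': 'aggr1'},
       'vol2': {'netapp_aggregate': 'aggr_other'}}   # capacity lookup misses

pools = []
for flexvol, info in ssc.items():
    capacity = aggr_capacities.get(info['netapp_aggregate'], {})
    pools.append({'pool_name': flexvol,
                  'netapp_aggregate_used_percent':
                      capacity.get('percent-used', 0)})  # default when missing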
path_on_filer = '/vol' + flexvol + '/' + volume['name'] - LOG.debug('Attempting to delete backing file %s for volume %s on ' - 'filer.', path_on_filer, volume['id']) - self.zapi_client.delete_file(path_on_filer) + def _delete_file(self, file_id, file_name): + (_vserver, flexvol) = self._get_export_ip_path(volume_id=file_id) + path_on_backend = '/vol' + flexvol + '/' + file_name + LOG.debug('Attempting to delete file %(path)s for ID %(file_id)s on ' + 'backend.', {'path': path_on_backend, 'file_id': file_id}) + self.zapi_client.delete_file(path_on_backend) @utils.trace_method def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self._delete_backing_file_for_snapshot(snapshot) - @utils.trace_method def _delete_backing_file_for_snapshot(self, snapshot): """Deletes file on nfs share that backs a cinder volume.""" try: LOG.debug('Deleting backing file for snapshot %s.', snapshot['id']) - self._delete_snapshot_on_filer(snapshot) + self._delete_file(snapshot['volume_id'], snapshot['name']) except Exception: - LOG.exception(_LE('Could not do delete of snapshot %s on filer, ' + LOG.exception(_LE('Could not delete snapshot %s on backend, ' 'falling back to exec of "rm" command.'), snapshot['id']) try: + # delete_file_from_share super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot) except Exception: LOG.exception(_LE('Exec of "rm" command on backing file for' ' %s was unsuccessful.'), snapshot['id']) @utils.trace_method - def _delete_snapshot_on_filer(self, snapshot): - (_vserver, flexvol) = self._get_export_ip_path( - volume_id=snapshot['volume_id']) - path_on_filer = '/vol' + flexvol + '/' + snapshot['name'] - LOG.debug('Attempting to delete backing file %s for snapshot %s ' - 'on filer.', path_on_filer, snapshot['id']) - self.zapi_client.delete_file(path_on_filer) - def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" copy_success = False @@ -607,6 +649,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): if os.path.exists(dst_img_local): self._delete_file_at_path(dst_img_local) + @utils.trace_method def unmanage(self, volume): """Removes the specified volume from Cinder management. @@ -627,3 +670,35 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): pass super(NetAppCmodeNfsDriver, self).unmanage(volume) + + def failover_host(self, context, volumes, secondary_id=None): + """Failover a backend to a secondary replication target.""" + + return self._failover_host(volumes, secondary_id=secondary_id) + + def _get_backing_flexvol_names(self, hosts): + """Returns a set of flexvol names.""" + flexvols = set() + ssc = self.ssc_library.get_ssc() + + for host in hosts: + pool_name = volume_utils.extract_host(host, level='pool') + + for flexvol_name, ssc_volume_data in ssc.items(): + if ssc_volume_data['pool_name'] == pool_name: + flexvols.add(flexvol_name) + + return flexvols + + @utils.trace_method + def delete_cgsnapshot(self, context, cgsnapshot, snapshots): + """Delete files backing each snapshot in the cgsnapshot. + + :return: An implicit update of snapshot models that the manager will + interpret and subsequently set the model state to deleted. 
+ """ + for snapshot in snapshots: + self._delete_backing_file_for_snapshot(snapshot) + LOG.debug("Snapshot %s deletion successful", snapshot['name']) + + return None, None diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py index 16a276c07..d086019f0 100644 --- a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py @@ -97,7 +97,7 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary): # Update pool utilization map atomically pool_utilization = {} for pool_name, pool_info in ssc_pools.items(): - aggr_name = pool_info.get('aggregate', 'unknown') + aggr_name = pool_info.get('netapp_aggregate', 'unknown') node_name = aggr_node_map.get(aggr_name) if node_name: pool_utilization[pool_name] = node_utilization.get( @@ -113,12 +113,16 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary): return self.pool_utilization.get(pool_name, perf_base.DEFAULT_UTILIZATION) + def _update_for_failover(self, zapi_client, ssc_pools): + self.zapi_client = zapi_client + self.update_performance_cache(ssc_pools) + def _get_aggregates_for_pools(self, ssc_pools): """Get the set of aggregates that contain the specified pools.""" aggr_names = set() for pool_name, pool_info in ssc_pools.items(): - aggr_names.add(pool_info.get('aggregate')) + aggr_names.add(pool_info.get('netapp_aggregate')) return aggr_names def _get_nodes_for_aggregates(self, aggr_names): diff --git a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py index 9f3f1be2e..8447b8dc9 100644 --- a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py +++ b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py @@ -88,11 +88,25 @@ class CapabilitiesLibrary(object): return copy.deepcopy(self.ssc) + def get_ssc_flexvol_names(self): + """Get the names of the FlexVols in the Storage Service Catalog.""" + ssc = self.get_ssc() + return ssc.keys() + def get_ssc_for_flexvol(self, flexvol_name): """Get map of Storage Service Catalog entries for a single flexvol.""" return copy.deepcopy(self.ssc.get(flexvol_name, {})) + def get_ssc_aggregates(self): + """Get a list of aggregates for all SSC flexvols.""" + + aggregates = set() + for __, flexvol_info in self.ssc.items(): + if 'netapp_aggregate' in flexvol_info: + aggregates.add(flexvol_info['netapp_aggregate']) + return list(aggregates) + def update_ssc(self, flexvol_map): """Periodically runs to update Storage Service Catalog data. 
@@ -117,13 +131,18 @@ class CapabilitiesLibrary(object): ssc_volume.update(self._get_ssc_mirror_info(flexvol_name)) # Get aggregate info - aggregate_name = ssc_volume.get('aggregate') + aggregate_name = ssc_volume.get('netapp_aggregate') ssc_volume.update(self._get_ssc_aggregate_info(aggregate_name)) ssc[flexvol_name] = ssc_volume self.ssc = ssc + def _update_for_failover(self, zapi_client, flexvol_map): + + self.zapi_client = zapi_client + self.update_ssc(flexvol_map) + def _get_ssc_flexvol_info(self, flexvol_name): """Gather flexvol info and recast into SSC-style volume stats.""" @@ -138,7 +157,7 @@ class CapabilitiesLibrary(object): 'netapp_thin_provisioned': six.text_type(not netapp_thick).lower(), 'thick_provisioning_support': thick, 'thin_provisioning_support': not thick, - 'aggregate': volume_info.get('aggregate'), + 'netapp_aggregate': volume_info.get('aggregate'), } def _get_thick_provisioning_support(self, netapp_thick): @@ -181,14 +200,15 @@ class CapabilitiesLibrary(object): def _get_ssc_aggregate_info(self, aggregate_name): """Gather aggregate info and recast into SSC-style volume stats.""" - disk_type = self.zapi_client.get_aggregate_disk_type(aggregate_name) - aggr_info = self.zapi_client.get_aggregate(aggregate_name) - - raid_type = aggr_info.get('raid-type') + aggregate = self.zapi_client.get_aggregate(aggregate_name) + hybrid = (six.text_type(aggregate.get('is-hybrid')).lower() + if 'is-hybrid' in aggregate else None) + disk_types = self.zapi_client.get_aggregate_disk_types(aggregate_name) return { - 'netapp_disk_type': disk_type, - 'netapp_raid_type': raid_type, + 'netapp_raid_type': aggregate.get('raid-type'), + 'netapp_hybrid_aggregate': hybrid, + 'netapp_disk_type': disk_types, } def get_matching_flexvols_for_extra_specs(self, extra_specs): diff --git a/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py b/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py new file mode 100644 index 000000000..428d32c5a --- /dev/null +++ b/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py @@ -0,0 +1,638 @@ +# Copyright (c) 2016 Alex Meade. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +NetApp Data ONTAP data motion library. + +This library handles transferring data from a source to a destination. Its +responsibility is to handle this as efficiently as possible given the +location of the data's source and destination. This includes cloning, +SnapMirror, and copy-offload as improvements to brute force data transfer. 
+""" + +from oslo_log import log +from oslo_utils import excutils + +from cinder import exception +from cinder import utils +from cinder.i18n import _, _LE, _LI +from cinder.objects import fields +from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api +from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils +from cinder.volume import utils as volume_utils + +LOG = log.getLogger(__name__) +ENTRY_DOES_NOT_EXIST = "(entry doesn't exist)" +QUIESCE_RETRY_INTERVAL = 5 + + +class DataMotionMixin(object): + + def get_replication_backend_names(self, config): + """Get the backend names for all configured replication targets.""" + + backend_names = [] + + replication_devices = config.safe_get('replication_device') + if replication_devices: + for replication_device in replication_devices: + backend_id = replication_device.get('backend_id') + if backend_id: + backend_names.append(backend_id) + + return backend_names + + def get_replication_backend_stats(self, config): + """Get the driver replication info for merging into volume stats.""" + + backend_names = self.get_replication_backend_names(config) + + if len(backend_names) > 0: + stats = { + 'replication_enabled': True, + 'replication_count': len(backend_names), + 'replication_targets': backend_names, + 'replication_type': 'async', + } + else: + stats = {'replication_enabled': False} + + return stats + + def _get_replication_aggregate_map(self, src_backend_name, + target_backend_name): + """Get the aggregate mapping config between src and destination.""" + + aggregate_map = {} + + config = config_utils.get_backend_configuration(src_backend_name) + + all_replication_aggregate_maps = config.safe_get( + 'netapp_replication_aggregate_map') + if all_replication_aggregate_maps: + for replication_aggregate_map in all_replication_aggregate_maps: + if (replication_aggregate_map.get('backend_id') == + target_backend_name): + replication_aggregate_map.pop('backend_id') + aggregate_map = replication_aggregate_map + break + + return aggregate_map + + def get_snapmirrors(self, src_backend_name, dest_backend_name, + src_flexvol_name=None, dest_flexvol_name=None): + """Get info regarding SnapMirror relationship/s for given params.""" + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + src_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = src_backend_config.netapp_vserver + + snapmirrors = dest_client.get_snapmirrors( + src_vserver, src_flexvol_name, + dest_vserver, dest_flexvol_name, + desired_attributes=[ + 'relationship-status', + 'mirror-state', + 'source-vserver', + 'source-volume', + 'destination-vserver', + 'destination-volume', + 'last-transfer-end-timestamp', + 'lag-time', + ]) + return snapmirrors + + def create_snapmirror(self, src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name): + """Set up a SnapMirror relationship b/w two FlexVols (cinder pools) + + 1. Create SnapMirror relationship + 2. Initialize data transfer asynchronously + + If a SnapMirror relationship already exists and is broken off or + quiesced, resume and re-sync the mirror. 
+ """ + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + source_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = source_backend_config.netapp_vserver + + # 1. Create destination 'dp' FlexVol if it doesn't exist + if not dest_client.flexvol_exists(dest_flexvol_name): + self.create_destination_flexvol(src_backend_name, + dest_backend_name, + src_flexvol_name, + dest_flexvol_name) + + # 2. Check if SnapMirror relationship exists + existing_mirrors = dest_client.get_snapmirrors( + src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) + + msg_payload = { + 'src_vserver': src_vserver, + 'src_volume': src_flexvol_name, + 'dest_vserver': dest_vserver, + 'dest_volume': dest_flexvol_name, + } + + # 3. Create and initialize SnapMirror if it doesn't already exist + if not existing_mirrors: + # TODO(gouthamr): Change the schedule from hourly to a config value + msg = ("Creating a SnapMirror relationship between " + "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:" + "%(dest_volume)s.") + LOG.debug(msg, msg_payload) + + dest_client.create_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name, + schedule='hourly') + + msg = ("Initializing SnapMirror transfers between " + "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:" + "%(dest_volume)s.") + LOG.debug(msg, msg_payload) + + # Initialize async transfer of the initial data + dest_client.initialize_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + + # 4. Try to repair SnapMirror if existing + else: + snapmirror = existing_mirrors[0] + if snapmirror.get('mirror-state') != 'snapmirrored': + try: + msg = ("SnapMirror between %(src_vserver)s:%(src_volume)s " + "and %(dest_vserver)s:%(dest_volume)s is in " + "'%(state)s' state. Attempting to repair it.") + msg_payload['state'] = snapmirror.get('mirror-state') + LOG.debug(msg, msg_payload) + dest_client.resume_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + dest_client.resync_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + except netapp_api.NaApiError: + LOG.exception(_LE("Could not re-sync SnapMirror.")) + + def delete_snapmirror(self, src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name, release=True): + """Ensure all information about a SnapMirror relationship is removed. + + 1. Abort SnapMirror + 2. Delete the SnapMirror + 3. Release SnapMirror to cleanup SnapMirror metadata and snapshots + """ + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + source_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = source_backend_config.netapp_vserver + + # 1. Abort any ongoing transfers + try: + dest_client.abort_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name, + clear_checkpoint=False) + except netapp_api.NaApiError: + # Snapmirror is already deleted + pass + + # 2. 
Delete SnapMirror Relationship and cleanup destination snapshots + try: + dest_client.delete_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + except netapp_api.NaApiError as e: + with excutils.save_and_reraise_exception() as exc_context: + if (e.code == netapp_api.EOBJECTNOTFOUND or + e.code == netapp_api.ESOURCE_IS_DIFFERENT or + ENTRY_DOES_NOT_EXIST in e.message): + LOG.info(_LI('No SnapMirror relationship to delete.')) + exc_context.reraise = False + + if release: + # If the source is unreachable, do not perform the release + try: + src_client = config_utils.get_client_for_backend( + src_backend_name, vserver_name=src_vserver) + except Exception: + src_client = None + # 3. Cleanup SnapMirror relationship on source + try: + if src_client: + src_client.release_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + except netapp_api.NaApiError as e: + with excutils.save_and_reraise_exception() as exc_context: + if (e.code == netapp_api.EOBJECTNOTFOUND or + e.code == netapp_api.ESOURCE_IS_DIFFERENT or + ENTRY_DOES_NOT_EXIST in e.message): + # Handle the case where the SnapMirror is already + # cleaned up + exc_context.reraise = False + + def update_snapmirror(self, src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name): + """Schedule a SnapMirror update on the backend.""" + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + source_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = source_backend_config.netapp_vserver + + # Update SnapMirror + dest_client.update_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + + def quiesce_then_abort(self, src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name): + """Quiesce a SnapMirror and wait with retries before aborting.""" + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + source_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = source_backend_config.netapp_vserver + + # 1. Attempt to quiesce, then abort + dest_client.quiesce_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + + retries = (source_backend_config.netapp_snapmirror_quiesce_timeout / + QUIESCE_RETRY_INTERVAL) + + @utils.retry(exception.NetAppDriverException, + interval=QUIESCE_RETRY_INTERVAL, + retries=retries, backoff_rate=1) + def wait_for_quiesced(): + snapmirror = dest_client.get_snapmirrors( + src_vserver, src_flexvol_name, dest_vserver, + dest_flexvol_name, + desired_attributes=['relationship-status', 'mirror-state'])[0] + if snapmirror.get('relationship-status') != 'quiesced': + msg = _("SnapMirror relationship is not quiesced.") + raise exception.NetAppDriverException(reason=msg) + + try: + wait_for_quiesced() + except exception.NetAppDriverException: + dest_client.abort_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name, + clear_checkpoint=False) + + def break_snapmirror(self, src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name): + """Break SnapMirror relationship. + + 1. 
Quiesce any ongoing SnapMirror transfers + 2. Wait until SnapMirror finishes transfers and enters quiesced state + 3. Break SnapMirror + 4. Mount the destination volume so it is given a junction path + """ + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + source_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = source_backend_config.netapp_vserver + + # 1. Attempt to quiesce, then abort + self.quiesce_then_abort(src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name) + + # 2. Break SnapMirror + dest_client.break_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + + # 3. Mount the destination volume and create a junction path + dest_client.mount_flexvol(dest_flexvol_name) + + def resync_snapmirror(self, src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name): + """Re-sync (repair / re-establish) SnapMirror relationship.""" + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + source_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = source_backend_config.netapp_vserver + + dest_client.resync_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + + def resume_snapmirror(self, src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name): + """Resume SnapMirror relationship from a quiesced state.""" + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + source_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = source_backend_config.netapp_vserver + + dest_client.resume_snapmirror(src_vserver, + src_flexvol_name, + dest_vserver, + dest_flexvol_name) + + def create_destination_flexvol(self, src_backend_name, dest_backend_name, + src_flexvol_name, dest_flexvol_name): + """Create a SnapMirror mirror target FlexVol for a given source.""" + dest_backend_config = config_utils.get_backend_configuration( + dest_backend_name) + dest_vserver = dest_backend_config.netapp_vserver + dest_client = config_utils.get_client_for_backend( + dest_backend_name, vserver_name=dest_vserver) + + source_backend_config = config_utils.get_backend_configuration( + src_backend_name) + src_vserver = source_backend_config.netapp_vserver + src_client = config_utils.get_client_for_backend( + src_backend_name, vserver_name=src_vserver) + + provisioning_options = ( + src_client.get_provisioning_options_from_flexvol( + src_flexvol_name) + ) + + # Remove size and volume_type + size = provisioning_options.pop('size', None) + if not size: + msg = _("Unable to read the size of the source FlexVol (%s) " + "to create a SnapMirror destination.") + raise exception.NetAppDriverException(msg % src_flexvol_name) + provisioning_options.pop('volume_type', None) + + source_aggregate = provisioning_options.pop('aggregate') + aggregate_map = self._get_replication_aggregate_map( + src_backend_name, dest_backend_name) + + if not 
aggregate_map.get(source_aggregate): + msg = _("Unable to find configuration matching the source " + "aggregate (%s) and the destination aggregate. Option " + "netapp_replication_aggregate_map may be incorrect.") + raise exception.NetAppDriverException( + message=msg % source_aggregate) + + destination_aggregate = aggregate_map[source_aggregate] + + # NOTE(gouthamr): The volume is intentionally created as a Data + # Protection volume; junction-path will be added on breaking + # the mirror. + dest_client.create_flexvol(dest_flexvol_name, + destination_aggregate, + size, + volume_type='dp', + **provisioning_options) + + def ensure_snapmirrors(self, config, src_backend_name, src_flexvol_names): + """Ensure all the SnapMirrors needed for whole-backend replication.""" + backend_names = self.get_replication_backend_names(config) + for dest_backend_name in backend_names: + for src_flexvol_name in src_flexvol_names: + + dest_flexvol_name = src_flexvol_name + + self.create_snapmirror(src_backend_name, + dest_backend_name, + src_flexvol_name, + dest_flexvol_name) + + def break_snapmirrors(self, config, src_backend_name, src_flexvol_names, + chosen_target): + """Break all existing SnapMirror relationships for a given back end.""" + failed_to_break = [] + backend_names = self.get_replication_backend_names(config) + for dest_backend_name in backend_names: + for src_flexvol_name in src_flexvol_names: + + dest_flexvol_name = src_flexvol_name + try: + self.break_snapmirror(src_backend_name, + dest_backend_name, + src_flexvol_name, + dest_flexvol_name) + except netapp_api.NaApiError: + msg = _("Unable to break SnapMirror between FlexVol " + "%(src)s and Flexvol %(dest)s. Associated volumes " + "will have their replication state set to error.") + payload = { + 'src': ':'.join([src_backend_name, src_flexvol_name]), + 'dest': ':'.join([dest_backend_name, + dest_flexvol_name]), + } + if dest_backend_name == chosen_target: + failed_to_break.append(src_flexvol_name) + LOG.exception(msg, payload) + + return failed_to_break + + def update_snapmirrors(self, config, src_backend_name, src_flexvol_names): + """Update all existing SnapMirror relationships on a given back end.""" + backend_names = self.get_replication_backend_names(config) + for dest_backend_name in backend_names: + for src_flexvol_name in src_flexvol_names: + + dest_flexvol_name = src_flexvol_name + try: + self.update_snapmirror(src_backend_name, + dest_backend_name, + src_flexvol_name, + dest_flexvol_name) + except netapp_api.NaApiError: + # Ignore any errors since the current source may be + # unreachable + pass + + def _choose_failover_target(self, backend_name, flexvols, + replication_targets): + target_lag_times = [] + + for target in replication_targets: + all_target_mirrors = self.get_snapmirrors( + backend_name, target, None, None) + flexvol_mirrors = self._filter_and_sort_mirrors( + all_target_mirrors, flexvols) + + if not flexvol_mirrors: + msg = ("Ignoring replication target %(target)s because no " + "SnapMirrors were found for any of the flexvols " + "in (%(flexvols)s).") + payload = { + 'flexvols': ', '.join(flexvols), + 'target': target, + } + LOG.debug(msg, payload) + continue + + target_lag_times.append( + { + 'target': target, + 'highest-lag-time': flexvol_mirrors[0]['lag-time'], + } + ) + + # The best target is one with the least 'worst' lag time. 
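+        # For example, given target_lag_times of
+        #     [{'target': 'backend_b', 'highest-lag-time': '7200'},
+        #      {'target': 'backend_c', 'highest-lag-time': '600'}]
+        # the ascending sort below picks 'backend_c', the target whose
+        # most-lagged mirror is still the most recently transferred.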
+ best_target = (sorted(target_lag_times, + key=lambda x: int(x['highest-lag-time']))[0] + if len(target_lag_times) > 0 else {}) + + return best_target.get('target') + + def _filter_and_sort_mirrors(self, mirrors, flexvols): + """Return mirrors reverse-sorted by lag time. + + The 'slowest' mirror determines the best update that occurred on a + given replication target. + """ + filtered_mirrors = list(filter(lambda x: x.get('destination-volume') + in flexvols, mirrors)) + sorted_mirrors = sorted(filtered_mirrors, + key=lambda x: int(x.get('lag-time')), + reverse=True) + + return sorted_mirrors + + def _complete_failover(self, source_backend_name, replication_targets, + flexvols, volumes, failover_target=None): + """Failover a backend to a secondary replication target.""" + volume_updates = [] + + active_backend_name = failover_target or self._choose_failover_target( + source_backend_name, flexvols, replication_targets) + + if active_backend_name is None: + msg = _("No suitable host was found to failover.") + raise exception.NetAppDriverException(msg) + + source_backend_config = config_utils.get_backend_configuration( + source_backend_name) + + # 1. Start an update to try to get a last minute transfer before we + # quiesce and break + self.update_snapmirrors(source_backend_config, source_backend_name, + flexvols) + # 2. Break SnapMirrors + failed_to_break = self.break_snapmirrors(source_backend_config, + source_backend_name, + flexvols, active_backend_name) + + # 3. Update cinder volumes within this host + for volume in volumes: + replication_status = fields.ReplicationStatus.FAILED_OVER + volume_pool = volume_utils.extract_host(volume['host'], + level='pool') + if volume_pool in failed_to_break: + replication_status = 'error' + + volume_update = { + 'volume_id': volume['id'], + 'updates': { + 'replication_status': replication_status, + }, + } + volume_updates.append(volume_update) + + return active_backend_name, volume_updates + + def _failover_host(self, volumes, secondary_id=None): + + if secondary_id == self.backend_name: + msg = _("Cannot failover to the same host as the primary.") + raise exception.InvalidReplicationTarget(reason=msg) + + replication_targets = self.get_replication_backend_names( + self.configuration) + + if not replication_targets: + msg = _("No replication targets configured for backend " + "%s. Cannot failover.") + raise exception.InvalidReplicationTarget(reason=msg % self.host) + elif secondary_id and secondary_id not in replication_targets: + msg = _("%(target)s is not among replication targets configured " + "for back end %(host)s. 
Cannot failover.") + payload = { + 'target': secondary_id, + 'host': self.host, + } + raise exception.InvalidReplicationTarget(reason=msg % payload) + + flexvols = self.ssc_library.get_ssc_flexvol_names() + + try: + active_backend_name, volume_updates = self._complete_failover( + self.backend_name, replication_targets, flexvols, volumes, + failover_target=secondary_id) + except exception.NetAppDriverException as e: + msg = _("Could not complete failover: %s") % e + raise exception.UnableToFailOver(reason=msg) + + # Update the ZAPI client to the backend we failed over to + self._update_zapi_client(active_backend_name) + + self.failed_over = True + self.failed_over_backend_name = active_backend_name + + return active_backend_name, volume_updates diff --git a/cinder/volume/drivers/netapp/dataontap/utils/utils.py b/cinder/volume/drivers/netapp/dataontap/utils/utils.py new file mode 100644 index 000000000..b79b92c19 --- /dev/null +++ b/cinder/volume/drivers/netapp/dataontap/utils/utils.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Utilities for NetApp FAS drivers. + +This module contains common utilities to be used by one or more +NetApp FAS drivers to achieve the desired functionality. +""" + +from oslo_config import cfg +from oslo_log import log + +from cinder import exception +from cinder.i18n import _ +from cinder import utils +from cinder.volume import configuration +from cinder.volume import driver +from cinder.volume.drivers.netapp.dataontap.client import client_cmode +from cinder.volume.drivers.netapp import options as na_opts + +LOG = log.getLogger(__name__) +CONF = cfg.CONF + + +def get_backend_configuration(backend_name): + """Get a cDOT configuration object for a specific backend.""" + + config_stanzas = CONF.list_all_sections() + if backend_name not in config_stanzas: + msg = _("Could not find backend stanza %(backend_name)s in " + "configuration. 
Available stanzas are %(stanzas)s") + params = { + "stanzas": config_stanzas, + "backend_name": backend_name, + } + raise exception.ConfigNotFound(message=msg % params) + + config = configuration.Configuration(driver.volume_opts, + config_group=backend_name) + config.append_config_values(na_opts.netapp_proxy_opts) + config.append_config_values(na_opts.netapp_connection_opts) + config.append_config_values(na_opts.netapp_transport_opts) + config.append_config_values(na_opts.netapp_basicauth_opts) + config.append_config_values(na_opts.netapp_provisioning_opts) + config.append_config_values(na_opts.netapp_cluster_opts) + config.append_config_values(na_opts.netapp_san_opts) + config.append_config_values(na_opts.netapp_replication_opts) + + return config + + +def get_client_for_backend(backend_name, vserver_name=None): + """Get a cDOT API client for a specific backend.""" + + config = get_backend_configuration(backend_name) + client = client_cmode.Client( + transport_type=config.netapp_transport_type, + username=config.netapp_login, + password=config.netapp_password, + hostname=config.netapp_server_hostname, + port=config.netapp_server_port, + vserver=vserver_name or config.netapp_vserver, + trace=utils.TRACE_API) + + return client diff --git a/cinder/volume/drivers/netapp/eseries/fc_driver.py b/cinder/volume/drivers/netapp/eseries/fc_driver.py index e06864b25..ce3542240 100644 --- a/cinder/volume/drivers/netapp/eseries/fc_driver.py +++ b/cinder/volume/drivers/netapp/eseries/fc_driver.py @@ -34,6 +34,10 @@ class NetAppEseriesFibreChannelDriver(driver.BaseVD, DRIVER_NAME = 'NetApp_FibreChannel_ESeries' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "NetApp_CI" + VERSION = library.NetAppESeriesLibrary.VERSION + def __init__(self, *args, **kwargs): super(NetAppEseriesFibreChannelDriver, self).__init__(*args, **kwargs) na_utils.validate_instantiation(**kwargs) diff --git a/cinder/volume/drivers/netapp/eseries/iscsi_driver.py b/cinder/volume/drivers/netapp/eseries/iscsi_driver.py index 5bee8da68..816f791e6 100644 --- a/cinder/volume/drivers/netapp/eseries/iscsi_driver.py +++ b/cinder/volume/drivers/netapp/eseries/iscsi_driver.py @@ -35,6 +35,10 @@ class NetAppEseriesISCSIDriver(driver.BaseVD, DRIVER_NAME = 'NetApp_iSCSI_ESeries' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "NetApp_CI" + VERSION = library.NetAppESeriesLibrary.VERSION + def __init__(self, *args, **kwargs): super(NetAppEseriesISCSIDriver, self).__init__(*args, **kwargs) na_utils.validate_instantiation(**kwargs) diff --git a/cinder/volume/drivers/netapp/options.py b/cinder/volume/drivers/netapp/options.py index 0967b1827..4d3564424 100644 --- a/cinder/volume/drivers/netapp/options.py +++ b/cinder/volume/drivers/netapp/options.py @@ -25,6 +25,7 @@ place to ensure re usability and better management of configuration options. """ from oslo_config import cfg +from oslo_config import types NETAPP_SIZE_MULTIPLIER_DEFAULT = 1.2 @@ -187,6 +188,30 @@ netapp_san_opts = [ 'is only utilized when the storage protocol is ' 'configured to use iSCSI or FC.')), ] +netapp_replication_opts = [ + cfg.MultiOpt('netapp_replication_aggregate_map', + item_type=types.Dict(), + help="Multi opt of dictionaries to represent the aggregate " + "mapping between source and destination back ends when " + "using whole back end replication. For every " + "source aggregate associated with a cinder pool (NetApp " + "FlexVol), you would need to specify the destination " + "aggregate on the replication target device. 
A " + "replication target device is configured with the " + "configuration option replication_device. Specify this " + "option as many times as you have replication devices. " + "Each entry takes the standard dict config form: " + "netapp_replication_aggregate_map = " + "backend_id:," + "src_aggr_name1:dest_aggr_name1," + "src_aggr_name2:dest_aggr_name2,..."), + cfg.IntOpt('netapp_snapmirror_quiesce_timeout', + min=0, + default=3600, # One Hour + help='The maximum time in seconds to wait for existing ' + 'SnapMirror transfers to complete before aborting ' + 'during a failover.'), ] + CONF = cfg.CONF CONF.register_opts(netapp_proxy_opts) CONF.register_opts(netapp_connection_opts) @@ -199,3 +224,4 @@ CONF.register_opts(netapp_img_cache_opts) CONF.register_opts(netapp_eseries_opts) CONF.register_opts(netapp_nfs_extra_opts) CONF.register_opts(netapp_san_opts) +CONF.register_opts(netapp_replication_opts) diff --git a/cinder/volume/drivers/nexenta/iscsi.py b/cinder/volume/drivers/nexenta/iscsi.py index 52e5440ea..c29656e32 100644 --- a/cinder/volume/drivers/nexenta/iscsi.py +++ b/cinder/volume/drivers/nexenta/iscsi.py @@ -56,6 +56,9 @@ class NexentaISCSIDriver(driver.ISCSIDriver): VERSION = VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Nexenta_CI" + def __init__(self, *args, **kwargs): super(NexentaISCSIDriver, self).__init__(*args, **kwargs) self.nms = None diff --git a/cinder/volume/drivers/nexenta/jsonrpc.py b/cinder/volume/drivers/nexenta/jsonrpc.py index c5c1bebde..161249a52 100644 --- a/cinder/volume/drivers/nexenta/jsonrpc.py +++ b/cinder/volume/drivers/nexenta/jsonrpc.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import socket - from oslo_log import log as logging from oslo_serialization import jsonutils import requests @@ -23,7 +21,7 @@ from cinder import exception from cinder.utils import retry LOG = logging.getLogger(__name__) -socket.setdefaulttimeout(100) +TIMEOUT = 60 class NexentaJSONProxy(object): @@ -31,7 +29,13 @@ class NexentaJSONProxy(object): retry_exc_tuple = (requests.exceptions.ConnectionError,) def __init__(self, scheme, host, port, path, user, password, auto=False, - obj=None, method=None): + obj=None, method=None, session=None): + if session: + self.session = session + else: + self.session = requests.Session() + self.session.auth = (user, password) + self.session.headers.update({'Content-Type': 'application/json'}) self.scheme = scheme.lower() self.host = host self.port = port @@ -51,7 +55,7 @@ class NexentaJSONProxy(object): obj, method = '%s.%s' % (self.obj, self.method), name return NexentaJSONProxy(self.scheme, self.host, self.port, self.path, self.user, self.password, self.auto, obj, - method) + method, self.session) @property def url(self): @@ -70,15 +74,10 @@ class NexentaJSONProxy(object): 'method': self.method, 'params': args }) - auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1] - headers = { - 'Content-Type': 'application/json', - 'Authorization': 'Basic %s' % auth - } + LOG.debug('Sending JSON data: %s', data) - req = requests.post(self.url, data=data, headers=headers) - response = req.json() - req.close() + r = self.session.post(self.url, data=data, timeout=TIMEOUT) + response = r.json() LOG.debug('Got response: %s', response) if response.get('error') is not None: diff --git a/cinder/volume/drivers/nexenta/nexentaedge/iscsi.py b/cinder/volume/drivers/nexenta/nexentaedge/iscsi.py index 76a52270a..e0b10af24 100644 --- 
a/cinder/volume/drivers/nexenta/nexentaedge/iscsi.py +++ b/cinder/volume/drivers/nexenta/nexentaedge/iscsi.py @@ -43,6 +43,9 @@ class NexentaEdgeISCSIDriver(driver.ISCSIDriver): VERSION = '1.0.2' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Nexenta_Edge_CI" + def __init__(self, *args, **kwargs): super(NexentaEdgeISCSIDriver, self).__init__(*args, **kwargs) if self.configuration: diff --git a/cinder/volume/drivers/nexenta/nexentaedge/nbd.py b/cinder/volume/drivers/nexenta/nexentaedge/nbd.py index 518b4ea57..e699c2195 100644 --- a/cinder/volume/drivers/nexenta/nexentaedge/nbd.py +++ b/cinder/volume/drivers/nexenta/nexentaedge/nbd.py @@ -46,6 +46,9 @@ class NexentaEdgeNBDDriver(driver.VolumeDriver): VERSION = '1.0.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Nexenta_Edge_CI" + def __init__(self, vg_obj=None, *args, **kwargs): LOG.debug('NexentaEdgeNBDDriver. Trying to initialize.') super(NexentaEdgeNBDDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/nexenta/nfs.py b/cinder/volume/drivers/nexenta/nfs.py index 71cfbb46f..41e666e67 100644 --- a/cinder/volume/drivers/nexenta/nfs.py +++ b/cinder/volume/drivers/nexenta/nfs.py @@ -32,7 +32,7 @@ from cinder.volume.drivers.nexenta import options from cinder.volume.drivers.nexenta import utils from cinder.volume.drivers import nfs -VERSION = '1.3.0' +VERSION = '1.3.1' LOG = logging.getLogger(__name__) @@ -53,6 +53,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 RemoteFsDriver. 1.2.0 - Added migrate and retype methods. 1.3.0 - Extend volume method. + 1.3.1 - Cache capacity info and check shared folders on setup. """ driver_prefix = 'nexenta' @@ -60,6 +61,9 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 VERSION = VERSION VOLUME_FILE_NAME = 'volume' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Nexenta_CI" + def __init__(self, *args, **kwargs): super(NexentaNfsDriver, self).__init__(*args, **kwargs) if self.configuration: @@ -86,6 +90,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 self._nms2volroot = {} self.share2nms = {} self.nfs_versions = {} + self.shares_with_capacities = {} @property def backend_name(self): @@ -121,7 +126,10 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 if not nms.folder.object_exists(folder): raise LookupError(_("Folder %s does not exist in Nexenta " "Store appliance"), folder) - self._share_folder(nms, volume_name, dataset) + if (folder not in nms.netstorsvc.get_shared_folders( + 'svc:/network/nfs/server:default', '')): + self._share_folder(nms, volume_name, dataset) + self._get_capacity_info(nfs_share) def migrate_volume(self, ctxt, volume, host): """Migrate if volume and host are managed by Nexenta appliance. 
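
The share-capacity cache introduced above (self.shares_with_capacities,
refreshed by the _get_capacity_info() calls added in the hunks below)
lets _update_volume_stats() choose a share without re-querying the
appliance each time. A minimal sketch of that selection, with purely
illustrative share names:

    shares_with_capacities = {
        '10.3.1.1:/volumes/pool-a': {'free': 120, 'total': 200},
        '10.3.1.1:/volumes/pool-b': {'free': 80, 'total': 500},
    }

    # Report the share with the most free space, as the rewritten
    # _update_volume_stats() does.
    share, free_space, total_space = None, 0, 0
    for _share, caps in shares_with_capacities.items():
        if caps['free'] > free_space:
            free_space = caps['free']
            total_space = caps['total']
            share = _share
    # share is now '10.3.1.1:/volumes/pool-a'
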
@@ -353,6 +361,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 sub_share, mnt_path = self._get_subshare_mount_point(nfs_share, volume) self._ensure_share_mounted(sub_share, mnt_path) + self._get_capacity_info(nfs_share) except exception.NexentaException: try: nms.folder.destroy('%s/%s' % (vol, folder)) @@ -453,6 +462,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 'already deleted.'), folder) return raise + self._get_capacity_info(nfs_share) origin = props.get('origin') if origin and self._is_clone_snapshot_name(origin): try: @@ -744,6 +754,9 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 'used|available') free = utils.str2size(folder_props['available']) allocated = utils.str2size(folder_props['used']) + self.shares_with_capacities[nfs_share] = { + 'free': utils.str2gib_size(free), + 'total': utils.str2gib_size(free + allocated)} return free + allocated, free, allocated def _get_nms_for_url(self, url): @@ -787,15 +800,12 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 LOG.debug('Updating volume stats') total_space = 0 free_space = 0 - shares_with_capacities = {} - for mounted_share in self._mounted_shares: - total, free, allocated = self._get_capacity_info(mounted_share) - shares_with_capacities[mounted_share] = utils.str2gib_size(total) - if total_space < utils.str2gib_size(total): - total_space = utils.str2gib_size(total) - if free_space < utils.str2gib_size(free): - free_space = utils.str2gib_size(free) - share = mounted_share + share = None + for _share in self._mounted_shares: + if self.shares_with_capacities[_share]['free'] > free_space: + free_space = self.shares_with_capacities[_share]['free'] + total_space = self.shares_with_capacities[_share]['total'] + share = _share location_info = '%(driver)s:%(share)s' % { 'driver': self.__class__.__name__, @@ -808,7 +818,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 'compression': self.volume_compression, 'description': self.volume_description, 'nms_url': nms_url, - 'ns_shares': shares_with_capacities, + 'ns_shares': self.shares_with_capacities, 'driver_version': self.VERSION, 'storage_protocol': 'NFS', 'total_capacity_gb': total_space, diff --git a/cinder/volume/drivers/nexenta/ns5/iscsi.py b/cinder/volume/drivers/nexenta/ns5/iscsi.py index bf26ef4c6..f4a49fa2a 100644 --- a/cinder/volume/drivers/nexenta/ns5/iscsi.py +++ b/cinder/volume/drivers/nexenta/ns5/iscsi.py @@ -40,6 +40,9 @@ class NexentaISCSIDriver(driver.ISCSIDriver): # pylint: disable=R0921 VERSION = VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Nexenta_CI" + def __init__(self, *args, **kwargs): super(NexentaISCSIDriver, self).__init__(*args, **kwargs) self.nef = None diff --git a/cinder/volume/drivers/nexenta/ns5/nfs.py b/cinder/volume/drivers/nexenta/ns5/nfs.py index 5db46a250..1d9d0f71d 100644 --- a/cinder/volume/drivers/nexenta/ns5/nfs.py +++ b/cinder/volume/drivers/nexenta/ns5/nfs.py @@ -44,6 +44,9 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 volume_backend_name = 'NexentaNfsDriver' VERSION = VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Nexenta_CI" + def __init__(self, *args, **kwargs): super(NexentaNfsDriver, self).__init__(*args, **kwargs) if self.configuration: diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py index cdd7a0e9f..573f6b24b 100644 --- a/cinder/volume/drivers/nfs.py +++ b/cinder/volume/drivers/nfs.py @@ -76,6 +76,9 @@ class NfsDriver(driver.ExtendVD, remotefs.RemoteFSDriver): volume_backend_name 
= 'Generic_NFS' VERSION = VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Cinder_Jenkins" + def __init__(self, execute=putils.execute, *args, **kwargs): self._remotefsclient = None super(NfsDriver, self).__init__(*args, **kwargs) @@ -111,17 +114,28 @@ class NfsDriver(driver.ExtendVD, remotefs.RemoteFSDriver): """Any initialization the volume driver does while starting.""" super(NfsDriver, self).do_setup(context) - config = self.configuration.nfs_shares_config - if not config: - msg = (_("There's no NFS config file configured (%s)") % - 'nfs_shares_config') - LOG.warning(msg) - raise exception.NfsException(msg) - if not os.path.exists(config): - msg = (_("NFS config file at %(config)s doesn't exist") % - {'config': config}) - LOG.warning(msg) - raise exception.NfsException(msg) + nas_host = getattr(self.configuration, + 'nas_host', + None) + nas_share_path = getattr(self.configuration, + 'nas_share_path', + None) + + # If both nas_host and nas_share_path are set we are not + # going to use the nfs_shares_config file. So, only check + # for its existence if it is going to be used. + if((not nas_host) or (not nas_share_path)): + config = self.configuration.nfs_shares_config + if not config: + msg = (_("There's no NFS config file configured (%s)") % + 'nfs_shares_config') + LOG.warning(msg) + raise exception.NfsException(msg) + if not os.path.exists(config): + msg = (_("NFS config file at %(config)s doesn't exist") % + {'config': config}) + LOG.warning(msg) + raise exception.NfsException(msg) self.shares = {} # address : options diff --git a/cinder/volume/drivers/nimble.py b/cinder/volume/drivers/nimble.py index 923babcc6..3d1d2d9c9 100644 --- a/cinder/volume/drivers/nimble.py +++ b/cinder/volume/drivers/nimble.py @@ -23,6 +23,7 @@ import math import random import re import six +import ssl import string import sys @@ -40,7 +41,7 @@ from cinder.volume.drivers.san import san from cinder.volume import volume_types -DRIVER_VERSION = '2.0.2' +DRIVER_VERSION = '3.0.0' AES_256_XTS_CIPHER = 2 DEFAULT_CIPHER = 3 EXTRA_SPEC_ENCRYPTION = 'nimble:encryption' @@ -63,6 +64,12 @@ SM_SUBNET_MGMT_PLUS_DATA = 4 LUN_ID = '0' WARN_LEVEL = 0.8 +# Work around for ubuntu_openssl_bug_965371. Python soap client suds +# throws the error ssl-certificate-verify-failed-error, workaround to disable +# ssl check for now +if hasattr(ssl, '_create_unverified_context'): + ssl._create_default_https_context = ssl._create_unverified_context + LOG = logging.getLogger(__name__) nimble_opts = [ @@ -103,10 +110,14 @@ class NimbleISCSIDriver(san.SanISCSIDriver): Added Manage/Unmanage volume support 2.0.1 - Added multi-initiator support through extra-specs 2.0.2 - Fixed supporting extra specs while cloning vols + 3.0.0 - Newton Support for Force Backup """ VERSION = DRIVER_VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Nimble_Storage_CI" + def __init__(self, *args, **kwargs): super(NimbleISCSIDriver, self).__init__(*args, **kwargs) self.APIExecutor = None @@ -215,14 +226,57 @@ class NimbleISCSIDriver(san.SanISCSIDriver): self.configuration.nimble_pool_name, reserve) return self._get_model_info(volume['name']) + def is_volume_backup_clone(self, volume): + """Check if the volume is created through cinder-backup workflow. 
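+
+        Returns a (snapshot name, parent volume name) pair when the
+        volume was cloned from a backup snapshot, or two empty strings
+        otherwise.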
+ + :param volume: reference to volume from delete_volume() + """ + vol_info = self.APIExecutor.get_vol_info(volume.name) + if vol_info['clone'] and vol_info['base-snap'] and vol_info[ + 'parent-vol']: + LOG.debug("Nimble base-snap exists for volume :%s", volume['name']) + volume_name_prefix = volume.name.replace(volume.id, "") + LOG.debug("volume_name_prefix : %s", volume_name_prefix) + snap_info = self.APIExecutor.get_snap_info(vol_info['base-snap'], + vol_info['parent-vol']) + if snap_info['description'] and "backup-vol-" in snap_info[ + 'description']: + parent_vol_id = vol_info['parent-vol' + ].replace(volume_name_prefix, "") + if "backup-vol-" + parent_vol_id in snap_info['description']: + LOG.info(_LI("nimble backup-snapshot exists name: %s"), + snap_info['name']) + return snap_info['name'], snap_info['vol'] + return "", "" + def delete_volume(self, volume): """Delete the specified volume.""" + snap_name, vol_name = self.is_volume_backup_clone(volume) self.APIExecutor.online_vol(volume['name'], False, ignore_list=['SM-enoent']) self.APIExecutor.dissociate_volcoll(volume['name'], ignore_list=['SM-enoent']) self.APIExecutor.delete_vol(volume['name'], ignore_list=['SM-enoent']) + # Nimble backend does not delete the snapshot from the parent volume + # if there is a dependent clone. So the deletes need to be in reverse + # order i.e. + # 1. First delete the clone volume used for backup + # 2. Delete the base snapshot used for clone from the parent volume. + # This is only done for the force backup clone operation as it is + # a temporary operation in which we are certain that the snapshot does + # not need to be preserved after the backup is completed. + + if snap_name and vol_name: + self.APIExecutor.online_snap(vol_name, + False, + snap_name, + ignore_list=['SM-ealready', + 'SM-enoent']) + self.APIExecutor.delete_snap(vol_name, + snap_name, + ignore_list=['SM-enoent']) + def _generate_random_string(self, length): """Generates random_string.""" char_set = string.ascii_lowercase @@ -256,7 +310,7 @@ class NimbleISCSIDriver(san.SanISCSIDriver): snapshot = {'volume_name': src_vref['name'], 'name': snapshot_name, 'volume_size': src_vref['size'], - 'display_name': '', + 'display_name': volume.display_name, 'display_description': ''} self.APIExecutor.snap_vol(snapshot) self._clone_volume_from_snapshot(volume, snapshot) @@ -483,7 +537,7 @@ class NimbleISCSIDriver(san.SanISCSIDriver): properties['target_discovered'] = False # whether discovery was used properties['target_portal'] = iscsi_portal properties['target_iqn'] = iqn - properties['target_lun'] = lun_num + properties['target_lun'] = int(lun_num) properties['volume_id'] = volume['id'] # used by xen currently return { 'driver_volume_type': 'iscsi', @@ -754,6 +808,31 @@ class NimbleAPIExecutor(object): vol_name) return response['vol'] + @_connection_checker + @_response_checker + def _execute_get_snap_info(self, snap_name, vol_name): + LOG.info(_LI('Getting snapshot information for %(vol_name)s ' + '%(snap_name)s'), {'vol_name': vol_name, + 'snap_name': snap_name}) + return self.client.service.getSnapInfo(request={'sid': self.sid, + 'vol': vol_name, + 'name': snap_name}) + + def get_snap_info(self, snap_name, vol_name): + """Get snapshot information. 
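+
+        Thin wrapper around _execute_get_snap_info() that unwraps and
+        returns the 'snap' element of the backend response.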
+ + :param snap_name: snapshot name + :param vol_name: volume name + :return: response object + """ + + response = self._execute_get_snap_info(snap_name, vol_name) + LOG.info(_LI('Successfully got snapshot information for snapshot ' + '%(snap)s and %(volume)s'), + {'snap': snap_name, + 'volume': vol_name}) + return response['snap'] + @_connection_checker @_response_checker def online_vol(self, vol_name, online_flag, *args, **kwargs): @@ -799,10 +878,12 @@ class NimbleAPIExecutor(object): volume_name = snapshot['volume_name'] snap_name = snapshot['name'] # Set snapshot description - display_list = [getattr(snapshot, 'display_name', ''), + display_list = [getattr(snapshot, 'display_name', snapshot[ + 'display_name']), getattr(snapshot, 'display_description', '')] snap_description = ':'.join(filter(None, display_list)) # Limit to 254 characters + LOG.debug("snap_description %s", snap_description) snap_description = snap_description[:254] LOG.info(_LI('Creating snapshot for volume_name=%(vol)s' ' snap_name=%(name)s snap_description=%(desc)s'), diff --git a/cinder/volume/drivers/prophetstor/dplcommon.py b/cinder/volume/drivers/prophetstor/dplcommon.py index 4c5c9ca5c..042bf9663 100644 --- a/cinder/volume/drivers/prophetstor/dplcommon.py +++ b/cinder/volume/drivers/prophetstor/dplcommon.py @@ -703,6 +703,9 @@ class DPLCOMMONDriver(driver.ConsistencyGroupVD, driver.ExtendVD, """Class of dpl storage adapter.""" VERSION = '2.0.4' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "ProphetStor_CI" + def __init__(self, *args, **kwargs): super(DPLCOMMONDriver, self).__init__(*args, **kwargs) if self.configuration: diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py index 29c663e67..02a847a98 100644 --- a/cinder/volume/drivers/pure.py +++ b/cinder/volume/drivers/pure.py @@ -146,6 +146,9 @@ class PureBaseVolumeDriver(san.SanDriver): SUPPORTED_REST_API_VERSIONS = ['1.2', '1.3', '1.4', '1.5'] + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Pure_Storage_CI" + def __init__(self, *args, **kwargs): execute = kwargs.pop("execute", utils.execute) super(PureBaseVolumeDriver, self).__init__(execute=execute, *args, @@ -410,7 +413,8 @@ class PureBaseVolumeDriver(san.SanDriver): with excutils.save_and_reraise_exception() as ctxt: if err.code == 400 and ( ERR_MSG_NOT_EXIST in err.text or - ERR_MSG_NO_SUCH_SNAPSHOT in err.text): + ERR_MSG_NO_SUCH_SNAPSHOT in err.text or + ERR_MSG_PENDING_ERADICATION in err.text): # Happens if the snapshot does not exist. ctxt.reraise = False LOG.warning(_LW("Unable to delete snapshot, assuming " @@ -894,7 +898,7 @@ class PureBaseVolumeDriver(san.SanDriver): """ volume_info = self._validate_manage_existing_ref(existing_ref) - size = int(math.ceil(float(volume_info["size"]) / units.Gi)) + size = self._round_bytes_to_gib(volume_info['size']) return size @@ -970,7 +974,7 @@ class PureBaseVolumeDriver(san.SanDriver): self._verify_manage_snap_api_requirements() snap_info = self._validate_manage_existing_ref(existing_ref, is_snap=True) - size = int(math.ceil(float(snap_info["size"]) / units.Gi)) + size = self._round_bytes_to_gib(snap_info['size']) return size def unmanage_snapshot(self, snapshot): @@ -989,6 +993,100 @@ class PureBaseVolumeDriver(san.SanDriver): "new_name": unmanaged_snap_name}) self._rename_volume_object(snap_name, unmanaged_snap_name) + def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, + sort_keys, sort_dirs): + """List volumes on the backend available for management by Cinder. 
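+
+        Each returned reference is a dict shaped like the following
+        (name and size are illustrative):
+
+            {'reference': {'name': 'some-purity-volume'},
+             'size': 10,
+             'safe_to_manage': True,
+             'reason_not_safe': None,
+             'cinder_id': None,
+             'extra_info': None}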
+ + Rule out volumes that are attached to a Purity host or that + are already in the list of cinder_volumes. We return references + of the volume names for any others. + """ + array = self._get_current_array() + pure_vols = array.list_volumes() + hosts_with_connections = array.list_hosts(all=True) + + # Put together a map of volumes that are connected to hosts + connected_vols = {} + for host in hosts_with_connections: + vol = host.get('vol') + if vol: + connected_vols[vol] = host['name'] + + # Put together a map of existing cinder volumes on the array + # so we can lookup cinder id's by purity volume names + existing_vols = {} + for cinder_vol in cinder_volumes: + existing_vols[self._get_vol_name(cinder_vol)] = cinder_vol.name_id + + manageable_vols = [] + for pure_vol in pure_vols: + vol_name = pure_vol['name'] + cinder_id = existing_vols.get(vol_name) + is_safe = True + reason_not_safe = None + host = connected_vols.get(vol_name) + + if host: + is_safe = False + reason_not_safe = _('Volume connected to host %s.') % host + + if cinder_id: + is_safe = False + reason_not_safe = _('Volume already managed.') + + manageable_vols.append({ + 'reference': {'name': vol_name}, + 'size': self._round_bytes_to_gib(pure_vol['size']), + 'safe_to_manage': is_safe, + 'reason_not_safe': reason_not_safe, + 'cinder_id': cinder_id, + 'extra_info': None, + }) + + return volume_utils.paginate_entries_list( + manageable_vols, marker, limit, offset, sort_keys, sort_dirs) + + def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, + sort_keys, sort_dirs): + """List snapshots on the backend available for management by Cinder.""" + array = self._get_current_array() + pure_snapshots = array.list_volumes(snap=True) + + # Put together a map of existing cinder snapshots on the array + # so we can lookup cinder id's by purity snapshot names + existing_snapshots = {} + for cinder_snap in cinder_snapshots: + name = self._get_snap_name(cinder_snap) + existing_snapshots[name] = cinder_snap.id + + manageable_snaps = [] + for pure_snap in pure_snapshots: + snap_name = pure_snap['name'] + cinder_id = existing_snapshots.get(snap_name) + is_safe = True + reason_not_safe = None + + if cinder_id: + is_safe = False + reason_not_safe = _("Snapshot already managed.") + + manageable_snaps.append({ + 'reference': {'name': snap_name}, + 'size': self._round_bytes_to_gib(pure_snap['size']), + 'safe_to_manage': is_safe, + 'reason_not_safe': reason_not_safe, + 'cinder_id': cinder_id, + 'extra_info': None, + 'source_reference': {'name': pure_snap['source']}, + }) + + return volume_utils.paginate_entries_list( + manageable_snaps, marker, limit, offset, sort_keys, sort_dirs) + + @staticmethod + def _round_bytes_to_gib(size): + return int(math.ceil(float(size) / units.Gi)) + def _get_flasharray(self, san_ip, api_token, rest_version=None, verify_https=None, ssl_cert_path=None): @@ -1697,7 +1795,7 @@ class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver): hosts = array.list_hosts() for host in hosts: for wwn in connector["wwpns"]: - if wwn in str(host["wwn"]).lower(): + if wwn.lower() in str(host["wwn"]).lower(): return host @staticmethod diff --git a/cinder/volume/drivers/quobyte.py b/cinder/volume/drivers/quobyte.py index 62d68b5c4..7d135fb0d 100644 --- a/cinder/volume/drivers/quobyte.py +++ b/cinder/volume/drivers/quobyte.py @@ -59,7 +59,7 @@ CONF.register_opts(volume_opts) @interface.volumedriver -class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver): +class 
QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): """Cinder driver for Quobyte USP. Volumes are stored as files on the mounted Quobyte volume. The hypervisor @@ -86,6 +86,9 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver): volume_backend_name = 'Quobyte' VERSION = VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Quobyte_CI" + def __init__(self, execute=processutils.execute, *args, **kwargs): super(QuobyteDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py index 8ecff13d5..356e2fed1 100644 --- a/cinder/volume/drivers/rbd.py +++ b/cinder/volume/drivers/rbd.py @@ -270,6 +270,9 @@ class RBDDriver(driver.TransferVD, driver.ExtendVD, VERSION = '1.2.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Cinder_Jenkins" + def __init__(self, *args, **kwargs): super(RBDDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(RBD_OPTS) diff --git a/cinder/volume/drivers/remotefs.py b/cinder/volume/drivers/remotefs.py index 31221c07d..0160b0a4d 100644 --- a/cinder/volume/drivers/remotefs.py +++ b/cinder/volume/drivers/remotefs.py @@ -29,6 +29,7 @@ from oslo_utils import units import six from cinder import compute +from cinder import coordination from cinder import db from cinder import exception from cinder.objects import fields @@ -104,6 +105,7 @@ CONF.register_opts(nas_opts) CONF.register_opts(volume_opts) +# TODO(bluex): remove when drivers stop using it def locked_volume_id_operation(f, external=False): """Lock decorator for volume operations. @@ -158,6 +160,7 @@ class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD): """Just to override parent behavior.""" pass + @utils.trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. @@ -225,6 +228,7 @@ class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD): " mount_point_base.") return None + @utils.trace def create_volume(self, volume): """Creates a volume. @@ -276,6 +280,7 @@ class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD): LOG.debug('Available shares %s', self._mounted_shares) + @utils.trace def delete_volume(self, volume): """Deletes a logical volume. @@ -627,7 +632,7 @@ class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD): return nas_option -class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): +class RemoteFSSnapDriverBase(RemoteFSDriver, driver.SnapshotVD): """Base class for remotefs drivers implementing qcow2 snapshots. 
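 
     The snapshot operations themselves live here; locking is applied by
     the subclasses at the end of this module, either per-process
     (RemoteFSSnapDriver) or through the coordination layer
     (RemoteFSSnapDriverDistributed), so that multiple volume services
     can safely share one backend.
 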
Driver must implement: @@ -640,10 +645,10 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): self._remotefsclient = None self.base = None self._nova = None - super(RemoteFSSnapDriver, self).__init__(*args, **kwargs) + super(RemoteFSSnapDriverBase, self).__init__(*args, **kwargs) def do_setup(self, context): - super(RemoteFSSnapDriver, self).do_setup(context) + super(RemoteFSSnapDriverBase, self).do_setup(context) self._nova = compute.API() @@ -742,6 +747,24 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): return json.loads(self._read_file(info_path)) + def _get_higher_image_path(self, snapshot): + volume = snapshot.volume + info_path = self._local_path_volume_info(volume) + snap_info = self._read_info_file(info_path) + + snapshot_file = snap_info[snapshot.id] + active_file = self.get_active_image_from_info(volume) + active_file_path = os.path.join(self._local_volume_dir(volume), + active_file) + backing_chain = self._get_backing_chain_for_path( + volume, active_file_path) + higher_file = next((os.path.basename(f['filename']) + for f in backing_chain + if f.get('backing-filename', '') == + snapshot_file), + None) + return higher_file + def _get_backing_chain_for_path(self, volume, path): """Returns list of dicts containing backing-chain information. @@ -1013,7 +1036,6 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): # Find what file has this as its backing file active_file = self.get_active_image_from_info(snapshot.volume) - active_file_path = os.path.join(vol_path, active_file) if volume_status == 'in-use': # Online delete @@ -1060,15 +1082,9 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): # exist, not | committed down) | exist, needs | # used here) | | ptr update) | - backing_chain = self._get_backing_chain_for_path( - snapshot.volume, active_file_path) # This file is guaranteed to exist since we aren't operating on # the active file. 
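         # For example, in a chain base.qcow2 <- snap_a.qcow2 <-
         # active.qcow2, the file "higher" than snap_a.qcow2 is
         # active.qcow2, whose 'backing-filename' entry points at
         # snap_a.qcow2.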
- higher_file = next((os.path.basename(f['filename']) - for f in backing_chain - if f.get('backing-filename', '') == - snapshot_file), - None) + higher_file = self._get_higher_image_path(snapshot) if higher_file is None: msg = _('No file found with %s as backing file.') %\ snapshot_file @@ -1127,19 +1143,20 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): new qcow2 file :param new_snap_path: filename of new qcow2 file """ - backing_path_full_path = os.path.join( self._local_volume_dir(snapshot.volume), backing_filename) - - command = ['qemu-img', 'create', '-f', 'qcow2', '-o', - 'backing_file=%s' % backing_path_full_path, new_snap_path] - self._execute(*command, run_as_root=self._execute_as_root) - info = self._qemu_img_info(backing_path_full_path, snapshot.volume.name) backing_fmt = info.file_format + command = ['qemu-img', 'create', '-f', 'qcow2', '-o', + 'backing_file=%s,backing_fmt=%s' % + (backing_path_full_path, backing_fmt), + new_snap_path, + "%dG" % snapshot.volume.size] + self._execute(*command, run_as_root=self._execute_as_root) + command = ['qemu-img', 'rebase', '-u', '-b', backing_filename, '-F', backing_fmt, @@ -1440,6 +1457,8 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): self._local_volume_dir(snapshot.volume), file_to_delete) self._execute('rm', '-f', path_to_delete, run_as_root=True) + +class RemoteFSSnapDriver(RemoteFSSnapDriverBase): @locked_volume_id_operation def create_snapshot(self, snapshot): """Apply locking to the create snapshot operation.""" @@ -1459,13 +1478,43 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): @locked_volume_id_operation def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" + return self._create_cloned_volume(volume, src_vref) @locked_volume_id_operation def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" - return self._copy_volume_to_image(context, - volume, - image_service, + return self._copy_volume_to_image(context, volume, image_service, + image_meta) + + +class RemoteFSSnapDriverDistributed(RemoteFSSnapDriverBase): + @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') + def create_snapshot(self, snapshot): + """Apply locking to the create snapshot operation.""" + + return self._create_snapshot(snapshot) + + @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') + def delete_snapshot(self, snapshot): + """Apply locking to the delete snapshot operation.""" + + return self._delete_snapshot(snapshot) + + @coordination.synchronized('{self.driver_prefix}-{volume.id}') + def create_volume_from_snapshot(self, volume, snapshot): + return self._create_volume_from_snapshot(volume, snapshot) + + @coordination.synchronized('{self.driver_prefix}-{volume.id}') + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + + return self._create_cloned_volume(volume, src_vref) + + @coordination.synchronized('{self.driver_prefix}-{volume.id}') + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + + return self._copy_volume_to_image(context, volume, image_service, image_meta) diff --git a/cinder/volume/drivers/scality.py b/cinder/volume/drivers/scality.py index fd25df843..eb1c6b763 100644 --- a/cinder/volume/drivers/scality.py +++ b/cinder/volume/drivers/scality.py @@ -67,6 +67,9 @@ class ScalityDriver(remotefs_drv.RemoteFSSnapDriver): 
volume_backend_name = 'Scality_SOFS' VERSION = '2.0.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Scality_CI" + def __init__(self, *args, **kwargs): super(ScalityDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) diff --git a/cinder/volume/drivers/sheepdog.py b/cinder/volume/drivers/sheepdog.py index 312662865..1f3e7e0f9 100644 --- a/cinder/volume/drivers/sheepdog.py +++ b/cinder/volume/drivers/sheepdog.py @@ -313,6 +313,15 @@ class SheepdogClient(object): LOG.error(_LE('Failed to get volume status. %s'), e) return _stdout + def get_vdi_info(self, vdiname): + # Get info of the specified vdi. + try: + (_stdout, _stderr) = self._run_dog('vdi', 'list', vdiname, '-r') + except exception.SheepdogCmdError as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE('Failed to get vdi info. %s'), e) + return _stdout + def update_node_list(self): try: (_stdout, _stderr) = self._run_dog('node', 'list', '-r') @@ -430,6 +439,9 @@ class SheepdogDriver(driver.VolumeDriver): VERSION = "1.0.0" + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Cinder_Jenkins" + def __init__(self, *args, **kwargs): super(SheepdogDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(sheepdog_opts) @@ -463,21 +475,19 @@ class SheepdogDriver(driver.VolumeDriver): image_meta['disk_format']) return False - cloneable = False # check whether volume is stored in sheepdog - try: - # The image location would be like - # "sheepdog://192.168.10.2:7000:Alice" - (ip, port, name) = image_location[len(prefix):].split(":", 2) + # The image location would be like + # "sheepdog://192.168.10.2:7000:Alice" + (ip, port, name) = image_location[len(prefix):].split(":", 2) - self._try_execute('collie', 'vdi', 'list', '--address', ip, - '--port', port, name) - cloneable = True - except processutils.ProcessExecutionError as e: - LOG.debug("Can not find vdi %(image)s: %(err)s", - {'image': name, 'err': e}) - - return cloneable + stdout = self.client.get_vdi_info(name) + # Dog command return 0 and has a null output if the volume not exists + if stdout: + return True + else: + LOG.debug("Can not find vdi %(image)s, is not cloneable", + {'image': name}) + return False def clone_image(self, context, volume, image_location, image_meta, diff --git a/cinder/volume/drivers/smbfs.py b/cinder/volume/drivers/smbfs.py index 37668592a..16728e9e6 100644 --- a/cinder/volume/drivers/smbfs.py +++ b/cinder/volume/drivers/smbfs.py @@ -108,6 +108,9 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver): SHARE_FORMAT_REGEX = r'//.+/.+' VERSION = VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Cinder_Jenkins" + _MINIMUM_QEMU_IMG_VERSION = '1.7' _DISK_FORMAT_VHD = 'vhd' diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py index cbb7fc6fb..796fc2c47 100644 --- a/cinder/volume/drivers/solidfire.py +++ b/cinder/volume/drivers/solidfire.py @@ -1,6 +1,6 @@ # All Rights Reserved. # Copyright 2013 SolidFire Inc -# + # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import inspect import json import math import random @@ -37,6 +38,7 @@ from cinder.i18n import _, _LE, _LW from cinder.image import image_utils from cinder import interface from cinder.objects import fields +from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume.targets import iscsi as iscsi_driver @@ -151,9 +153,15 @@ class SolidFireDriver(san.SanISCSIDriver): 2.0.4 - Implement volume replication 2.0.5 - Try and deal with the stupid retry/clear issues from objects and tflow + 2.0.6 - Add a lock decorator around the clone_image method """ - VERSION = '2.0.2' + VERSION = '2.0.6' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "SolidFire_CI" + + driver_prefix = 'solidfire' sf_qos_dict = {'slow': {'minIOPS': 100, 'maxIOPS': 200, @@ -746,6 +754,24 @@ class SolidFireDriver(san.SanISCSIDriver): return self._issue_api_request( 'ListSnapshots', params, version='6.0')['result']['snapshots'] + def locked_image_id_operation(f, external=False): + def lvo_inner1(inst, *args, **kwargs): + lock_tag = inst.driver_prefix + call_args = inspect.getcallargs(f, inst, *args, **kwargs) + + if call_args.get('image_meta'): + image_id = call_args['image_meta']['id'] + else: + err_msg = _('The decorated method must accept image_meta.') + raise exception.VolumeBackendAPIException(data=err_msg) + + @utils.synchronized('%s-%s' % (lock_tag, image_id), + external=external) + def lvo_inner2(): + return f(inst, *args, **kwargs) + return lvo_inner2() + return lvo_inner1 + def _create_image_volume(self, context, image_meta, image_service, image_id): @@ -839,24 +865,22 @@ class SolidFireDriver(san.SanISCSIDriver): params = {'accountID': self.template_account_id} sf_vol = self._get_sf_volume(image_meta['id'], params) - if sf_vol is None: + if not sf_vol: + self._create_image_volume(context, + image_meta, + image_service, + image_meta['id']) return - # Check updated_at field, delete copy and update if needed - if sf_vol['attributes']['image_info']['image_updated_at'] == ( + if sf_vol['attributes']['image_info']['image_updated_at'] != ( image_meta['updated_at'].isoformat()): - return - else: - # Bummer, it's been updated, delete it params = {'accountID': self.template_account_id} params['volumeID'] = sf_vol['volumeID'] self._issue_api_request('DeleteVolume', params) - if not self._create_image_volume(context, - image_meta, - image_service, - image_meta['id']): - msg = _("Failed to create SolidFire Image-Volume") - raise exception.SolidFireAPIException(msg) + self._create_image_volume(context, + image_meta, + image_service, + image_meta['id']) def _get_sfaccounts_for_tenant(self, cinder_project_id): accounts = self._issue_api_request( @@ -1082,6 +1106,7 @@ class SolidFireDriver(san.SanISCSIDriver): for vag in sorted_targets[:limit]: self._remove_vag(vag['volumeAccessGroupID']) + @locked_image_id_operation def clone_image(self, context, volume, image_location, image_meta, image_service): @@ -1116,21 +1141,9 @@ class SolidFireDriver(san.SanISCSIDriver): except exception.SolidFireAPIException: return None, False - try: - (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], - volume) - except exception.VolumeNotFound: - if self._create_image_volume(context, - image_meta, - image_service, - image_meta['id']) is None: - # We failed, dump out - return None, False - - # Ok, should be good to go now, try it again - (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], - volume) - + # Ok, should be good to go now, try it again + (data, 
sfaccount, model) = self._do_clone_volume(image_meta['id'], + volume) return model, True def _retrieve_qos_setting(self, volume): @@ -1317,7 +1330,7 @@ class SolidFireDriver(san.SanISCSIDriver): for acc in accounts: vols = self._get_volumes_for_account(acc['accountID'], - volume['id']) + volume['name_id']) if vols: sf_vol = vols[0] break diff --git a/cinder/volume/drivers/synology/synology_common.py b/cinder/volume/drivers/synology/synology_common.py index 7f6475ced..3be35cb26 100644 --- a/cinder/volume/drivers/synology/synology_common.py +++ b/cinder/volume/drivers/synology/synology_common.py @@ -15,16 +15,17 @@ import base64 import functools -from hashlib import md5 +import hashlib import json +import math from random import randint import string -import time from Crypto.Cipher import AES from Crypto.Cipher import PKCS1_v1_5 from Crypto.PublicKey import RSA from Crypto import Random +import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils @@ -53,7 +54,7 @@ cinder_opts = [ help='Administrator of Synology storage.'), cfg.StrOpt('password', default='', - help='Password of administator for logging in ' + help='Password of administrator for logging in ' 'Synology storage.', secret=True), cfg.BoolOpt('ssl_verify', @@ -62,7 +63,7 @@ cinder_opts = [ '$driver_use_ssl is True'), cfg.StrOpt('one_time_pass', default=None, - help='One time password of administator for logging in ' + help='One time password of administrator for logging in ' 'Synology storage if OTP is enabled.', secret=True), cfg.StrOpt('device_id', @@ -99,7 +100,7 @@ class AESCipher(object): d = d_i = '' while len(d) < key_length + iv_length: md5_str = d_i + password + salt - d_i = md5(md5_str).digest() + d_i = hashlib.md5(md5_str).digest() d += d_i return d[:key_length], d[key_length:key_length + iv_length] @@ -162,7 +163,7 @@ class Session(object): if one_time_pass and not device_id: self._did = result['data']['did'] else: - raise exception.SynoAuthError(_('Login failed.')) + raise exception.SynoAuthError(reason=_('Login failed.')) def _random_AES_passpharse(self, length): available = ('0123456789' @@ -369,7 +370,8 @@ class APIRequest(object): if ('error' in result and 'code' in result["error"] and result['error']['code'] == 105): - raise exception.SynoAuthError(_('Session might have expired.')) + raise exception.SynoAuthError(reason=_('Session might have ' + 'expired.')) return result @@ -462,8 +464,39 @@ class SynoCommon(object): free_capacity_gb = int(int(info['size_free_byte']) / units.Gi) total_capacity_gb = int(int(info['size_total_byte']) / units.Gi) + other_user_data_gb = int(math.ceil((float(info['size_total_byte']) - + float(info['size_free_byte']) - + float(info['eppool_used_byte'])) / + units.Gi)) - return free_capacity_gb, total_capacity_gb + return free_capacity_gb, total_capacity_gb, other_user_data_gb + + def _get_pool_lun_provisioned_size(self): + pool_name = self.config.pool_name + if not pool_name: + raise exception.InvalidConfigurationValue(option='pool_name', + value=pool_name) + try: + out = self.exec_webapi('SYNO.Core.ISCSI.LUN', + 'list', + 1, + location='/' + pool_name) + + self.check_response(out) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_LE('Failed to _get_pool_lun_provisioned_size.')) + + if not self.check_value_valid(out, ['data', 'luns'], list): + raise exception.MalformedResponse( + cmd='_get_pool_lun_provisioned_size', + reason=_('no data found')) + + size = 0 + for lun in out['data']['luns']: + size += 
lun['size'] + + return int(math.ceil(float(size) / units.Gi)) def _get_lun_info(self, lun_name, additional=None): if not lun_name: @@ -503,7 +536,7 @@ class SynoCommon(object): LOG.exception(_LE('Failed to _get_lun_uuid. [%s]'), lun_name) if not self.check_value_valid(lun_info, ['uuid'], string_types): - raise exception.MalformedResponse(cmd='_get_lun_info', + raise exception.MalformedResponse(cmd='_get_lun_uuid', reason=_('uuid not found')) return lun_info['uuid'] @@ -719,13 +752,13 @@ class SynoCommon(object): status, locked = self._get_lun_status(volume_name) if not locked: break - time.sleep(2) + eventlet.sleep(2) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Failed to get lun status. [%s]'), volume_name) - LOG.debug(_LE('Lun [%(vol)s], status [%(status)s].'), + LOG.debug('Lun [%(vol)s], status [%(status)s].', {'vol': volume_name, 'status': status}) return status == 'normal' @@ -737,13 +770,13 @@ class SynoCommon(object): status, locked = self._get_snapshot_status(snapshot_uuid) if not locked: break - time.sleep(2) + eventlet.sleep(2) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Failed to get snapshot status. [%s]'), snapshot_uuid) - LOG.debug(_LE('Lun [%(snapshot)s], status [%(status)s].'), + LOG.debug('Snapshot [%(snapshot)s], status [%(status)s].', {'snapshot': snapshot_uuid, 'status': status}) return status == 'Healthy' @@ -760,7 +793,7 @@ class SynoCommon(object): LUN_NO_SUCH_SNAPSHOT = 18990532 if not self.check_value_valid(out, ['error', 'code'], int): - raise exception.MalformedResponse(cmd='exec_webapi', + raise exception.MalformedResponse(cmd='_check_iscsi_response', reason=_('no error code found')) code = out['error']['code'] @@ -785,7 +818,7 @@ class SynoCommon(object): def _check_ds_pool_status(self): pool_info = self._get_pool_info() if not self.check_value_valid(pool_info, ['readonly'], bool): - raise exception.MalformedResponse(cmd='check_for_setup_error', + raise exception.MalformedResponse(cmd='_check_ds_pool_status', reason=_('no readonly found')) if pool_info['readonly']: @@ -955,9 +988,22 @@ class SynoCommon(object): self._check_ds_ability() def update_volume_stats(self): - """Update volume statistics.""" + """Update volume statistics. - free_capacity_gb, total_capacity_gb = self._get_pool_size() + Three kinds of data are stored on the Synology backend pool: + 1. Thin volumes (LUNs on the pool), + 2. Thick volumes (LUNs on the pool), + 3. Other user data. + + other_user_data_gb is the size of the third kind. + lun_provisioned_gb is the sum of the provisioned sizes of all + thin and thick volumes. + + Only the thin type is available for Cinder volumes. + """ + + free_gb, total_gb, other_user_data_gb = self._get_pool_size() + lun_provisioned_gb = self._get_pool_lun_provisioned_size() data = {} data['volume_backend_name'] = self.volume_backend_name @@ -967,10 +1013,14 @@ class SynoCommon(object): data['QoS_support'] = False data['thin_provisioning_support'] = True data['thick_provisioning_support'] = False - data['reserved_percentage'] = 0 + data['reserved_percentage'] = self.config.reserved_percentage - data['free_capacity_gb'] = free_capacity_gb - data['total_capacity_gb'] = total_capacity_gb + data['free_capacity_gb'] = free_gb + data['total_capacity_gb'] = total_gb + data['provisioned_capacity_gb'] = (lun_provisioned_gb + + other_user_data_gb) + data['max_over_subscription_ratio'] = (self.config.
max_over_subscription_ratio) data['iscsi_ip_address'] = self.config.iscsi_ip_address data['pool_name'] = self.config.pool_name @@ -1091,7 +1141,7 @@ class SynoCommon(object): if not self.check_value_valid(resp, ['data', 'snapshot_uuid'], string_types): - raise exception.MalformedResponse(cmd='take_snapshot', + raise exception.MalformedResponse(cmd='create_snapshot', reason=_('uuid not found')) snapshot_uuid = resp['data']['snapshot_uuid'] @@ -1116,7 +1166,7 @@ class SynoCommon(object): 'delete_snapshot', 1, snapshot_uuid=ds_snapshot_uuid, - delete_by='Cinder') + deleted_by='Cinder') self.check_response(out, snapshot_id=snapshot['id']) diff --git a/cinder/volume/drivers/tegile.py b/cinder/volume/drivers/tegile.py index 5b84fc79e..019043e0c 100644 --- a/cinder/volume/drivers/tegile.py +++ b/cinder/volume/drivers/tegile.py @@ -152,6 +152,9 @@ class TegileIntelliFlashVolumeDriver(san.SanDriver): 'san_password', 'tegile_default_pool'] SNAPSHOT_PREFIX = 'Manual-V-' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Tegile_Storage_CI" + _api_executor = None def __init__(self, *args, **kwargs): diff --git a/cinder/volume/drivers/tintri.py b/cinder/volume/drivers/tintri.py index 1dd492c56..8654df184 100644 --- a/cinder/volume/drivers/tintri.py +++ b/cinder/volume/drivers/tintri.py @@ -86,6 +86,9 @@ class TintriDriver(driver.ManageableVD, VENDOR = 'Tintri' VERSION = '2.2.0.1' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Tintri_CI" + REQUIRED_OPTIONS = ['tintri_server_hostname', 'tintri_server_username', 'tintri_server_password'] @@ -735,8 +738,8 @@ class TintriDriver(driver.ManageableVD, try: volume_path = os.path.join(nfs_mount, volume_name) - vol_size = math.ceil(float(utils.get_file_size(volume_path)) / - units.Gi) + vol_size = int(math.ceil(float(utils.get_file_size(volume_path)) / + units.Gi)) except OSError: msg = (_('Failed to get size of volume %s') % existing_ref['source-name']) diff --git a/cinder/volume/drivers/violin/v7000_common.py b/cinder/volume/drivers/violin/v7000_common.py index 49860b6aa..dbf056ad2 100644 --- a/cinder/volume/drivers/violin/v7000_common.py +++ b/cinder/volume/drivers/violin/v7000_common.py @@ -77,17 +77,20 @@ violin_opts = [ help='Global backend request timeout, in seconds.'), cfg.ListOpt('violin_dedup_only_pools', default=[], - help='Storage pools to be used to setup dedup luns only.'), + help='Storage pools to be used to set up dedup luns only. ' + '(Comma separated list)'), cfg.ListOpt('violin_dedup_capable_pools', default=[], - help='Storage pools capable of dedup and other luns.'), + help='Storage pools capable of dedup and other luns. ' + '(Comma separated list)'), cfg.StrOpt('violin_pool_allocation_method', default='random', choices=['random', 'largest', 'smallest'], help='Method of choosing a storage pool for a lun.'), cfg.ListOpt('violin_iscsi_target_ips', default=[], - help='List of target iSCSI addresses to use.'), + help='Target iSCSI addresses to use. '
+ '(Comma separated list)'), ] CONF = cfg.CONF @@ -123,7 +126,7 @@ class V7000Common(object): if (self.config.violin_dedup_only_pools == [] and self.config.violin_dedup_capable_pools == []): - LOG.warning(_LW("Storage pools not configured")) + LOG.warning(_LW("Storage pools not configured.")) raise exception.InvalidInput( reason=_('Storage pool configuration is ' 'mandatory for external head')) @@ -1046,8 +1049,14 @@ class V7000Common(object): 'oid': oid, 'snap_id': cinder_snapshot_id}) - ans = self.vmem_mg.snapshot.delete_lun_snapshot( - snapshot_object_id=oid) + try: + ans = self.vmem_mg.snapshot.delete_lun_snapshot( + snapshot_object_id=oid) + except Exception: + msg = (_("Failed to delete snapshot " + "%(snap)s of volume %(vol)s") % + {'snap': cinder_snapshot_id, 'vol': cinder_volume_id}) + raise exception.ViolinBackendErr(msg) if ans['success']: LOG.debug("Delete snapshot %(snap_id)s of %(vol)s: " @@ -1064,11 +1073,7 @@ class V7000Common(object): timer = loopingcall.FixedIntervalLoopingCall(_loop_func) success = timer.start(interval=1).wait() - if not success: - msg = (_("Failed to delete snapshot " - "%(snap)s of volume %(vol)s") % - {'snap': cinder_snapshot_id, 'vol': cinder_volume_id}) - raise exception.ViolinBackendErr(msg) + return success def _validate_lun_type_for_copy(self, lun_type): """Make sure volume type is thick. diff --git a/cinder/volume/drivers/violin/v7000_fcp.py b/cinder/volume/drivers/violin/v7000_fcp.py index 618d112cd..7a8fa3a0f 100644 --- a/cinder/volume/drivers/violin/v7000_fcp.py +++ b/cinder/volume/drivers/violin/v7000_fcp.py @@ -61,6 +61,9 @@ class V7000FCPDriver(driver.FibreChannelDriver): VERSION = '1.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Violin_Memory_CI" + def __init__(self, *args, **kwargs): super(V7000FCPDriver, self).__init__(*args, **kwargs) self.gateway_fc_wwns = [] diff --git a/cinder/volume/drivers/violin/v7000_iscsi.py b/cinder/volume/drivers/violin/v7000_iscsi.py index baf384a1e..57d3f07a0 100644 --- a/cinder/volume/drivers/violin/v7000_iscsi.py +++ b/cinder/volume/drivers/violin/v7000_iscsi.py @@ -54,6 +54,9 @@ class V7000ISCSIDriver(driver.ISCSIDriver): VERSION = '1.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Violin_Memory_CI" + def __init__(self, *args, **kwargs): super(V7000ISCSIDriver, self).__init__(*args, **kwargs) self.stats = {} @@ -79,7 +82,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): # Getting iscsi IPs from the array is incredibly expensive, # so only do it once. if not self.configuration.violin_iscsi_target_ips: - LOG.warning(_LW("iSCSI target ip addresses not configured ")) + LOG.warning(_LW("iSCSI target ip addresses not configured. ")) self.gateway_iscsi_ip_addresses = ( self.common.vmem_mg.utility.get_iscsi_interfaces()) else: @@ -260,7 +263,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): """ v = self.common.vmem_mg - LOG.info(_LI("Unexporting lun %(vol)s host is %(host)s"), + LOG.info(_LI("Unexporting lun %(vol)s host is %(host)s."), {'vol': volume['id'], 'host': connector['host']}) try: @@ -269,7 +272,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): volume['id'], target, True) except exception.ViolinBackendErrNotFound: - LOG.info(_LI("Lun %s already unexported, continuing"), + LOG.info(_LI("Lun %s already unexported, continuing..."), volume['id']) except Exception: @@ -284,7 +287,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): client info has the lun_id. Note: The structure returned for iscsi is different from the - one returned for FC. 
Therefore this funtion is here instead of + one returned for FC. Therefore this function is here instead of common. Arguments: @@ -310,7 +313,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): client info has the lun_id. Note: The structure returned for iscsi is different from the - one returned for FC. Therefore this funtion is here instead of + one returned for FC. Therefore this function is here instead of common. Arguments: diff --git a/cinder/volume/drivers/vmware/datastore.py b/cinder/volume/drivers/vmware/datastore.py index 24da57a91..eab7c7675 100644 --- a/cinder/volume/drivers/vmware/datastore.py +++ b/cinder/volume/drivers/vmware/datastore.py @@ -294,7 +294,12 @@ class DatastoreSelector(object): return True profile_id = self.get_profile_id(profile_name) - is_compliant = bool(self._filter_by_profile([datastore], profile_id)) + # _filter_by_profile expects a map of datastore references to its + # properties. It only uses the properties to construct a map of + # filtered datastores to its properties. Here we don't care about + # the datastore property, so pass it as None. + is_compliant = bool(self._filter_by_profile({datastore: None}, + profile_id)) LOG.debug("Compliance is %(is_compliant)s for datastore: " "%(datastore)s against profile: %(profile)s.", {'is_compliant': is_compliant, diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py index 1496b2918..c9ffbe03f 100644 --- a/cinder/volume/drivers/vmware/vmdk.py +++ b/cinder/volume/drivers/vmware/vmdk.py @@ -221,6 +221,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): # 1.6.0 - support for manage existing VERSION = '1.6.0' + # ThirdaPartySystems wiki page + CI_WIKI_NAME = "VMware_CI" + # Minimum supported vCenter version. MIN_SUPPORTED_VC_VERSION = dist_version.LooseVersion('5.1') @@ -510,7 +513,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): """ connection_info = {'driver_volume_type': 'vmdk'} - backing = self.volumeops.get_backing(volume['name']) + backing = self.volumeops.get_backing(volume.name) if 'instance' in connector: # The instance exists instance = vim_util.get_moref(connector['instance'], @@ -523,7 +526,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): # Create a backing in case it does not exist under the # host managing the instance. LOG.info(_LI("There is no backing for the volume: %s. " - "Need to create one."), volume['name']) + "Need to create one."), volume.name) backing = self._create_backing(volume, host) else: # Relocate volume is necessary @@ -536,18 +539,19 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): # Create a backing in case it does not exist. It is a bad use # case to boot from an empty volume. LOG.warning(_LW("Trying to boot from an empty volume: %s."), - volume['name']) + volume.name) # Create backing backing = self._create_backing(volume) - # Set volume's moref value and name + # Set volume ID and backing moref value and name. 
connection_info['data'] = {'volume': backing.value, - 'volume_id': volume['id']} + 'volume_id': volume.id, + 'name': volume.name} LOG.info(_LI("Returning connection_info: %(info)s for volume: " "%(volume)s with connector: %(connector)s."), {'info': connection_info, - 'volume': volume['name'], + 'volume': volume.name, 'connector': connector}) return connection_info @@ -1199,8 +1203,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): vmdk_file_path=vmdk_file_path, vmdk_size=volume['size'] * units.Gi, image_name=image_meta['name'], - image_version=1, - is_public=image_meta['is_public']) + image_version=1) LOG.info(_LI("Done copying volume %(vol)s to a new image %(img)s"), {'vol': volume['name'], 'img': image_meta['name']}) @@ -1297,8 +1300,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): req[hub.DatastoreSelector.PROFILE_NAME] = new_profile # Select datastore satisfying the requirements. - best_candidate = self.ds_sel.select_datastore(req) - if not best_candidate: + try: + best_candidate = self._select_datastore(req) + except vmdk_exceptions.NoValidDatastoreException: # No candidate datastores; can't retype. LOG.warning(_LW("There are no datastores matching new " "requirements; can't retype volume: %s."), diff --git a/cinder/volume/drivers/vzstorage.py b/cinder/volume/drivers/vzstorage.py index 1216587d4..09509c646 100644 --- a/cinder/volume/drivers/vzstorage.py +++ b/cinder/volume/drivers/vzstorage.py @@ -23,6 +23,7 @@ from os_brick.remotefs import remotefs from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging +from oslo_utils import imageutils from oslo_utils import units from cinder import exception @@ -133,6 +134,9 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): driver_prefix = 'vzstorage' volume_backend_name = 'Virtuozzo_Storage' VERSION = VERSION + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Virtuozzo_Storage_CI" + SHARE_FORMAT_REGEX = r'(?:(\S+):\/)?([a-zA-Z0-9_-]+)(?::(\S+))?' 
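# Illustrative sketch (not part of the patch) of how the vzstorage
# SHARE_FORMAT_REGEX above decomposes a share specification into an
# optional MDS list, a cluster name, and an optional password. The sample
# share strings are hypothetical.
import re

SHARE_FORMAT_REGEX = r'(?:(\S+):\/)?([a-zA-Z0-9_-]+)(?::(\S+))?'

print(re.match(SHARE_FORMAT_REGEX, 'mds1,mds2:/cluster1:secret').groups())
# -> ('mds1,mds2', 'cluster1', 'secret')
print(re.match(SHARE_FORMAT_REGEX, 'cluster1').groups())
# -> (None, 'cluster1', None)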
def __init__(self, execute=putils.execute, *args, **kwargs): @@ -155,8 +159,31 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): vzstorage_mount_options=opts) def _qemu_img_info(self, path, volume_name): - return super(VZStorageDriver, self)._qemu_img_info_base( - path, volume_name, self.configuration.vzstorage_mount_point_base) + qemu_img_cache = path + ".qemu_img_info" + if os.path.isdir(path): + # 'parallels' disks are stored as directories along with their + # metadata xml; qemu-img should examine the base data file inside + path = os.path.join(path, PLOOP_BASE_DELTA_NAME) + if os.path.isfile(qemu_img_cache): + info_tm = os.stat(qemu_img_cache).st_mtime + snap_tm = os.stat(path).st_mtime + ret = None + if not os.path.isfile(qemu_img_cache) or snap_tm > info_tm: + LOG.debug("Cached qemu-img info %s not present or outdated;" + " refreshing", qemu_img_cache) + ret = super(VZStorageDriver, self)._qemu_img_info_base( + path, volume_name, + self.configuration.vzstorage_mount_point_base) + # We need only backing_file and file_format + d = {'file_format': ret.file_format, + 'backing_file': ret.backing_file} + with open(qemu_img_cache, "w") as f: + json.dump(d, f) + else: + ret = imageutils.QemuImgInfo() + with open(qemu_img_cache, "r") as f: + ret.__dict__ = json.load(f) + return ret @remotefs_drv.locked_volume_id_operation def initialize_connection(self, volume, connector): @@ -294,9 +321,12 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): self.configuration.vzstorage_default_volume_format) def get_volume_format(self, volume): - info_path = self._local_path_volume_info(volume) - snap_info = self._read_info_file(info_path) - return snap_info['volume_format'] + active_file = self.get_active_image_from_info(volume) + active_file_path = os.path.join(self._local_volume_dir(volume), + active_file) + + img_info = self._qemu_img_info(active_file_path, volume.name) + return img_info.file_format def _create_ploop(self, volume_path, volume_size): os.mkdir(volume_path) @@ -309,7 +339,7 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): raise def _do_create_volume(self, volume): - """Create a volume on given smbfs_share. + """Create a volume on given vzstorage share. :param volume: volume reference """ @@ -334,10 +364,12 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): self._create_regular_file(volume_path, volume_size) info_path = self._local_path_volume_info(volume) - snap_info = {'volume_format': volume_format, - 'active': 'volume-%s' % volume.id} + snap_info = {'active': os.path.basename(volume_path)} self._write_info_file(info_path, snap_info) + # Query qemu-img info to cache the output + self._qemu_img_info(volume_path, volume.name) + def _delete(self, path): self._execute('rm', '-rf', path, run_as_root=True) @@ -415,6 +447,8 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): self._do_extend_volume(self.local_path(volume), volume.size, volume_format) + # Query qemu-img info to cache the output + self._qemu_img_info(self.local_path(volume), volume.name) def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume.
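# A compact sketch of the mtime-based caching scheme used by the reworked
# _qemu_img_info() above: the JSON sidecar ('<image>.qemu_img_info') is
# trusted only while it is at least as new as the image itself; otherwise
# the info is re-probed and the sidecar rewritten. The probe callable is a
# stand-in for the real qemu-img query; paths are illustrative.
import json
import os

def cached_img_info(image_path, probe):
    cache = image_path + '.qemu_img_info'
    if (os.path.isfile(cache)
            and os.stat(cache).st_mtime >= os.stat(image_path).st_mtime):
        with open(cache) as f:
            return json.load(f)  # reuse cached file_format/backing_file
    info = probe(image_path)     # e.g. parse 'qemu-img info' output here
    with open(cache, 'w') as f:
        json.dump(info, f)
    return info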
@@ -450,19 +484,15 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): with PloopDevice(self.local_path(snapshot.volume), snapshot.id, execute=self._execute) as dev: - image_utils.convert_image(dev, volume_path, out_format) + base_file = os.path.join(volume_path, 'root.hds') + image_utils.convert_image(dev, base_file, out_format) else: msg = _("Unsupported volume format %s") % volume_format raise exception.InvalidVolume(msg) - if out_format == DISK_FORMAT_PLOOP: - img_path = os.path.join(volume_path, 'root.hds') - os.rename(volume_path, volume_path + '.tmp') - os.mkdir(volume_path) - os.rename(volume_path + '.tmp', img_path) - self._execute('ploop', 'restore-descriptor', volume_path, img_path) - self._extend_volume(volume, volume_size, out_format) + # Query qemu-img info to cache the output + self._qemu_img_info(volume_path, volume.name) @remotefs_drv.locked_volume_id_operation def delete_volume(self, volume): @@ -472,7 +502,6 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): 'specified, skipping.') % volume.name) LOG.error(msg) return -# raise exception.VzStorageException(msg) self._ensure_share_mounted(volume.provider_location) volume_dir = self._local_volume_dir(volume) @@ -480,6 +509,7 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): self.get_active_image_from_info(volume)) if os.path.exists(mounted_path): self._delete(mounted_path) + self._delete(mounted_path + ".qemu_img_info") else: LOG.info(_LI("Skipping deletion of volume %s " "as it does not exist."), mounted_path) @@ -530,13 +560,69 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): else: super(VZStorageDriver, self)._create_snapshot(snapshot) + def _do_create_snapshot(self, snapshot, backing_filename, + new_snap_path): + super(VZStorageDriver, self)._do_create_snapshot(snapshot, + backing_filename, + new_snap_path) + # Cache qemu-img info for created snapshot + self._qemu_img_info(new_snap_path, snapshot.volume.name) + def delete_snapshot(self, snapshot): + info_path = self._local_path_volume_info(snapshot.volume) + snap_info = self._read_info_file(info_path, empty_if_missing=True) + snap_file = os.path.join(self._local_volume_dir(snapshot.volume), + snap_info[snapshot.id]) + active_file = os.path.join(self._local_volume_dir(snapshot.volume), + snap_info['active']) + higher_file = self._get_higher_image_path(snapshot) + if higher_file: + higher_file = os.path.join(self._local_volume_dir(snapshot.volume), + higher_file) + elif active_file != snap_file: + msg = (_("Expected higher file to exist for snapshot %s") % + snapshot.id) + raise exception.VzStorageException(msg) + + img_info = self._qemu_img_info(snap_file, snapshot.volume.name) + base_file = os.path.join(self._local_volume_dir(snapshot.volume), + img_info.backing_file) volume_format = self.get_volume_format(snapshot.volume) + online = snapshot.volume.status == 'in-use' + if volume_format == 'parallels': self._delete_snapshot_ploop(snapshot) else: super(VZStorageDriver, self)._delete_snapshot(snapshot) + def _qemu_info_cache(fn): + return fn + ".qemu_img_info" + + def _update_backing_file(info_src, info_dst): + with open(info_src, 'r') as fs, open(info_dst, 'r') as fd: + src = json.load(fs) + dst = json.load(fd) + dst['backing_file'] = src['backing_file'] + with open(info_dst, 'w') as fdw: + json.dump(dst, fdw) + + if volume_format == "qcow2": + if snap_file != active_file: + # mv snap_file.info higher_file.info + _update_backing_file( + _qemu_info_cache(snap_file), + _qemu_info_cache(higher_file)) +
self._delete(_qemu_info_cache(snap_file)) + elif online: + # mv base_file.info snap_file.info + _update_backing_file( + _qemu_info_cache(base_file), + _qemu_info_cache(snap_file)) + self._delete(_qemu_info_cache(base_file)) + else: + # rm snap_file.info + self._delete(_qemu_info_cache(snap_file)) + def _copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" @@ -583,8 +669,7 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): try: volume.provider_location = src_vref.provider_location info_path = self._local_path_volume_info(volume) - snap_info = {'volume_format': DISK_FORMAT_PLOOP, - 'active': 'volume-%s' % volume.id} + snap_info = {'active': 'volume-%s' % volume.id} self._write_info_file(info_path, snap_info) self._copy_volume_from_snapshot(temp_snapshot, volume, @@ -596,10 +681,11 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): return {'provider_location': src_vref.provider_location} @remotefs_drv.locked_volume_id_operation - def create_cloned_volume(self, vol, src_vref): + def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" volume_format = self.get_volume_format(src_vref) if volume_format == 'parallels': - self._create_cloned_volume(vol, src_vref) + self._create_cloned_volume(volume, src_vref) else: - super(VZStorageDriver, self)._create_cloned_volume(vol, src_vref) + super(VZStorageDriver, self)._create_cloned_volume(volume, + src_vref) diff --git a/cinder/volume/drivers/windows/smbfs.py b/cinder/volume/drivers/windows/smbfs.py index 0841f5f2b..d1c7f3023 100644 --- a/cinder/volume/drivers/windows/smbfs.py +++ b/cinder/volume/drivers/windows/smbfs.py @@ -46,6 +46,10 @@ CONF.set_default('smbfs_default_volume_format', 'vhd') @interface.volumedriver class WindowsSmbfsDriver(smbfs.SmbfsDriver): VERSION = VERSION + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Microsoft_iSCSI_CI" + _MINIMUM_QEMU_IMG_VERSION = '1.6' def __init__(self, *args, **kwargs): diff --git a/cinder/volume/drivers/windows/windows.py b/cinder/volume/drivers/windows/windows.py index a6a7229c8..3b2db9949 100644 --- a/cinder/volume/drivers/windows/windows.py +++ b/cinder/volume/drivers/windows/windows.py @@ -50,6 +50,9 @@ class WindowsDriver(driver.ISCSIDriver): VERSION = '1.0.0' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Microsoft_iSCSI_CI" + def __init__(self, *args, **kwargs): super(WindowsDriver, self).__init__(*args, **kwargs) self.configuration = kwargs.get('configuration', None) diff --git a/cinder/volume/drivers/xio.py b/cinder/volume/drivers/xio.py index 1e69a91ac..be46071f7 100644 --- a/cinder/volume/drivers/xio.py +++ b/cinder/volume/drivers/xio.py @@ -62,7 +62,7 @@ def RaiseXIODriverException(): raise exception.XIODriverException() -class XIOISEDriver(object): +class XIOISEDriver(driver.VolumeDriver): VERSION = '1.1.4' @@ -74,6 +74,9 @@ class XIOISEDriver(object): # 1.1.3 Wait for volume/snapshot to be deleted. 
# 1.1.4 Force target_lun to be int (Bug 1549048) + # ThirdPartySystems wiki page + CI_WIKI_NAME = "X-IO_technologies_CI" + def __init__(self, *args, **kwargs): super(XIOISEDriver, self).__init__() LOG.debug("XIOISEDriver __init__ called.") @@ -1385,6 +1388,7 @@ class XIOISEDriver(object): class XIOISEISCSIDriver(driver.ISCSIDriver): """Requires ISE Running FW version 3.1.0 or higher""" + VERSION = XIOISEDriver.VERSION def __init__(self, *args, **kwargs): super(XIOISEISCSIDriver, self).__init__(*args, **kwargs) @@ -1512,6 +1516,7 @@ class XIOISEISCSIDriver(driver.ISCSIDriver): class XIOISEFCDriver(driver.FibreChannelDriver): """Requires ISE Running FW version 2.8.0 or higher""" + VERSION = XIOISEDriver.VERSION def __init__(self, *args, **kwargs): super(XIOISEFCDriver, self).__init__(*args, **kwargs) diff --git a/cinder/volume/drivers/zadara.py b/cinder/volume/drivers/zadara.py index e25884258..5eff0224e 100644 --- a/cinder/volume/drivers/zadara.py +++ b/cinder/volume/drivers/zadara.py @@ -267,6 +267,9 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): VERSION = '15.07' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "ZadaraStorage_VPSA_CI" + def __init__(self, *args, **kwargs): super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(zadara_opts) diff --git a/cinder/volume/drivers/zfssa/zfssaiscsi.py b/cinder/volume/drivers/zfssa/zfssaiscsi.py index 315189e6c..07d4cdbf6 100644 --- a/cinder/volume/drivers/zfssa/zfssaiscsi.py +++ b/cinder/volume/drivers/zfssa/zfssaiscsi.py @@ -123,6 +123,9 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): VERSION = '1.0.2' protocol = 'iSCSI' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Oracle_ZFSSA_CI" + def __init__(self, *args, **kwargs): super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(ZFSSA_OPTS) diff --git a/cinder/volume/drivers/zfssa/zfssanfs.py b/cinder/volume/drivers/zfssa/zfssanfs.py index 04594054f..ca51a034e 100644 --- a/cinder/volume/drivers/zfssa/zfssanfs.py +++ b/cinder/volume/drivers/zfssa/zfssanfs.py @@ -95,6 +95,9 @@ class ZFSSANFSDriver(nfs.NfsDriver): volume_backend_name = 'ZFSSA_NFS' protocol = driver_prefix = driver_volume_type = 'nfs' + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Oracle_ZFSSA_CI" + def __init__(self, *args, **kwargs): super(ZFSSANFSDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(ZFSSA_OPTS) diff --git a/cinder/volume/drivers/zte/zte_ks.py b/cinder/volume/drivers/zte/zte_ks.py index 2c667d15e..abb44ccb9 100644 --- a/cinder/volume/drivers/zte/zte_ks.py +++ b/cinder/volume/drivers/zte/zte_ks.py @@ -85,6 +85,7 @@ CONF.register_opts(zte_opts) @interface.volumedriver class ZTEVolumeDriver(driver.VolumeDriver): + def __init__(self, *args, **kwargs): super(ZTEVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(zte_opts) diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py index bc5f6541e..a13850cef 100644 --- a/cinder/volume/flows/api/create_volume.py +++ b/cinder/volume/flows/api/create_volume.py @@ -19,6 +19,7 @@ import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft +from cinder.common import constants from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE, _LW @@ -47,6 +48,7 @@ SRC_VOL_PROCEED_STATUS = ('available', 'in-use',) REPLICA_PROCEED_STATUS = ('active', 'active-stopped',) CG_PROCEED_STATUS = ('available', 'creating',) 
CGSNAPSHOT_PROCEED_STATUS = ('available',) +GROUP_PROCEED_STATUS = ('available', 'creating',) class ExtractVolumeRequestTask(flow_utils.CinderTask): @@ -67,7 +69,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): 'source_volid', 'volume_type', 'volume_type_id', 'encryption_key_id', 'source_replicaid', 'consistencygroup_id', 'cgsnapshot_id', - 'qos_specs']) + 'qos_specs', 'group_id']) def __init__(self, image_service, availability_zones, **kwargs): super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION], @@ -115,6 +117,11 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): exception.InvalidConsistencyGroup, 'consistencygroup') + def _extract_group(self, group): + return self._extract_resource(group, (GROUP_PROCEED_STATUS,), + exception.InvalidGroup, + 'group') + def _extract_cgsnapshot(self, cgsnapshot): return self._extract_resource(cgsnapshot, (CGSNAPSHOT_PROCEED_STATUS,), exception.InvalidCgSnapshot, @@ -269,7 +276,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): return volume_type def _extract_availability_zone(self, availability_zone, snapshot, - source_volume): + source_volume, group): """Extracts and returns a validated availability zone. This function will extract the availability zone (if not provided) from the ... the validated availability zone. """ + # If the volume will be created in a group, it should be placed + # in the same availability zone as the group. + if group: + try: + availability_zone = group['availability_zone'] + except (TypeError, KeyError): + pass + # Try to extract the availability zone from the corresponding snapshot # or source volume if either is valid so that we can be in the same # availability zone as the source. @@ -354,10 +369,22 @@ # Clone the existing key and associate a separate -- but # identical -- key with each volume. if encryption_key_id is not None: - encryption_key_id = key_manager.copy_key(context, - encryption_key_id) + encryption_key_id = key_manager.store( + context, key_manager.get(context, encryption_key_id)) else: - encryption_key_id = key_manager.create_key(context) + volume_type_encryption = ( + volume_types.get_volume_type_encryption(context, + volume_type_id)) + cipher = volume_type_encryption.cipher + length = volume_type_encryption.key_size + + # NOTE(kaitlin-farr): dm-crypt expects the cipher in a + # hyphenated format (aes-xts-plain64). The algorithm needs + # to be parsed out to pass to the key manager (aes).
+ algorithm = cipher.split('-')[0] if cipher else None + encryption_key_id = key_manager.create_key(context, + algorithm=algorithm, + length=length) return encryption_key_id @@ -377,7 +404,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): def execute(self, context, size, snapshot, image_id, source_volume, availability_zone, volume_type, metadata, key_manager, - source_replica, consistencygroup, cgsnapshot): + source_replica, consistencygroup, cgsnapshot, group): utils.check_exclusive_options(snapshot=snapshot, imageRef=image_id, @@ -392,12 +419,14 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): size = self._extract_size(size, source_volume, snapshot) consistencygroup_id = self._extract_consistencygroup(consistencygroup) cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot) + group_id = self._extract_group(group) self._check_image_metadata(context, image_id, size) availability_zone = self._extract_availability_zone(availability_zone, snapshot, - source_volume) + source_volume, + group) # TODO(joel-coffman): This special handling of snapshots to ensure that # their volume type matches the source volume is too convoluted. We @@ -419,13 +448,6 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): volume_type_id = self._get_volume_type_id(volume_type, source_volume, snapshot) - if image_id and volume_types.is_encrypted(context, volume_type_id): - msg = _('Create encrypted volumes with type %(type)s ' - 'from image %(image)s is not supported.') - msg = msg % {'type': volume_type_id, - 'image': image_id, } - raise exception.InvalidInput(reason=msg) - encryption_key_id = self._get_encryption_key_id(key_manager, context, volume_type_id, @@ -441,8 +463,6 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): # to make sure we don't pass empty dict specs = None - utils.check_metadata_properties(metadata) - return { 'size': size, 'snapshot_id': snapshot_id, @@ -455,6 +475,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): 'source_replicaid': source_replicaid, 'consistencygroup_id': consistencygroup_id, 'cgsnapshot_id': cgsnapshot_id, + 'group_id': group_id, } @@ -471,7 +492,8 @@ class EntryCreateTask(flow_utils.CinderTask): 'name', 'reservations', 'size', 'snapshot_id', 'source_volid', 'volume_type_id', 'encryption_key_id', 'source_replicaid', 'consistencygroup_id', - 'cgsnapshot_id', 'multiattach', 'qos_specs'] + 'cgsnapshot_id', 'multiattach', 'qos_specs', + 'group_id', ] super(EntryCreateTask, self).__init__(addons=[ACTION], requires=requires) self.db = db @@ -506,6 +528,16 @@ class EntryCreateTask(flow_utils.CinderTask): volume = objects.Volume(context=context, **volume_properties) volume.create() + # FIXME(dulek): We're passing this volume_properties dict through RPC + # in request_spec. This shouldn't be needed, most data is replicated + # in both volume and other places. We should make Newton read data + # from just one correct place and leave just compatibility code. + # + # Right now - let's move it to versioned objects to be able to make + # non-backward compatible changes. 
+ + volume_properties = objects.VolumeProperties(**volume_properties) + return { 'volume_id': volume['id'], 'volume_properties': volume_properties, @@ -665,7 +697,7 @@ class VolumeCastTask(flow_utils.CinderTask): requires = ['image_id', 'scheduler_hints', 'snapshot_id', 'source_volid', 'volume_id', 'volume', 'volume_type', 'volume_properties', 'source_replicaid', - 'consistencygroup_id', 'cgsnapshot_id', ] + 'consistencygroup_id', 'cgsnapshot_id', 'group_id', ] super(VolumeCastTask, self).__init__(addons=[ACTION], requires=requires) self.volume_rpcapi = volume_rpcapi @@ -682,16 +714,21 @@ class VolumeCastTask(flow_utils.CinderTask): cgroup_id = request_spec['consistencygroup_id'] host = None cgsnapshot_id = request_spec['cgsnapshot_id'] - + group_id = request_spec['group_id'] if cgroup_id: # If cgroup_id existed, we should cast volume to the scheduler # to choose a proper pool whose backend is same as CG's backend. cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id) - # FIXME(wanghao): CG_backend got added before request_spec was + request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host) + elif group_id: + # If group_id exists, we should cast volume to the scheduler + # to choose a proper pool whose backend is same as group's backend. + group = objects.Group.get_by_id(context, group_id) + # FIXME(wanghao): group_backend got added before request_spec was # converted to versioned objects. We should make sure that this # will be handled by object version translations once we add # RequestSpec object. - request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host) + request_spec['group_backend'] = vol_utils.extract_host(group.host) elif snapshot_id and CONF.snapshot_same_host: # NOTE(Rongze Zhu): A simple solution for bug 1008866. # @@ -717,7 +754,7 @@ class VolumeCastTask(flow_utils.CinderTask): # to select the target host for this volume. self.scheduler_rpcapi.create_volume( context, - CONF.volume_topic, + constants.VOLUME_TOPIC, volume_id, snapshot_id=snapshot_id, image_id=image_id, @@ -741,21 +778,26 @@ class VolumeCastTask(flow_utils.CinderTask): def execute(self, context, **kwargs): scheduler_hints = kwargs.pop('scheduler_hints', None) - request_spec = kwargs.copy() + db_vt = kwargs.pop('volume_type') + kwargs['volume_type'] = None + if db_vt: + kwargs['volume_type'] = objects.VolumeType() + objects.VolumeType()._from_db_object(context, + kwargs['volume_type'], db_vt) + request_spec = objects.RequestSpec(**kwargs) filter_properties = {} if scheduler_hints: filter_properties['scheduler_hints'] = scheduler_hints self._cast_create_volume(context, request_spec, filter_properties) - def revert(self, context, result, flow_failures, **kwargs): + def revert(self, context, result, flow_failures, volume, **kwargs): if isinstance(result, ft.Failure): return # Restore the source volume status and set the volume to error status. - volume_id = kwargs['volume_id'] common.restore_source_status(context, self.db, kwargs) - common.error_out_volume(context, self.db, volume_id) - LOG.error(_LE("Volume %s: create failed"), volume_id) + common.error_out(volume) + LOG.error(_LE("Volume %s: create failed"), volume.id) exc_info = False if all(flow_failures[-1].exc_info): exc_info = flow_failures[-1].exc_info diff --git a/cinder/volume/flows/api/manage_existing.py b/cinder/volume/flows/api/manage_existing.py index 3cb800ae4..50c129680 100644 --- a/cinder/volume/flows/api/manage_existing.py +++ b/cinder/volume/flows/api/manage_existing.py @@ -11,12 +11,12 @@ # under the License. 
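# Simplified, self-contained sketch of the dict-to-versioned-object
# conversion that VolumeCastTask.execute() above now performs before
# casting to the scheduler: the raw DB volume_type row is rebuilt as an
# object so the request_spec can travel over RPC as a versioned payload.
# The classes below are illustrative stand-ins, not the real Cinder OVOs
# (whose _from_db_object helpers also take a context argument).
class VolumeType(object):
    def _from_db_object(self, db_row):
        self.__dict__.update(db_row)
        return self

class RequestSpec(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

kwargs = {'volume_id': 'vol-1',
          'volume_type': {'id': 'type-1', 'name': 'gold'}}  # fake DB row
db_vt = kwargs.pop('volume_type')
kwargs['volume_type'] = VolumeType()._from_db_object(db_vt) if db_vt else None
request_spec = RequestSpec(**kwargs)
print(request_spec.volume_type.name)  # -> gold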
-from oslo_config import cfg from oslo_log import log as logging import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft +from cinder.common import constants from cinder import exception from cinder import flow_utils from cinder.i18n import _LE @@ -26,7 +26,6 @@ from cinder.volume.flows import common LOG = logging.getLogger(__name__) ACTION = 'volume:manage_existing' -CONF = cfg.CONF class EntryCreateTask(flow_utils.CinderTask): @@ -103,23 +102,21 @@ class ManageCastTask(flow_utils.CinderTask): self.scheduler_rpcapi = scheduler_rpcapi self.db = db - def execute(self, context, **kwargs): - volume = kwargs.pop('volume') + def execute(self, context, volume, **kwargs): request_spec = kwargs.copy() request_spec['volume_id'] = volume.id # Call the scheduler to ensure that the host exists and that it can # accept the volume - self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic, + self.scheduler_rpcapi.manage_existing(context, constants.VOLUME_TOPIC, volume.id, request_spec=request_spec, volume=volume) - def revert(self, context, result, flow_failures, **kwargs): + def revert(self, context, result, flow_failures, volume, **kwargs): # Restore the source volume status and set the volume to error status. - volume_id = kwargs['volume_id'] - common.error_out_volume(context, self.db, volume_id) - LOG.error(_LE("Volume %s: manage failed."), volume_id) + common.error_out(volume) + LOG.error(_LE("Volume %s: manage failed."), volume.id) exc_info = False if all(flow_failures[-1].exc_info): exc_info = flow_failures[-1].exc_info diff --git a/cinder/volume/flows/common.py b/cinder/volume/flows/common.py index 3886935a0..4d6f788c7 100644 --- a/cinder/volume/flows/common.py +++ b/cinder/volume/flows/common.py @@ -21,7 +21,7 @@ import six from cinder import exception from cinder.i18n import _LE -from cinder import objects + LOG = logging.getLogger(__name__) @@ -67,7 +67,7 @@ def restore_source_status(context, db, volume_spec): def _clean_reason(reason): if reason is None: - return '???' + return 'Unknown reason' reason = six.text_type(reason) if len(reason) <= REASON_LENGTH: return reason @@ -75,35 +75,19 @@ def _clean_reason(reason): return reason[0:REASON_LENGTH] + '...' -def _update_object(context, db, status, reason, object_type, object_id): - update = { - 'status': status, - } +def error_out(resource, reason=None): + """Sets status to error for any persistent OVO.""" + reason = _clean_reason(reason) try: - LOG.debug('Updating %(object_type)s: %(object_id)s with %(update)s' - ' due to: %(reason)s', {'object_type': object_type, - 'object_id': object_id, - 'reason': reason, - 'update': update}) - if object_type == 'volume': - db.volume_update(context, object_id, update) - elif object_type == 'snapshot': - snapshot = objects.Snapshot.get_by_id(context, object_id) - snapshot.update(update) - snapshot.save() - except exception.CinderException: + LOG.debug('Setting %(object_type)s %(object_id)s to error due to: ' + '%(reason)s', {'object_type': resource.obj_name(), + 'object_id': resource.id, + 'reason': reason}) + resource.status = 'error' + resource.save() + except Exception: # Don't let this cause further exceptions. 
- LOG.exception(_LE("Failed updating %(object_type)s %(object_id)s with" - " %(update)s"), {'object_type': object_type, - 'object_id': object_id, - 'update': update}) - - -def error_out_volume(context, db, volume_id, reason=None): - reason = _clean_reason(reason) - _update_object(context, db, 'error', reason, 'volume', volume_id) - - -def error_out_snapshot(context, db, snapshot_id, reason=None): - reason = _clean_reason(reason) - _update_object(context, db, 'error', reason, 'snapshot', snapshot_id) + LOG.exception(_LE("Failed setting %(object_type)s %(object_id)s to " + " error status."), + {'object_type': resource.obj_name(), + 'object_id': resource.id}) diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py index d301bab25..8ca86526c 100644 --- a/cinder/volume/flows/manager/create_volume.py +++ b/cinder/volume/flows/manager/create_volume.py @@ -20,6 +20,7 @@ import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft +from cinder.common import constants from cinder import context as cinder_context from cinder import exception from cinder import flow_utils @@ -60,7 +61,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): def __init__(self, reschedule_context, db, scheduler_rpcapi, do_reschedule): - requires = ['filter_properties', 'request_spec', 'volume_ref', + requires = ['filter_properties', 'request_spec', 'volume', 'context'] super(OnFailureRescheduleTask, self).__init__(addons=[ACTION], requires=requires) @@ -92,7 +93,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): def execute(self, **kwargs): pass - def _pre_reschedule(self, context, volume): + def _pre_reschedule(self, volume): """Actions that happen before the rescheduling attempt occur here.""" try: @@ -143,7 +144,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): # Stringify to avoid circular ref problem in json serialization retry_info['exc'] = traceback.format_exception(*cause.exc_info) - return create_volume(context, CONF.volume_topic, volume.id, + return create_volume(context, constants.VOLUME_TOPIC, volume.id, request_spec=request_spec, filter_properties=filter_properties, volume=volume) @@ -153,7 +154,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): LOG.debug("Volume %s: re-scheduled", volume.id) - def revert(self, context, result, flow_failures, volume_ref, **kwargs): + def revert(self, context, result, flow_failures, volume, **kwargs): # NOTE(dulek): Revert is occurring and manager need to know if # rescheduling happened. We're returning boolean flag that will # indicate that. It which will be available in flow engine store @@ -162,16 +163,16 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): # If do not want to be rescheduled, just set the volume's status to # error and return. if not self.do_reschedule: - common.error_out_volume(context, self.db, volume_ref.id) - LOG.error(_LE("Volume %s: create failed"), volume_ref.id) + common.error_out(volume) + LOG.error(_LE("Volume %s: create failed"), volume.id) return False # Check if we have a cause which can tell us not to reschedule and # set the volume's status to error. for failure in flow_failures.values(): if failure.check(*self.no_reschedule_types): - common.error_out_volume(context, self.db, volume_ref.id) - LOG.error(_LE("Volume %s: create failed"), volume_ref.id) + common.error_out(volume) + LOG.error(_LE("Volume %s: create failed"), volume.id) return False # Use a different context when rescheduling. 
@@ -179,13 +180,13 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): cause = list(flow_failures.values())[0] context = self.reschedule_context try: - self._pre_reschedule(context, volume_ref) - self._reschedule(context, cause, volume=volume_ref, **kwargs) - self._post_reschedule(volume_ref) + self._pre_reschedule(volume) + self._reschedule(context, cause, volume=volume, **kwargs) + self._post_reschedule(volume) return True except exception.CinderException: LOG.exception(_LE("Volume %s: rescheduling failed"), - volume_ref.id) + volume.id) return False @@ -193,7 +194,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): class ExtractVolumeRefTask(flow_utils.CinderTask): """Extracts volume reference for given volume id.""" - default_provides = 'volume_ref' + default_provides = 'refreshed' def __init__(self, db, host, set_error=True): super(ExtractVolumeRefTask, self).__init__(addons=[ACTION]) @@ -201,21 +202,22 @@ class ExtractVolumeRefTask(flow_utils.CinderTask): self.host = host self.set_error = set_error - def execute(self, context, volume_id): + def execute(self, context, volume): # NOTE(harlowja): this will fetch the volume from the database, if # the volume has been deleted before we got here then this should fail. # # In the future we might want to have a lock on the volume_id so that # the volume can not be deleted while its still being created? - return objects.Volume.get_by_id(context, volume_id) + volume.refresh() + return volume - def revert(self, context, volume_id, result, **kwargs): + def revert(self, context, volume, result, **kwargs): if isinstance(result, ft.Failure) or not self.set_error: return reason = _('Volume create failed while extracting volume ref.') - common.error_out_volume(context, self.db, volume_id, reason=reason) - LOG.error(_LE("Volume %s: create failed"), volume_id) + common.error_out(volume, reason) + LOG.error(_LE("Volume %s: create failed"), volume.id) class ExtractVolumeSpecTask(flow_utils.CinderTask): @@ -232,43 +234,43 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask): default_provides = 'volume_spec' def __init__(self, db): - requires = ['volume_ref', 'request_spec'] + requires = ['volume', 'request_spec'] super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION], requires=requires) self.db = db - def execute(self, context, volume_ref, request_spec): + def execute(self, context, volume, request_spec): get_remote_image_service = glance.get_remote_image_service - volume_name = volume_ref['name'] - volume_size = utils.as_int(volume_ref['size'], quiet=False) + volume_name = volume.name + volume_size = utils.as_int(volume.size, quiet=False) # Create a dictionary that will represent the volume to be so that # later tasks can easily switch between the different types and create # the volume according to the volume types specifications (which are # represented in this dictionary). specs = { - 'status': volume_ref['status'], + 'status': volume.status, 'type': 'raw', # This will have the type of the volume to be # created, which should be one of [raw, snap, # source_vol, image] - 'volume_id': volume_ref['id'], + 'volume_id': volume.id, 'volume_name': volume_name, 'volume_size': volume_size, } - if volume_ref.get('snapshot_id'): + if volume.snapshot_id: # We are making a snapshot based volume instead of a raw volume. 
specs.update({ 'type': 'snap', - 'snapshot_id': volume_ref['snapshot_id'], + 'snapshot_id': volume.snapshot_id, }) - elif volume_ref.get('source_volid'): + elif volume.source_volid: # We are making a source based volume instead of a raw volume. # # NOTE(harlowja): This will likely fail if the source volume # disappeared by the time this call occurred. - source_volid = volume_ref.get('source_volid') + source_volid = volume.source_volid source_volume_ref = objects.Volume.get_by_id(context, source_volid) specs.update({ @@ -276,7 +278,7 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask): # This is captured incase we have to revert and we want to set # back the source volume status to its original status. This # may or may not be sketchy to do?? - 'source_volstatus': source_volume_ref['status'], + 'source_volstatus': source_volume_ref.status, 'type': 'source_vol', }) elif request_spec.get('source_replicaid'): @@ -289,7 +291,7 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask): source_volid) specs.update({ 'source_replicaid': source_volid, - 'source_replicastatus': source_volume_ref['status'], + 'source_replicastatus': source_volume_ref.status, 'type': 'source_replica', }) elif request_spec.get('image_id'): @@ -334,19 +336,18 @@ class NotifyVolumeActionTask(flow_utils.CinderTask): self.db = db self.event_suffix = event_suffix - def execute(self, context, volume_ref): - volume_id = volume_ref['id'] + def execute(self, context, volume): try: - volume_utils.notify_about_volume_usage(context, volume_ref, + volume_utils.notify_about_volume_usage(context, volume, self.event_suffix, - host=volume_ref['host']) + host=volume.host) except exception.CinderException: # If notification sending of volume database entry reading fails # then we shouldn't error out the whole workflow since this is # not always information that must be sent for volumes to operate LOG.exception(_LE("Failed notifying about the volume" " action %(event)s for volume %(volume_id)s"), - {'event': self.event_suffix, 'volume_id': volume_id}) + {'event': self.event_suffix, 'volume_id': volume.id}) class CreateVolumeFromSpecTask(flow_utils.CinderTask): @@ -355,8 +356,6 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): Reversion strategy: N/A """ - default_provides = 'volume' - def __init__(self, manager, db, driver, image_volume_cache=None): super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION]) self.manager = manager @@ -364,7 +363,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): self.driver = driver self.image_volume_cache = image_volume_cache - def _handle_bootable_volume_glance_meta(self, context, volume_id, + def _handle_bootable_volume_glance_meta(self, context, volume, **kwargs): """Enable bootable flag and properly handle glance metadata. 
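# Sketch of the provenance dispatch performed by
# _handle_bootable_volume_glance_meta() (its body follows in the next
# hunks): exactly one source kwarg selects how glance metadata reaches the
# new volume. The returned tuples are illustrative labels; the real method
# calls the matching db.volume_glance_metadata_* helper instead.
def glance_meta_source(**kwargs):
    if kwargs.get('snapshot_id'):
        return ('copy_to_volume', kwargs['snapshot_id'])
    if kwargs.get('source_volid'):
        return ('copy_from_volume_to_volume', kwargs['source_volid'])
    if kwargs.get('source_replicaid'):
        return ('copy_from_volume_to_volume', kwargs['source_replicaid'])
    if kwargs.get('image_id'):
        return ('capture_volume_image_metadata', kwargs['image_id'])

print(glance_meta_source(snapshot_id='snap-1'))  # ('copy_to_volume', 'snap-1')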
@@ -380,7 +379,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): " %(src_id)s metadata") src_type = None src_id = None - self._enable_bootable_flag(context, volume_id) + self._enable_bootable_flag(context, volume) try: if kwargs.get('snapshot_id'): src_type = 'snapshot' @@ -388,31 +387,31 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): snapshot_id = src_id LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, - 'vol_id': volume_id}) + 'vol_id': volume.id}) self.db.volume_glance_metadata_copy_to_volume( - context, volume_id, snapshot_id) + context, volume.id, snapshot_id) elif kwargs.get('source_volid'): src_type = 'source volume' src_id = kwargs['source_volid'] source_volid = src_id LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, - 'vol_id': volume_id}) + 'vol_id': volume.id}) self.db.volume_glance_metadata_copy_from_volume_to_volume( context, source_volid, - volume_id) + volume.id) elif kwargs.get('source_replicaid'): src_type = 'source replica' src_id = kwargs['source_replicaid'] source_replicaid = src_id LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, - 'vol_id': volume_id}) + 'vol_id': volume.id}) self.db.volume_glance_metadata_copy_from_volume_to_volume( context, source_replicaid, - volume_id) + volume.id) elif kwargs.get('image_id'): src_type = 'image' src_id = kwargs['image_id'] @@ -420,8 +419,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): image_meta = kwargs.get('image_meta', {}) LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, - 'vol_id': volume_id}) - self._capture_volume_image_metadata(context, volume_id, + 'vol_id': volume.id}) + self._capture_volume_image_metadata(context, volume.id, image_id, image_meta) except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata @@ -431,14 +430,13 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): except exception.CinderException as ex: LOG.exception(exception_template, {'src_type': src_type, 'src_id': src_id, - 'vol_id': volume_id}) + 'vol_id': volume.id}) raise exception.MetadataCopyFailure(reason=ex) - def _create_from_snapshot(self, context, volume_ref, snapshot_id, + def _create_from_snapshot(self, context, volume, snapshot_id, **kwargs): - volume_id = volume_ref['id'] snapshot = objects.Snapshot.get_by_id(context, snapshot_id) - model_update = self.driver.create_volume_from_snapshot(volume_ref, + model_update = self.driver.create_volume_from_snapshot(volume, snapshot) # NOTE(harlowja): Subtasks would be useful here since after this # point the volume has already been created and further failures @@ -457,21 +455,22 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): 'snapshot_ref_id': snapshot.volume_id}) raise exception.MetadataUpdateFailure(reason=ex) if make_bootable: - self._handle_bootable_volume_glance_meta(context, volume_id, + self._handle_bootable_volume_glance_meta(context, volume, snapshot_id=snapshot_id) return model_update - def _enable_bootable_flag(self, context, volume_id): + def _enable_bootable_flag(self, context, volume): try: - LOG.debug('Marking volume %s as bootable.', volume_id) - self.db.volume_update(context, volume_id, {'bootable': True}) + LOG.debug('Marking volume %s as bootable.', volume.id) + volume.bootable = True + volume.save() except exception.CinderException as ex: LOG.exception(_LE("Failed updating volume %(volume_id)s bootable " - "flag to true"), {'volume_id': volume_id}) + "flag to true"), {'volume_id': volume.id}) raise 
exception.MetadataUpdateFailure(reason=ex) - def _create_from_source_volume(self, context, volume_ref, - source_volid, **kwargs): + def _create_from_source_volume(self, context, volume, source_volid, + **kwargs): # NOTE(harlowja): if the source volume has disappeared this will be our # detection of that since this database call should fail. # @@ -479,17 +478,17 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # and we should have proper locks on the source volume while actions # that use the source volume are underway. srcvol_ref = objects.Volume.get_by_id(context, source_volid) - model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref) + model_update = self.driver.create_cloned_volume(volume, srcvol_ref) # NOTE(harlowja): Subtasks would be useful here since after this # point the volume has already been created and further failures # will not destroy the volume (although they could in the future). if srcvol_ref.bootable: self._handle_bootable_volume_glance_meta( - context, volume_ref.id, source_volid=srcvol_ref.id) + context, volume, source_volid=srcvol_ref.id) return model_update - def _create_from_source_replica(self, context, volume_ref, - source_replicaid, **kwargs): + def _create_from_source_replica(self, context, volume, source_replicaid, + **kwargs): # NOTE(harlowja): if the source volume has disappeared this will be our # detection of that since this database call should fail. # @@ -497,7 +496,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # and we should have proper locks on the source volume while actions # that use the source volume are underway. srcvol_ref = objects.Volume.get_by_id(context, source_replicaid) - model_update = self.driver.create_replica_test_volume(volume_ref, + model_update = self.driver.create_replica_test_volume(volume, srcvol_ref) # NOTE(harlowja): Subtasks would be useful here since after this # point the volume has already been created and further failures @@ -505,34 +504,37 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): if srcvol_ref.bootable: self._handle_bootable_volume_glance_meta( context, - volume_ref['id'], + volume, source_replicaid=source_replicaid) return model_update - def _copy_image_to_volume(self, context, volume_ref, + def _copy_image_to_volume(self, context, volume, image_id, image_location, image_service): """Downloads Glance image to the specified volume.""" - copy_image_to_volume = self.driver.copy_image_to_volume - volume_id = volume_ref['id'] LOG.debug("Attempting download of %(image_id)s (%(image_location)s)" " to volume %(volume_id)s.", - {'image_id': image_id, 'volume_id': volume_id, + {'image_id': image_id, 'volume_id': volume.id, 'image_location': image_location}) try: - copy_image_to_volume(context, volume_ref, image_service, image_id) + if volume.encryption_key_id: + self.driver.copy_image_to_encrypted_volume( + context, volume, image_service, image_id) + else: + self.driver.copy_image_to_volume( + context, volume, image_service, image_id) except processutils.ProcessExecutionError as ex: LOG.exception(_LE("Failed to copy image %(image_id)s to volume: " "%(volume_id)s"), - {'volume_id': volume_id, 'image_id': image_id}) + {'volume_id': volume.id, 'image_id': image_id}) raise exception.ImageCopyFailure(reason=ex.stderr) except exception.ImageUnacceptable as ex: LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"), - {'volume_id': volume_id}) + {'volume_id': volume.id}) raise exception.ImageUnacceptable(ex) except Exception as ex: LOG.exception(_LE("Failed to copy image 
%(image_id)s to " "volume: %(volume_id)s"), - {'volume_id': volume_id, 'image_id': image_id}) + {'volume_id': volume.id, 'image_id': image_id}) if not isinstance(ex, exception.ImageCopyFailure): raise exception.ImageCopyFailure(reason=ex) else: @@ -540,7 +542,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): LOG.debug("Downloaded image %(image_id)s (%(image_location)s)" " to volume %(volume_id)s successfully.", - {'image_id': image_id, 'volume_id': volume_id, + {'image_id': image_id, 'volume_id': volume.id, 'image_location': image_location}) def _capture_volume_image_metadata(self, context, volume_id, @@ -585,7 +587,9 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): Returns a dict of volume properties eg. provider_location, boolean indicating whether cloning occurred """ - if not image_location: + # NOTE (lixiaoy1): currently can't create volume from source vol with + # different encryptions, so just return. + if not image_location or volume.encryption_key_id: return None, False if (image_meta.get('container_format') != 'bare' or @@ -632,7 +636,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): {'id': image_volume['id']}) return None, False - def _create_from_image_download(self, context, volume_ref, image_location, + def _create_from_image_download(self, context, volume, image_location, image_id, image_service): # TODO(harlowja): what needs to be rolled back in the clone if this # volume create fails?? Likely this should be a subflow or broken @@ -640,21 +644,21 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # do we make said subflow/task which is only triggered in the # clone image 'path' resumable and revertable in the correct # manner. - model_update = self.driver.create_volume(volume_ref) - updates = dict(model_update or dict(), status='downloading') + model_update = self.driver.create_volume(volume) or {} + model_update['status'] = 'downloading' try: - volume_ref = self.db.volume_update(context, - volume_ref['id'], updates) + volume.update(model_update) + volume.save() except exception.CinderException: LOG.exception(_LE("Failed updating volume %(volume_id)s with " "%(updates)s"), - {'volume_id': volume_ref['id'], - 'updates': updates}) - self._copy_image_to_volume(context, volume_ref, - image_id, image_location, image_service) + {'volume_id': volume.id, + 'updates': model_update}) + self._copy_image_to_volume(context, volume, image_id, image_location, + image_service) return model_update - def _create_from_image_cache(self, context, internal_context, volume_ref, + def _create_from_image_cache(self, context, internal_context, volume, image_id, image_meta): """Attempt to create the volume using the image cache. 
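Note on the _copy_image_to_volume hunk above: the copy is now dispatched on the volume's encryption_key_id, since encrypted volumes must be written through copy_image_to_encrypted_volume. A minimal standalone sketch of that dispatch, assuming only a driver object exposing the two standard Cinder driver entry points:

    def copy_image(context, driver, volume, image_service, image_id):
        # Dispatch on encryption: encrypted volumes are written through
        # the encryptor-aware driver call, plain volumes as before.
        if volume.encryption_key_id:
            driver.copy_image_to_encrypted_volume(
                context, volume, image_service, image_id)
        else:
            driver.copy_image_to_volume(
                context, volume, image_service, image_id)
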
@@ -664,10 +668,15 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): """ LOG.debug('Attempting to retrieve cache entry for image = ' '%(image_id)s on host %(host)s.', - {'image_id': image_id, 'host': volume_ref['host']}) + {'image_id': image_id, 'host': volume.host}) + # Currently can't create volume from source vol with different + # encryptions, so just return + if volume.encryption_key_id: + return None, False + try: cache_entry = self.image_volume_cache.get_entry(internal_context, - volume_ref, + volume, image_id, image_meta) if cache_entry: @@ -675,7 +684,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): {'volume_id': cache_entry['volume_id']}) model_update = self._create_from_source_volume( context, - volume_ref, + volume, cache_entry['volume_id'] ) return model_update, True @@ -685,18 +694,18 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): '%(exception)s'), {'exception': e}) return None, False - def _create_from_image(self, context, volume_ref, + def _create_from_image(self, context, volume, image_location, image_id, image_meta, image_service, **kwargs): LOG.debug("Cloning %(volume_id)s from image %(image_id)s " " at location %(image_location)s.", - {'volume_id': volume_ref['id'], + {'volume_id': volume.id, 'image_location': image_location, 'image_id': image_id}) virtual_size = image_meta.get('virtual_size') if virtual_size: virtual_size = image_utils.check_virtual_size(virtual_size, - volume_ref.size, + volume.size, image_id) # Create the volume from an image. @@ -706,16 +715,22 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # NOTE (singn): two params need to be returned # dict containing provider_location for cloned volume # and clone status. - model_update, cloned = self.driver.clone_image(context, - volume_ref, - image_location, - image_meta, - image_service) + # NOTE (lixiaoy1): Currently all images are raw data, we can't + # use clone_image to copy data if new volume is encrypted. + volume_is_encrypted = volume.encryption_key_id is not None + cloned = False + model_update = None + if not volume_is_encrypted: + model_update, cloned = self.driver.clone_image(context, + volume, + image_location, + image_meta, + image_service) # Try and clone the image if we have it set as a glance location. if not cloned and 'cinder' in CONF.allowed_direct_url_schemes: model_update, cloned = self._clone_image_volume(context, - volume_ref, + volume, image_location, image_meta) # Try and use the image cache. @@ -729,16 +744,17 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): model_update, cloned = self._create_from_image_cache( context, internal_context, - volume_ref, + volume, image_id, image_meta ) - if not cloned: + # Don't cache encrypted volume. + if not cloned and not volume_is_encrypted: should_create_cache_entry = True # Fall back to default behavior of creating volume, # download the image data and copy it into the volume. 
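The _create_from_image rework above is easier to follow as a decision table: encrypted volumes skip clone_image, skip the image-volume cache, and are never cached themselves; only an unencrypted volume that could not be cloned gets a new cache entry. A condensed, hypothetical sketch of that flow, with lookup_cache and download standing in for _create_from_image_cache and _create_from_image_download:

    def create_from_image_sketch(volume, lookup_cache, download):
        # Hypothetical condensation of the decision flow above.
        encrypted = volume.encryption_key_id is not None
        model_update, cloned = None, False
        if not encrypted:
            model_update, cloned = lookup_cache(volume)
        # Only plain volumes that still need a download are cached.
        should_create_cache_entry = not cloned and not encrypted
        if not cloned:
            model_update = download(volume)
        return model_update, should_create_cache_entry
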
- original_size = volume_ref['size'] + original_size = volume.size try: if not cloned: with image_utils.TemporaryImages.fetch( @@ -748,16 +764,16 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): data = image_utils.qemu_img_info(tmp_image) virtual_size = image_utils.check_virtual_size( - data.virtual_size, volume_ref.size, image_id) + data.virtual_size, volume.size, image_id) if should_create_cache_entry: if virtual_size and virtual_size != original_size: - volume_ref.size = virtual_size - volume_ref.save() + volume.size = virtual_size + volume.save() model_update = self._create_from_image_download( context, - volume_ref, + volume, image_location, image_id, image_service @@ -767,10 +783,10 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # Update the newly created volume db entry before we clone it # for the image-volume creation. if model_update: - volume_ref.update(model_update) - volume_ref.save() + volume.update(model_update) + volume.save() self.manager._create_image_cache_volume_entry(internal_context, - volume_ref, + volume, image_id, image_meta) finally: @@ -778,24 +794,24 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # what was originally requested. If an exception has occurred we # still need to put this back before letting it be raised further # up the stack. - if volume_ref.size != original_size: - self.driver.extend_volume(volume_ref, original_size) - volume_ref.size = original_size - volume_ref.save() + if volume.size != original_size: + self.driver.extend_volume(volume, original_size) + volume.size = original_size + volume.save() - self._handle_bootable_volume_glance_meta(context, volume_ref.id, + self._handle_bootable_volume_glance_meta(context, volume, image_id=image_id, image_meta=image_meta) return model_update - def _create_raw_volume(self, volume_ref, **kwargs): - return self.driver.create_volume(volume_ref) + def _create_raw_volume(self, volume, **kwargs): + return self.driver.create_volume(volume) - def execute(self, context, volume_ref, volume_spec): + def execute(self, context, volume, volume_spec): volume_spec = dict(volume_spec) volume_id = volume_spec.pop('volume_id', None) if not volume_id: - volume_id = volume_ref['id'] + volume_id = volume.id # we can't do anything if the driver didn't init if not self.driver.initialized: @@ -810,21 +826,19 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): {'volume_spec': volume_spec, 'volume_id': volume_id, 'create_type': create_type}) if create_type == 'raw': - model_update = self._create_raw_volume(volume_ref=volume_ref, - **volume_spec) + model_update = self._create_raw_volume(volume, **volume_spec) elif create_type == 'snap': - model_update = self._create_from_snapshot(context, - volume_ref=volume_ref, + model_update = self._create_from_snapshot(context, volume, **volume_spec) elif create_type == 'source_vol': model_update = self._create_from_source_volume( - context, volume_ref=volume_ref, **volume_spec) + context, volume, **volume_spec) elif create_type == 'source_replica': model_update = self._create_from_source_replica( - context, volume_ref=volume_ref, **volume_spec) + context, volume, **volume_spec) elif create_type == 'image': model_update = self._create_from_image(context, - volume_ref=volume_ref, + volume, **volume_spec) else: raise exception.VolumeTypeNotFound(volume_type_id=create_type) @@ -832,8 +846,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # Persist any model information provided on creation. 
try: if model_update: - volume_ref.update(model_update) - volume_ref.save() + volume.update(model_update) + volume.save() except exception.CinderException: # If somehow the update failed we want to ensure that the # failure is logged (but not try rescheduling since the volume at @@ -843,8 +857,6 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): {'volume_id': volume_id, 'model': model_update}) raise - return volume_ref - class CreateVolumeOnFinishTask(NotifyVolumeActionTask): """On successful volume creation this will perform final volume actions. @@ -891,7 +903,7 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask): 'volume_id': volume.id}) -def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume_id, +def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume, allow_reschedule, reschedule_context, request_spec, filter_properties, image_volume_cache=None): @@ -920,7 +932,7 @@ def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume_id, 'context': context, 'filter_properties': filter_properties, 'request_spec': request_spec, - 'volume_id': volume_id, + 'volume': volume, } volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False)) diff --git a/cinder/volume/flows/manager/manage_existing.py b/cinder/volume/flows/manager/manage_existing.py index 03732a06a..8785ddb9e 100644 --- a/cinder/volume/flows/manager/manage_existing.py +++ b/cinder/volume/flows/manager/manage_existing.py @@ -45,10 +45,8 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask): driver_name = self.driver.__class__.__name__ LOG.error(_LE("Unable to manage existing volume. " "Volume driver %s not initialized.") % driver_name) - flow_common.error_out_volume(context, self.db, volume_id, - reason=_("Volume driver %s " - "not initialized.") % - driver_name) + flow_common.error_out(volume_ref, _("Volume driver %s not " + "initialized.") % driver_name) raise exception.DriverNotInitialized() size = self.driver.manage_existing_get_size(volume_ref, @@ -59,7 +57,13 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask): 'volume_properties': volume_ref, 'volume_spec': {'status': volume_ref.status, 'volume_name': volume_ref.name, - 'volume_id': volume_ref.id}} + 'volume_id': volume_id}} + + def revert(self, context, result, flow_failures, volume_ref, **kwargs): + volume_id = volume_ref.id + reason = _('Volume manage failed.') + flow_common.error_out(volume_ref, reason=reason) + LOG.error(_LE("Volume %s: manage failed."), volume_id) class ManageExistingTask(flow_utils.CinderTask): @@ -75,6 +79,7 @@ class ManageExistingTask(flow_utils.CinderTask): def execute(self, context, volume_ref, manage_existing_ref, size): model_update = self.driver.manage_existing(volume_ref, manage_existing_ref) + if not model_update: model_update = {} model_update.update({'size': size}) diff --git a/cinder/volume/flows/manager/manage_existing_snapshot.py b/cinder/volume/flows/manager/manage_existing_snapshot.py index ff1ab991e..8d62a3ed8 100644 --- a/cinder/volume/flows/manager/manage_existing_snapshot.py +++ b/cinder/volume/flows/manager/manage_existing_snapshot.py @@ -60,8 +60,8 @@ class ExtractSnapshotRefTask(flow_utils.CinderTask): if isinstance(result, ft.Failure): return - flow_common.error_out_snapshot(context, self.db, snapshot_id) - LOG.error(_LE("Snapshot %s: create failed"), snapshot_id) + flow_common.error_out(result) + LOG.error(_LE("Snapshot %s: create failed"), result.id) class NotifySnapshotActionTask(flow_utils.CinderTask): @@ -104,16 +104,14 @@ class 
PrepareForQuotaReservationTask(flow_utils.CinderTask): self.driver = driver def execute(self, context, snapshot_ref, manage_existing_ref): - snapshot_id = snapshot_ref['id'] if not self.driver.initialized: driver_name = (self.driver.configuration. safe_get('volume_backend_name')) LOG.error(_LE("Unable to manage existing snapshot. " "Volume driver %s not initialized."), driver_name) - flow_common.error_out_snapshot(context, self.db, snapshot_id, - reason=_("Volume driver %s " - "not initialized.") % - driver_name) + flow_common.error_out(snapshot_ref, reason=_("Volume driver %s " + "not initialized.") % + driver_name) raise exception.DriverNotInitialized() size = self.driver.manage_existing_snapshot_get_size( @@ -270,10 +268,6 @@ class CreateSnapshotOnFinishTask(NotifySnapshotActionTask): Reversion strategy: N/A """ - def __init__(self, db, event_suffix, host): - super(CreateSnapshotOnFinishTask, self).__init__(db, event_suffix, - host) - def execute(self, context, snapshot, new_status): LOG.debug("Begin to call CreateSnapshotOnFinishTask execute.") snapshot_id = snapshot['id'] diff --git a/cinder/volume/group_types.py b/cinder/volume/group_types.py new file mode 100644 index 000000000..1dd19a67c --- /dev/null +++ b/cinder/volume/group_types.py @@ -0,0 +1,180 @@ +# Copyright (c) 2016 EMC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
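Aside on the two manage_existing flows above: both now fail resources through a shared flow_common.error_out helper that operates on the versioned object itself, rather than the older error_out_volume/error_out_snapshot database helpers. In rough sketch form (logging and the exact signature elided, so treat this as an approximation rather than the real helper):

    def error_out(resource, reason=None):
        # Flip the object's status to 'error' and persist it; the real
        # helper in cinder.volume.flows.common also logs the reason.
        resource.status = 'error'
        resource.save()
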
+ +"""Built-in group type properties.""" + + +from oslo_config import cfg +from oslo_db import exception as db_exc +from oslo_log import log as logging + +from cinder import context +from cinder import db +from cinder import exception +from cinder.i18n import _, _LE + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def create(context, + name, + group_specs=None, + is_public=True, + projects=None, + description=None): + """Creates group types.""" + group_specs = group_specs or {} + projects = projects or [] + elevated = context if context.is_admin else context.elevated() + try: + type_ref = db.group_type_create(elevated, + dict(name=name, + group_specs=group_specs, + is_public=is_public, + description=description), + projects=projects) + except db_exc.DBError: + LOG.exception(_LE('DB error:')) + raise exception.GroupTypeCreateFailed(name=name, + group_specs=group_specs) + return type_ref + + +def update(context, id, name, description, is_public=None): + """Update group type by id.""" + if id is None: + msg = _("id cannot be None") + raise exception.InvalidGroupType(reason=msg) + elevated = context if context.is_admin else context.elevated() + try: + type_updated = db.group_type_update(elevated, + id, + dict(name=name, + description=description, + is_public=is_public)) + except db_exc.DBError: + LOG.exception(_LE('DB error:')) + raise exception.GroupTypeUpdateFailed(id=id) + return type_updated + + +def destroy(context, id): + """Marks group types as deleted.""" + if id is None: + msg = _("id cannot be None") + raise exception.InvalidGroupType(reason=msg) + else: + elevated = context if context.is_admin else context.elevated() + db.group_type_destroy(elevated, id) + + +def get_all_group_types(context, inactive=0, filters=None, marker=None, + limit=None, sort_keys=None, sort_dirs=None, + offset=None, list_result=False): + """Get all non-deleted group_types. + + Pass true as argument if you want deleted group types returned also. + + """ + grp_types = db.group_type_get_all(context, inactive, filters=filters, + marker=marker, limit=limit, + sort_keys=sort_keys, + sort_dirs=sort_dirs, offset=offset, + list_result=list_result) + return grp_types + + +def get_group_type(ctxt, id, expected_fields=None): + """Retrieves single group type by id.""" + if id is None: + msg = _("id cannot be None") + raise exception.InvalidGroupType(reason=msg) + + if ctxt is None: + ctxt = context.get_admin_context() + + return db.group_type_get(ctxt, id, expected_fields=expected_fields) + + +def get_group_type_by_name(context, name): + """Retrieves single group type by name.""" + if name is None: + msg = _("name cannot be None") + raise exception.InvalidGroupType(reason=msg) + + return db.group_type_get_by_name(context, name) + + +def get_default_group_type(): + """Get the default group type.""" + name = CONF.default_group_type + grp_type = {} + + if name is not None: + ctxt = context.get_admin_context() + try: + grp_type = get_group_type_by_name(ctxt, name) + except exception.GroupTypeNotFoundByName: + # Couldn't find group type with the name in default_group_type + # flag, record this issue and move on + LOG.exception(_LE('Default group type is not found. 
' + 'Please check default_group_type config.')) + + return grp_type + + +def get_group_type_specs(group_type_id, key=False): + group_type = get_group_type(context.get_admin_context(), + group_type_id) + group_specs = group_type['group_specs'] + if key: + if group_specs.get(key): + return group_specs.get(key) + else: + return False + else: + return group_specs + + +def is_public_group_type(context, group_type_id): + """Return is_public boolean value of group type""" + group_type = db.group_type_get(context, group_type_id) + return group_type['is_public'] + + +def add_group_type_access(context, group_type_id, project_id): + """Add access to group type for project_id.""" + if group_type_id is None: + msg = _("group_type_id cannot be None") + raise exception.InvalidGroupType(reason=msg) + elevated = context if context.is_admin else context.elevated() + if is_public_group_type(elevated, group_type_id): + msg = _("Type access modification is not applicable to public group " + "type.") + raise exception.InvalidGroupType(reason=msg) + return db.group_type_access_add(elevated, group_type_id, project_id) + + +def remove_group_type_access(context, group_type_id, project_id): + """Remove access to group type for project_id.""" + if group_type_id is None: + msg = _("group_type_id cannot be None") + raise exception.InvalidGroupType(reason=msg) + elevated = context if context.is_admin else context.elevated() + if is_public_group_type(elevated, group_type_id): + msg = _("Type access modification is not applicable to public group " + "type.") + raise exception.InvalidGroupType(reason=msg) + return db.group_type_access_remove(elevated, group_type_id, project_id) diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 0cb36faee..a46a3c971 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -24,7 +24,6 @@ intact. **Related Flags** -:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`). :volume_manager: The module name of a class derived from :class:`manager.Manager` (default: :class:`cinder.volume.manager.Manager`). 
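The new cinder/volume/group_types.py module above intentionally mirrors the existing volume_types helpers. A hypothetical usage sketch of the API it exposes (the type name, spec key, and project id below are invented for illustration):

    from cinder import context
    from cinder.volume import group_types

    ctxt = context.get_admin_context()
    # All literal values below are made-up examples.
    gtype = group_types.create(ctxt, 'replicated_group',
                               group_specs={'replication': '<is> True'},
                               is_public=False)
    group_types.add_group_type_access(ctxt, gtype['id'], 'some-project-id')
    specs = group_types.get_group_type_specs(gtype['id'])
    group_types.destroy(ctxt, gtype['id'])
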
@@ -54,11 +53,13 @@ profiler = importutils.try_import('osprofiler.profiler') import six from taskflow import exceptions as tfe +from cinder.common import constants from cinder import compute from cinder import context from cinder import coordination from cinder import exception from cinder import flow_utils +from cinder import keymgr as key_manager from cinder.i18n import _, _LE, _LI, _LW from cinder.image import cache as image_cache from cinder.image import glance @@ -83,16 +84,27 @@ LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS CGQUOTAS = quota.CGQUOTAS +GROUP_QUOTAS = quota.GROUP_QUOTAS VALID_REMOVE_VOL_FROM_CG_STATUS = ( 'available', 'in-use', 'error', 'error_deleting') +VALID_REMOVE_VOL_FROM_GROUP_STATUS = ( + 'available', + 'in-use', + 'error', + 'error_deleting') VALID_ADD_VOL_TO_CG_STATUS = ( 'available', 'in-use') +VALID_ADD_VOL_TO_GROUP_STATUS = ( + 'available', + 'in-use') VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,) +VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,) VALID_CREATE_CG_SRC_CG_STATUS = ('available',) +VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',) volume_manager_opts = [ cfg.StrOpt('volume_driver', @@ -135,14 +147,14 @@ MAPPING = { 'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver', 'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver': 'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver', - 'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver': - 'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver', - 'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver': - 'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver', 'cinder.volume.drivers.hds.nfs.HDSNFSDriver': - 'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver', + 'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver', 'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver': - 'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver', + 'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver', + 'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver': + 'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver', + 'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver': + 'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver', 'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver': 'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver', 'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver': @@ -151,13 +163,15 @@ MAPPING = { 'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver', 'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver': 'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver', + 'cinder.volume.drivers.ibm.xiv_ds8k': + 'cinder.volume.drivers.ibm.ibm_storage', } class VolumeManager(manager.SchedulerDependentManager): """Manages attachable block storage devices.""" - RPC_API_VERSION = '2.2' + RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION target = messaging.Target(version=RPC_API_VERSION) @@ -168,7 +182,7 @@ class VolumeManager(manager.SchedulerDependentManager): _VOLUME_CLONE_SKIP_PROPERTIES = { 'id', '_name_id', 'name_id', 'name', 'status', 'attach_status', 'migration_status', 'volume_type', - 'consistencygroup', 'volume_attachment'} + 'consistencygroup', 'volume_attachment', 'group'} def __init__(self, volume_driver=None, service_name=None, *args, **kwargs): @@ -201,7 +215,7 @@ class VolumeManager(manager.SchedulerDependentManager): service = objects.Service.get_by_args( context.get_admin_context(), svc_host, - 'cinder-volume') + constants.VOLUME_BINARY) 
except exception.ServiceNotFound: # NOTE(jdg): This is to solve problems with unit tests LOG.info(_LI("Service not found for updating " @@ -217,6 +231,7 @@ class VolumeManager(manager.SchedulerDependentManager): requests.packages.urllib3.disable_warnings( requests.packages.urllib3.exceptions.InsecurePlatformWarning) + self.key_manager = key_manager.API(CONF) self.driver = importutils.import_object( volume_driver, configuration=self.configuration, @@ -355,9 +370,40 @@ class VolumeManager(manager.SchedulerDependentManager): update['id'], {'provider_id': update['provider_id']}) - def init_host(self): + def _include_resources_in_cluster(self, ctxt): + + LOG.info(_LI('Including all resources from host %(host)s in cluster ' + '%(cluster)s.'), + {'host': self.host, 'cluster': self.cluster}) + num_vols = objects.VolumeList.include_in_cluster( + ctxt, self.cluster, host=self.host) + num_cgs = objects.ConsistencyGroupList.include_in_cluster( + ctxt, self.cluster, host=self.host) + LOG.info(_LI('%(num_vols)s volumes and %(num_cgs)s consistency groups ' + 'from host %(host)s have been included in cluster ' + '%(cluster)s.'), + {'num_vols': num_vols, 'num_cgs': num_cgs, + 'host': self.host, 'cluster': self.cluster}) + + def init_host(self, added_to_cluster=None): """Perform any required initialization.""" ctxt = context.get_admin_context() + if not self.driver.supported: + utils.log_unsupported_driver_warning(self.driver) + + if not self.configuration.enable_unsupported_driver: + LOG.error(_LE("Unsupported drivers are disabled." + " You can re-enable by adding " + "enable_unsupported_driver=True to the " + "driver section in cinder.conf"), + resource={'type': 'driver', + 'id': self.__class__.__name__}) + return + + # If we have just added this host to a cluster we have to include all + # our resources in that cluster. + if added_to_cluster: + self._include_resources_in_cluster(ctxt) LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"), {'driver_name': self.driver.__class__.__name__, @@ -463,13 +509,24 @@ class VolumeManager(manager.SchedulerDependentManager): {'driver_name': self.driver.__class__.__name__, 'version': self.driver.get_version()}) + try: + # Make sure the driver is initialized first + utils.log_unsupported_driver_warning(self.driver) + utils.require_driver_initialized(self.driver) + except exception.DriverNotInitialized: + LOG.error(_LE("Cannot complete RPC initialization because " + "driver isn't initialized properly."), + resource={'type': 'driver', + 'id': self.driver.__class__.__name__}) + return + stats = self.driver.get_volume_stats(refresh=True) svc_host = vol_utils.extract_host(self.host, 'backend') try: service = objects.Service.get_by_args( context.get_admin_context(), svc_host, - 'cinder-volume') + constants.VOLUME_BINARY) except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): LOG.error(_LE("Service not found for updating " @@ -500,18 +557,26 @@ class VolumeManager(manager.SchedulerDependentManager): filter_properties=None, allow_reschedule=True, volume=None): """Creates the volume.""" + # Log about unsupported drivers + utils.log_unsupported_driver_warning(self.driver) + # FIXME(dulek): Remove this in v3.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the volume # by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) + # FIXME(dulek): Remove this in v3.0 of RPC API. + if isinstance(request_spec, dict): + # We may receive request_spec as dict from older clients. 
+ request_spec = objects.RequestSpec.from_primitives(request_spec) + context_elevated = context.elevated() if filter_properties is None: filter_properties = {} if request_spec is None: - request_spec = {} + request_spec = objects.RequestSpec() try: # NOTE(flaper87): Driver initialization is @@ -523,7 +588,7 @@ class VolumeManager(manager.SchedulerDependentManager): self.driver, self.scheduler_rpcapi, self.host, - volume.id, + volume, allow_reschedule, context, request_spec, @@ -562,7 +627,6 @@ class VolumeManager(manager.SchedulerDependentManager): # NOTE(dulek): Flag to indicate if volume was rescheduled. Used to # decide if allocated_capacity should be incremented. rescheduled = False - vol_ref = None try: if locked_action is None: @@ -572,7 +636,7 @@ class VolumeManager(manager.SchedulerDependentManager): _run_flow() finally: try: - vol_ref = flow_engine.storage.fetch('volume_ref') + flow_engine.storage.fetch('refreshed') except tfe.NotFound: # If there's no vol_ref, then flow is reverted. Lets check out # if rescheduling occurred. @@ -584,16 +648,12 @@ class VolumeManager(manager.SchedulerDependentManager): pass if not rescheduled: - if not vol_ref: - # Flow was reverted and not rescheduled, fetching - # volume_ref from the DB, because it will be needed. - vol_ref = objects.Volume.get_by_id(context, volume.id) # NOTE(dulek): Volume wasn't rescheduled so we need to update # volume stats as these are decremented on delete. - self._update_allocated_capacity(vol_ref) + self._update_allocated_capacity(volume) - LOG.info(_LI("Created volume successfully."), resource=vol_ref) - return vol_ref.id + LOG.info(_LI("Created volume successfully."), resource=volume) + return volume.id # FIXME(bluex): replace volume_id with volume.id when volume_id is removed @coordination.synchronized('{volume_id}-{f_name}') @@ -1288,7 +1348,8 @@ class VolumeManager(manager.SchedulerDependentManager): exc_info=True, resource={'type': 'image', 'id': image_id}) - def initialize_connection(self, context, volume_id, connector): + def initialize_connection(self, context, volume_id, connector, + volume=None): """Prepare volume for connection from host represented by connector. This method calls the driver initialize_connection and returns @@ -1325,12 +1386,16 @@ class VolumeManager(manager.SchedulerDependentManager): json in various places, so it should not contain any non-json data types. """ + # FIXME(bluex): Remove this in v3.0 of RPC API. + if volume is None: + # For older clients, mimic the old behavior and look up the volume + # by its volume_id. + volume = objects.Volume.get_by_id(context, volume_id) + # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. 
utils.require_driver_initialized(self.driver) - volume = self.db.volume_get(context, volume_id) - model_update = None try: self.driver.validate_connector(connector) except exception.InvalidConnectorException as err: @@ -1351,9 +1416,8 @@ class VolumeManager(manager.SchedulerDependentManager): try: if model_update: - volume = self.db.volume_update(context, - volume_id, - model_update) + volume.update(model_update) + volume.save() except exception.CinderException as ex: LOG.exception(_LE("Model update failed."), resource=volume) raise exception.ExportFailure(reason=six.text_type(ex)) @@ -1370,7 +1434,7 @@ class VolumeManager(manager.SchedulerDependentManager): raise exception.VolumeBackendAPIException(data=err_msg) # Add qos_specs to connection info - typeid = volume['volume_type_id'] + typeid = volume.volume_type_id specs = None if typeid: res = volume_types.get_volume_type_qos_specs(typeid) @@ -1384,8 +1448,7 @@ class VolumeManager(manager.SchedulerDependentManager): conn_info['data'].update(qos_spec) # Add access_mode to connection info - volume_metadata = self.db.volume_admin_metadata_get(context.elevated(), - volume_id) + volume_metadata = volume.admin_metadata access_mode = volume_metadata.get('attached_mode') if access_mode is None: # NOTE(zhiyan): client didn't call 'os-attach' before @@ -1396,7 +1459,7 @@ class VolumeManager(manager.SchedulerDependentManager): # Add encrypted flag to connection_info if not set in the driver. if conn_info['data'].get('encrypted') is None: - encrypted = bool(volume.get('encryption_key_id')) + encrypted = bool(volume.encryption_key_id) conn_info['data']['encrypted'] = encrypted # Add discard flag to connection_info if not set in the driver and @@ -1511,7 +1574,8 @@ class VolumeManager(manager.SchedulerDependentManager): return {'conn': conn, 'device': vol_handle, 'connector': connector} - def _attach_volume(self, ctxt, volume, properties, remote=False): + def _attach_volume(self, ctxt, volume, properties, remote=False, + attach_encryptor=False): status = volume['status'] if remote: @@ -1527,11 +1591,35 @@ class VolumeManager(manager.SchedulerDependentManager): else: conn = self.initialize_connection(ctxt, volume['id'], properties) - return self._connect_device(conn) + attach_info = self._connect_device(conn) + try: + if attach_encryptor and ( + volume_types.is_encrypted(ctxt, + volume.volume_type_id)): + encryption = self.db.volume_encryption_metadata_get( + ctxt.elevated(), volume.id) + if encryption: + utils.brick_attach_volume_encryptor(ctxt, + attach_info, + encryption) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Failed to attach volume encryptor" + " %(vol)s."), {'vol': volume['id']}) + self._detach_volume(ctxt, attach_info, volume, properties) + return attach_info def _detach_volume(self, ctxt, attach_info, volume, properties, - force=False, remote=False): + force=False, remote=False, + attach_encryptor=False): connector = attach_info['connector'] + if attach_encryptor and ( + volume_types.is_encrypted(ctxt, + volume.volume_type_id)): + encryption = self.db.volume_encryption_metadata_get( + ctxt.elevated(), volume.id) + if encryption: + utils.brick_detach_volume_encryptor(attach_info, encryption) connector.disconnect_volume(attach_info['conn']['data'], attach_info['device']) @@ -1554,22 +1642,34 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.', {'src': src_vol['name'], 'dest': dest_vol['name']}) - + attach_encryptor = False + # If the 
encryption method or key is changed, we have to
+        # copy data through dm-crypt.
+        if volume_types.volume_types_encryption_changed(
+                ctxt,
+                src_vol.volume_type_id,
+                dest_vol.volume_type_id):
+            attach_encryptor = True
         properties = utils.brick_get_connector_properties()
         dest_remote = remote in ['dest', 'both']
-        dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
-                                               remote=dest_remote)
+        dest_attach_info = self._attach_volume(
+            ctxt, dest_vol, properties,
+            remote=dest_remote,
+            attach_encryptor=attach_encryptor)
         try:
             src_remote = remote in ['src', 'both']
-            src_attach_info = self._attach_volume(ctxt, src_vol, properties,
-                                                  remote=src_remote)
+            src_attach_info = self._attach_volume(
+                ctxt, src_vol, properties,
+                remote=src_remote,
+                attach_encryptor=attach_encryptor)
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE("Failed to attach source volume for copy."))
                 self._detach_volume(ctxt, dest_attach_info, dest_vol,
-                                    properties, remote=dest_remote)
+                                    properties, remote=dest_remote,
+                                    attach_encryptor=attach_encryptor)
 
         # Check the backend capabilities of migration destination host.
         rpcapi = volume_rpcapi.VolumeAPI()
@@ -1596,11 +1696,13 @@
             try:
                 self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                     properties, force=copy_error,
-                                    remote=dest_remote)
+                                    remote=dest_remote,
+                                    attach_encryptor=attach_encryptor)
             finally:
                 self._detach_volume(ctxt, src_attach_info, src_vol,
                                     properties, force=copy_error,
-                                    remote=src_remote)
+                                    remote=src_remote,
+                                    attach_encryptor=attach_encryptor)
 
     def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
         rpcapi = volume_rpcapi.VolumeAPI()
@@ -1610,6 +1712,12 @@
         new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
         if new_type_id:
             new_vol_values['volume_type_id'] = new_type_id
+            if volume_types.volume_types_encryption_changed(
+                    ctxt, volume.volume_type_id, new_type_id):
+                encryption_key_id = vol_utils.create_encryption_key(
+                    ctxt, self.key_manager, new_type_id)
+                new_vol_values['encryption_key_id'] = encryption_key_id
+
         new_volume = objects.Volume(
             context=ctxt,
             host=host['host'],
@@ -1763,11 +1871,15 @@
         volume.save()
 
         # Detach the source volume (if it fails, don't fail the migration)
+        # After detach and refresh, volume.volume_attachment will be None,
+        # so keep the attachment list around for the later re-attach.
+ if orig_volume_status == 'in-use': + attachments = volume.volume_attachment + else: + attachments = None try: - if orig_volume_status == 'in-use': - attachments = volume.volume_attachment - for attachment in attachments: - self.detach_volume(ctxt, volume.id, attachment['id']) + for attachment in attachments: + self.detach_volume(ctxt, volume.id, attachment['id']) except Exception as ex: LOG.error(_LE("Detach migration source volume failed: %(err)s"), {'err': ex}, resource=volume) @@ -1789,7 +1901,6 @@ class VolumeManager(manager.SchedulerDependentManager): 'migration_status': 'success'} if orig_volume_status == 'in-use': - attachments = volume.volume_attachment for attachment in attachments: rpcapi.attach_volume(ctxt, volume, attachment['instance_uuid'], @@ -1975,6 +2086,25 @@ class VolumeManager(manager.SchedulerDependentManager): context, volume, event_suffix, extra_usage_info=extra_usage_info, host=self.host) + def _notify_about_group_usage(self, + context, + group, + event_suffix, + volumes=None, + extra_usage_info=None): + vol_utils.notify_about_group_usage( + context, group, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + + if not volumes: + volumes = self.db.volume_get_all_by_generic_group( + context, group.id) + if volumes: + for volume in volumes: + vol_utils.notify_about_volume_usage( + context, volume, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + def _notify_about_cgsnapshot_usage(self, context, cgsnapshot, @@ -1994,6 +2124,25 @@ class VolumeManager(manager.SchedulerDependentManager): context, snapshot, event_suffix, extra_usage_info=extra_usage_info, host=self.host) + def _notify_about_group_snapshot_usage(self, + context, + group_snapshot, + event_suffix, + snapshots=None, + extra_usage_info=None): + vol_utils.notify_about_group_snapshot_usage( + context, group_snapshot, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + + if not snapshots: + snapshots = objects.SnapshotList.get_all_for_group_snapshot( + context, group_snapshot.id) + if snapshots: + for snapshot in snapshots: + vol_utils.notify_about_snapshot_usage( + context, snapshot, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + def extend_volume(self, context, volume_id, new_size, reservations, volume=None): # FIXME(dulek): Remove this in v3.0 of RPC API. @@ -2052,7 +2201,7 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.info(_LI("Extend volume completed successfully."), resource=volume) - def retype(self, ctxt, volume_id, new_type_id, host, + def retype(self, context, volume_id, new_type_id, host, migration_policy='never', reservations=None, volume=None, old_reservations=None): @@ -2065,8 +2214,6 @@ class VolumeManager(manager.SchedulerDependentManager): QUOTAS.rollback(context, old_reservations) QUOTAS.rollback(context, new_reservations) - context = ctxt.elevated() - # FIXME(dulek): Remove this in v3.0 of RPC API. 
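On the encryption support threaded through copy_data_between_volumes above: every _attach_volume must be paired with a _detach_volume carrying the same attach_encryptor flag, otherwise the dm-crypt mapping set up for the copy is never torn down. One way to picture that contract, as a hypothetical context manager over the two manager helpers:

    import contextlib

    @contextlib.contextmanager
    def attached(manager, ctxt, volume, properties, remote,
                 attach_encryptor):
        # Sketch: attach with the given flags, and always detach with
        # the same flags so any encryptor layered on top is removed.
        info = manager._attach_volume(ctxt, volume, properties,
                                      remote=remote,
                                      attach_encryptor=attach_encryptor)
        try:
            yield info
        finally:
            manager._detach_volume(ctxt, info, volume, properties,
                                   remote=remote,
                                   attach_encryptor=attach_encryptor)
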
if volume is None: # For older clients, mimic the old behavior and look up the volume @@ -2140,6 +2287,7 @@ class VolumeManager(manager.SchedulerDependentManager): # We assume that those that support pools do this internally # so we strip off the pools designation if (not retyped and + diff.get('encryption') is None and vol_utils.hosts_are_equivalent(self.driver.host, host['host'])): try: @@ -2378,27 +2526,46 @@ class VolumeManager(manager.SchedulerDependentManager): def create_consistencygroup(self, context, group): """Creates the consistency group.""" + return self._create_group(context, group, False) + + def create_group(self, context, group): + """Creates the group.""" + return self._create_group(context, group) + + def _create_group(self, context, group, is_generic_group=True): context = context.elevated() - status = fields.ConsistencyGroupStatus.AVAILABLE + status = fields.GroupStatus.AVAILABLE model_update = None - self._notify_about_consistencygroup_usage( - context, group, "create.start") + if is_generic_group: + self._notify_about_group_usage( + context, group, "create.start") + else: + self._notify_about_consistencygroup_usage( + context, group, "create.start") try: utils.require_driver_initialized(self.driver) - LOG.info(_LI("Consistency group %s: creating"), group.name) - model_update = self.driver.create_consistencygroup(context, - group) + LOG.info(_LI("Group %s: creating"), group.name) + if is_generic_group: + try: + model_update = self.driver.create_group(context, + group) + except NotImplementedError: + model_update = self._create_group_generic(context, + group) + else: + model_update = self.driver.create_consistencygroup(context, + group) if model_update: if (model_update['status'] == - fields.ConsistencyGroupStatus.ERROR): - msg = (_('Create consistency group failed.')) + fields.GroupStatus.ERROR): + msg = (_('Create group failed.')) LOG.error(msg, - resource={'type': 'consistency_group', + resource={'type': 'group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: @@ -2406,22 +2573,26 @@ class VolumeManager(manager.SchedulerDependentManager): group.save() except Exception: with excutils.save_and_reraise_exception(): - group.status = fields.ConsistencyGroupStatus.ERROR + group.status = fields.GroupStatus.ERROR group.save() - LOG.error(_LE("Consistency group %s: create failed"), + LOG.error(_LE("Group %s: create failed"), group.name) group.status = status group.created_at = timeutils.utcnow() group.save() - LOG.info(_LI("Consistency group %s: created successfully"), + LOG.info(_LI("Group %s: created successfully"), group.name) - self._notify_about_consistencygroup_usage( - context, group, "create.end") + if is_generic_group: + self._notify_about_group_usage( + context, group, "create.end") + else: + self._notify_about_consistencygroup_usage( + context, group, "create.end") - LOG.info(_LI("Create consistency group completed successfully."), - resource={'type': 'consistency_group', + LOG.info(_LI("Create group completed successfully."), + resource={'type': 'group', 'id': group.id}) return group @@ -2560,6 +2731,181 @@ class VolumeManager(manager.SchedulerDependentManager): 'id': group.id}) return group + def create_group_from_src(self, context, group, + group_snapshot=None, source_group=None): + """Creates the group from source. + + The source can be a group snapshot or a source group. 
+ """ + source_name = None + snapshots = None + source_vols = None + try: + volumes = objects.VolumeList.get_all_by_generic_group(context, + group.id) + if group_snapshot: + try: + # Check if group_snapshot still exists + group_snapshot = objects.GroupSnapshot.get_by_id( + context, group_snapshot.id) + except exception.GroupSnapshotNotFound: + LOG.error(_LE("Create group " + "from snapshot-%(snap)s failed: " + "SnapshotNotFound."), + {'snap': group_snapshot.id}, + resource={'type': 'group', + 'id': group.id}) + raise + + source_name = _("snapshot-%s") % group_snapshot.id + snapshots = objects.SnapshotList.get_all_for_group_snapshot( + context, group_snapshot.id) + for snap in snapshots: + if (snap.status not in + VALID_CREATE_GROUP_SRC_SNAP_STATUS): + msg = (_("Cannot create group " + "%(group)s because snapshot %(snap)s is " + "not in a valid state. Valid states are: " + "%(valid)s.") % + {'group': group.id, + 'snap': snap['id'], + 'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS}) + raise exception.InvalidGroup(reason=msg) + + if source_group: + try: + source_group = objects.Group.get_by_id( + context, source_group.id) + except exception.GroupNotFound: + LOG.error(_LE("Create group " + "from source group-%(group)s failed: " + "GroupNotFound."), + {'group': source_group.id}, + resource={'type': 'group', + 'id': group.id}) + raise + + source_name = _("group-%s") % source_group.id + source_vols = objects.VolumeList.get_all_by_generic_group( + context, source_group.id) + for source_vol in source_vols: + if (source_vol.status not in + VALID_CREATE_GROUP_SRC_GROUP_STATUS): + msg = (_("Cannot create group " + "%(group)s because source volume " + "%(source_vol)s is not in a valid " + "state. Valid states are: " + "%(valid)s.") % + {'group': group.id, + 'source_vol': source_vol.id, + 'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS}) + raise exception.InvalidGroup(reason=msg) + + # Sort source snapshots so that they are in the same order as their + # corresponding target volumes. + sorted_snapshots = None + if group_snapshot and snapshots: + sorted_snapshots = self._sort_snapshots(volumes, snapshots) + + # Sort source volumes so that they are in the same order as their + # corresponding target volumes. + sorted_source_vols = None + if source_group and source_vols: + sorted_source_vols = self._sort_source_vols(volumes, + source_vols) + + self._notify_about_group_usage( + context, group, "create.start") + + utils.require_driver_initialized(self.driver) + + try: + model_update, volumes_model_update = ( + self.driver.create_group_from_src( + context, group, volumes, group_snapshot, + sorted_snapshots, source_group, sorted_source_vols)) + except NotImplementedError: + model_update, volumes_model_update = ( + self._create_group_from_src_generic( + context, group, volumes, group_snapshot, + sorted_snapshots, source_group, sorted_source_vols)) + + if volumes_model_update: + for update in volumes_model_update: + self.db.volume_update(context, update['id'], update) + + if model_update: + group.update(model_update) + group.save() + + except Exception: + with excutils.save_and_reraise_exception(): + group.status = 'error' + group.save() + LOG.error(_LE("Create group " + "from source %(source)s failed."), + {'source': source_name}, + resource={'type': 'group', + 'id': group.id}) + # Update volume status to 'error' as well. 
+ for vol in volumes: + vol.status = 'error' + vol.save() + + now = timeutils.utcnow() + status = 'available' + for vol in volumes: + update = {'status': status, 'created_at': now} + self._update_volume_from_src(context, vol, update, group=group) + self._update_allocated_capacity(vol) + + group.status = status + group.created_at = now + group.save() + + self._notify_about_group_usage( + context, group, "create.end") + LOG.info(_LI("Create group " + "from source-%(source)s completed successfully."), + {'source': source_name}, + resource={'type': 'group', + 'id': group.id}) + return group + + def _create_group_from_src_generic(self, context, group, volumes, + group_snapshot=None, snapshots=None, + source_group=None, source_vols=None): + """Creates a group from source. + + :param context: the context of the caller. + :param group: the Group object to be created. + :param volumes: a list of volume objects in the group. + :param group_snapshot: the GroupSnapshot object as source. + :param snapshots: a list of snapshot objects in group_snapshot. + :param source_group: the Group object as source. + :param source_vols: a list of volume objects in the source_group. + :returns: model_update, volumes_model_update + """ + for vol in volumes: + try: + if snapshots: + for snapshot in snapshots: + if vol.snapshot_id == snapshot.id: + self.driver.create_volume_from_snapshot( + vol, snapshot) + break + except Exception: + raise + try: + if source_vols: + for source_vol in source_vols: + if vol.source_volid == source_vol.id: + self.driver.create_cloned_volume(vol, source_vol) + break + except Exception: + raise + return None, None + def _sort_snapshots(self, volumes, snapshots): # Sort source snapshots so that they are in the same order as their # corresponding target volumes. 
Each source snapshot in the snapshots @@ -2793,6 +3139,170 @@ class VolumeManager(manager.SchedulerDependentManager): resource={'type': 'consistency_group', 'id': group.id}) + def delete_group(self, context, group): + """Deletes group and the volumes in the group.""" + context = context.elevated() + project_id = group.project_id + + if context.project_id != group.project_id: + project_id = group.project_id + else: + project_id = context.project_id + + volumes = objects.VolumeList.get_all_by_generic_group( + context, group.id) + + for vol_obj in volumes: + if vol_obj.attach_status == "attached": + # Volume is still attached, need to detach first + raise exception.VolumeAttached(volume_id=vol_obj.id) + # self.host is 'host@backend' + # vol_obj.host is 'host@backend#pool' + # Extract host before doing comparison + if vol_obj.host: + new_host = vol_utils.extract_host(vol_obj.host) + msg = (_("Volume %(vol_id)s is not local to this node " + "%(host)s") % {'vol_id': vol_obj.id, + 'host': self.host}) + if new_host != self.host: + raise exception.InvalidVolume(reason=msg) + + self._notify_about_group_usage( + context, group, "delete.start") + + volumes_model_update = None + model_update = None + try: + utils.require_driver_initialized(self.driver) + + try: + model_update, volumes_model_update = ( + self.driver.delete_group(context, group, volumes)) + except NotImplementedError: + model_update, volumes_model_update = ( + self._delete_group_generic(context, group, volumes)) + + if volumes_model_update: + for update in volumes_model_update: + # If we failed to delete a volume, make sure the + # status for the group is set to error as well + if (update['status'] in ['error_deleting', 'error'] + and model_update['status'] not in + ['error_deleting', 'error']): + model_update['status'] = update['status'] + self.db.volumes_update(context, volumes_model_update) + + if model_update: + if model_update['status'] in ['error_deleting', 'error']: + msg = (_('Delete group failed.')) + LOG.error(msg, + resource={'type': 'group', + 'id': group.id}) + raise exception.VolumeDriverException(message=msg) + else: + group.update(model_update) + group.save() + + except Exception: + with excutils.save_and_reraise_exception(): + group.status = 'error' + group.save() + # Update volume status to 'error' if driver returns + # None for volumes_model_update. 
+ if not volumes_model_update: + for vol_obj in volumes: + vol_obj.status = 'error' + vol_obj.save() + + # Get reservations for group + try: + reserve_opts = {'groups': -1} + grpreservations = GROUP_QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) + except Exception: + grpreservations = None + LOG.exception(_LE("Delete group " + "failed to update usages."), + resource={'type': 'group', + 'id': group.id}) + + for vol in volumes: + # Get reservations for volume + try: + reserve_opts = {'volumes': -1, + 'gigabytes': -vol.size} + QUOTAS.add_volume_type_opts(context, + reserve_opts, + vol.volume_type_id) + reservations = QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) + except Exception: + reservations = None + LOG.exception(_LE("Delete group " + "failed to update usages."), + resource={'type': 'group', + 'id': group.id}) + + # Delete glance metadata if it exists + self.db.volume_glance_metadata_delete_by_volume(context, vol.id) + + vol.destroy() + + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations, project_id=project_id) + + self.stats['allocated_capacity_gb'] -= vol.size + + if grpreservations: + GROUP_QUOTAS.commit(context, grpreservations, + project_id=project_id) + + group.destroy() + self._notify_about_group_usage( + context, group, "delete.end") + self.publish_service_capabilities(context) + LOG.info(_LI("Delete group " + "completed successfully."), + resource={'type': 'group', + 'id': group.id}) + + def _create_group_generic(self, context, group): + """Creates a group.""" + # A group entry is already created in db. Just returns a status here. + model_update = {'status': fields.GroupStatus.AVAILABLE, + 'created_at': timeutils.utcnow()} + return model_update + + def _delete_group_generic(self, context, group, volumes): + """Deletes a group and volumes in the group.""" + model_update = {'status': group.status} + volume_model_updates = [] + for volume_ref in volumes: + volume_model_update = {'id': volume_ref.id} + try: + self.driver.remove_export(context, volume_ref) + self.driver.delete_volume(volume_ref) + volume_model_update['status'] = 'deleted' + except exception.VolumeIsBusy: + volume_model_update['status'] = 'available' + except Exception: + volume_model_update['status'] = 'error' + model_update['status'] = fields.GroupStatus.ERROR + volume_model_updates.append(volume_model_update) + + return model_update, volume_model_updates + + def _update_group_generic(self, context, group, + add_volumes=None, remove_volumes=None): + """Updates a group.""" + # NOTE(xyang): The volume manager adds/removes the volume to/from the + # group in the database. This default implementation does not do + # anything in the backend storage. + return None, None, None + def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates consistency group. @@ -2937,6 +3447,151 @@ class VolumeManager(manager.SchedulerDependentManager): resource={'type': 'consistency_group', 'id': group.id}) + def update_group(self, context, group, + add_volumes=None, remove_volumes=None): + """Updates group. + + Update group by adding volumes to the group, + or removing volumes from the group. 
+        """
+
+        add_volumes_ref = []
+        remove_volumes_ref = []
+        add_volumes_list = []
+        remove_volumes_list = []
+        if add_volumes:
+            add_volumes_list = add_volumes.split(',')
+        if remove_volumes:
+            remove_volumes_list = remove_volumes.split(',')
+        for add_vol in add_volumes_list:
+            try:
+                add_vol_ref = objects.Volume.get_by_id(context, add_vol)
+            except exception.VolumeNotFound:
+                LOG.error(_LE("Update group "
+                              "failed to add volume-%(volume_id)s: "
+                              "VolumeNotFound."),
+                          {'volume_id': add_vol},
+                          resource={'type': 'group',
+                                    'id': group.id})
+                raise
+            if add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS:
+                msg = (_("Cannot add volume %(volume_id)s to "
+                         "group %(group_id)s because volume is in an invalid "
+                         "state: %(status)s. Valid states are: %(valid)s.") %
+                       {'volume_id': add_vol_ref.id,
+                        'group_id': group.id,
+                        'status': add_vol_ref.status,
+                        'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
+                raise exception.InvalidVolume(reason=msg)
+            # self.host is 'host@backend'
+            # add_vol_ref.host is 'host@backend#pool'
+            # Extract host before doing comparison
+            new_host = vol_utils.extract_host(add_vol_ref.host)
+            if new_host != self.host:
+                raise exception.InvalidVolume(
+                    reason=_("Volume is not local to this node."))
+            add_volumes_ref.append(add_vol_ref)
+
+        for remove_vol in remove_volumes_list:
+            try:
+                remove_vol_ref = objects.Volume.get_by_id(context, remove_vol)
+            except exception.VolumeNotFound:
+                LOG.error(_LE("Update group "
+                              "failed to remove volume-%(volume_id)s: "
+                              "VolumeNotFound."),
+                          {'volume_id': remove_vol},
+                          resource={'type': 'group',
+                                    'id': group.id})
+                raise
+            if (remove_vol_ref.status not in
+                    VALID_REMOVE_VOL_FROM_GROUP_STATUS):
+                msg = (_("Cannot remove volume %(volume_id)s from "
+                         "group %(group_id)s because volume is in an invalid "
+                         "state: %(status)s. 
Valid states are: %(valid)s.") % + {'volume_id': remove_vol_ref.id, + 'group_id': group.id, + 'status': remove_vol_ref.status, + 'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS}) + raise exception.InvalidVolume(reason=msg) + remove_volumes_ref.append(remove_vol_ref) + + self._notify_about_group_usage( + context, group, "update.start") + + try: + utils.require_driver_initialized(self.driver) + + try: + model_update, add_volumes_update, remove_volumes_update = ( + self.driver.update_group( + context, group, + add_volumes=add_volumes_ref, + remove_volumes=remove_volumes_ref)) + except NotImplementedError: + model_update, add_volumes_update, remove_volumes_update = ( + self._update_group_generic( + context, group, + add_volumes=add_volumes_ref, + remove_volumes=remove_volumes_ref)) + + if add_volumes_update: + self.db.volumes_update(context, add_volumes_update) + + if remove_volumes_update: + self.db.volumes_update(context, remove_volumes_update) + + if model_update: + if model_update['status'] in ( + [fields.GroupStatus.ERROR]): + msg = (_('Error occurred when updating group ' + '%s.') % group.id) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + group.update(model_update) + group.save() + + except exception.VolumeDriverException: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error occurred in the volume driver when " + "updating group %(group_id)s."), + {'group_id': group.id}) + group.status = 'error' + group.save() + for add_vol in add_volumes_ref: + add_vol.status = 'error' + add_vol.save() + for rem_vol in remove_volumes_ref: + rem_vol.status = 'error' + rem_vol.save() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error occurred when updating " + "group %(group_id)s."), + {'group_id': group.id}) + group.status = 'error' + group.save() + for add_vol in add_volumes_ref: + add_vol.status = 'error' + add_vol.save() + for rem_vol in remove_volumes_ref: + rem_vol.status = 'error' + rem_vol.save() + + group.status = 'available' + group.save() + for add_vol in add_volumes_ref: + add_vol.group_id = group.id + add_vol.save() + for rem_vol in remove_volumes_ref: + rem_vol.group_id = None + rem_vol.save() + + self._notify_about_group_usage( + context, group, "update.end") + LOG.info(_LI("Update group completed successfully."), + resource={'type': 'group', + 'id': group.id}) + def create_cgsnapshot(self, context, cgsnapshot): """Creates the cgsnapshot.""" caller_context = context @@ -3047,6 +3702,152 @@ class VolumeManager(manager.SchedulerDependentManager): context, cgsnapshot, "create.end") return cgsnapshot + def create_group_snapshot(self, context, group_snapshot): + """Creates the group_snapshot.""" + caller_context = context + context = context.elevated() + + LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id) + + snapshots = objects.SnapshotList.get_all_for_group_snapshot( + context, group_snapshot.id) + + self._notify_about_group_snapshot_usage( + context, group_snapshot, "create.start") + + snapshots_model_update = None + model_update = None + try: + utils.require_driver_initialized(self.driver) + + LOG.debug("Group snapshot %(grp_snap_id)s: creating.", + {'grp_snap_id': group_snapshot.id}) + + # Pass context so that drivers that want to use it, can, + # but it is not a requirement for all drivers. 
+ group_snapshot.context = caller_context + for snapshot in snapshots: + snapshot.context = caller_context + + try: + model_update, snapshots_model_update = ( + self.driver.create_group_snapshot(context, group_snapshot, + snapshots)) + except NotImplementedError: + model_update, snapshots_model_update = ( + self._create_group_snapshot_generic( + context, group_snapshot, snapshots)) + + if snapshots_model_update: + for snap_model in snapshots_model_update: + # Update db for snapshot. + # NOTE(xyang): snapshots is a list of snapshot objects. + # snapshots_model_update should be a list of dicts. + snap_id = snap_model.pop('id') + snap_obj = objects.Snapshot.get_by_id(context, snap_id) + snap_obj.update(snap_model) + snap_obj.save() + if (snap_model['status'] in [ + fields.SnapshotStatus.ERROR_DELETING, + fields.SnapshotStatus.ERROR] and + model_update['status'] not in + ['error_deleting', 'error']): + model_update['status'] = snap_model['status'] + + if model_update: + if model_update['status'] == 'error': + msg = (_('Error occurred when creating group_snapshot ' + '%s.') % group_snapshot.id) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + group_snapshot.update(model_update) + group_snapshot.save() + + except exception.CinderException: + with excutils.save_and_reraise_exception(): + group_snapshot.status = 'error' + group_snapshot.save() + # Update snapshot status to 'error' if driver returns + # None for snapshots_model_update. + if not snapshots_model_update: + for snapshot in snapshots: + snapshot.status = fields.SnapshotStatus.ERROR + snapshot.save() + + for snapshot in snapshots: + volume_id = snapshot.volume_id + snapshot_id = snapshot.id + vol_obj = objects.Volume.get_by_id(context, volume_id) + if vol_obj.bootable: + try: + self.db.volume_glance_metadata_copy_to_snapshot( + context, snapshot_id, volume_id) + except exception.GlanceMetadataNotFound: + # If volume is not created from image, No glance metadata + # would be available for that volume in + # volume glance metadata table + pass + except exception.CinderException as ex: + LOG.error(_LE("Failed updating %(snapshot_id)s" + " metadata using the provided volumes" + " %(volume_id)s metadata"), + {'volume_id': volume_id, + 'snapshot_id': snapshot_id}) + snapshot.status = fields.SnapshotStatus.ERROR + snapshot.save() + raise exception.MetadataCopyFailure( + reason=six.text_type(ex)) + + snapshot.status = fields.SnapshotStatus.AVAILABLE + snapshot.progress = '100%' + snapshot.save() + + group_snapshot.status = 'available' + group_snapshot.save() + + LOG.info(_LI("group_snapshot %s: created successfully"), + group_snapshot.id) + self._notify_about_group_snapshot_usage( + context, group_snapshot, "create.end") + return group_snapshot + + def _create_group_snapshot_generic(self, context, group_snapshot, + snapshots): + """Creates a group_snapshot.""" + model_update = {'status': 'available'} + snapshot_model_updates = [] + for snapshot in snapshots: + snapshot_model_update = {'id': snapshot.id} + try: + self.driver.create_snapshot(snapshot) + snapshot_model_update['status'] = 'available' + except Exception: + snapshot_model_update['status'] = 'error' + model_update['status'] = 'error' + snapshot_model_updates.append(snapshot_model_update) + + return model_update, snapshot_model_updates + + def _delete_group_snapshot_generic(self, context, group_snapshot, + snapshots): + """Deletes a group_snapshot.""" + model_update = {'status': group_snapshot.status} + snapshot_model_updates = [] + for snapshot in snapshots: + 
snapshot_model_update = {'id': snapshot.id} + try: + self.driver.delete_snapshot(snapshot) + snapshot_model_update['status'] = 'deleted' + except exception.SnapshotIsBusy: + snapshot_model_update['status'] = 'available' + except Exception: + snapshot_model_update['status'] = 'error' + model_update['status'] = 'error' + snapshot_model_updates.append(snapshot_model_update) + + return model_update, snapshot_model_updates + def delete_cgsnapshot(self, context, cgsnapshot): """Deletes cgsnapshot.""" caller_context = context @@ -3155,6 +3956,120 @@ class VolumeManager(manager.SchedulerDependentManager): self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end", snapshots) + def delete_group_snapshot(self, context, group_snapshot): + """Deletes group_snapshot.""" + caller_context = context + context = context.elevated() + project_id = group_snapshot.project_id + + LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id) + + snapshots = objects.SnapshotList.get_all_for_group_snapshot( + context, group_snapshot.id) + + self._notify_about_group_snapshot_usage( + context, group_snapshot, "delete.start") + + snapshots_model_update = None + model_update = None + try: + utils.require_driver_initialized(self.driver) + + LOG.debug("group_snapshot %(grp_snap_id)s: deleting", + {'grp_snap_id': group_snapshot.id}) + + # Pass context so that drivers that want to use it, can, + # but it is not a requirement for all drivers. + group_snapshot.context = caller_context + for snapshot in snapshots: + snapshot.context = caller_context + + try: + model_update, snapshots_model_update = ( + self.driver.delete_group_snapshot(context, group_snapshot, + snapshots)) + except NotImplementedError: + model_update, snapshots_model_update = ( + self._delete_group_snapshot_generic( + context, group_snapshot, snapshots)) + + if snapshots_model_update: + for snap_model in snapshots_model_update: + # NOTE(xyang): snapshots is a list of snapshot objects. + # snapshots_model_update should be a list of dicts. + snap = next((item for item in snapshots if + item.id == snap_model['id']), None) + if snap: + snap_model.pop('id') + snap.update(snap_model) + snap.save() + + if (snap_model['status'] in + [fields.SnapshotStatus.ERROR_DELETING, + fields.SnapshotStatus.ERROR] and + model_update['status'] not in + ['error_deleting', 'error']): + model_update['status'] = snap_model['status'] + + if model_update: + if model_update['status'] in ['error_deleting', 'error']: + msg = (_('Error occurred when deleting group_snapshot ' + '%s.') % group_snapshot.id) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + else: + group_snapshot.update(model_update) + group_snapshot.save() + + except exception.CinderException: + with excutils.save_and_reraise_exception(): + group_snapshot.status = 'error' + group_snapshot.save() + # Update snapshot status to 'error' if driver returns + # None for snapshots_model_update. 
+ if not snapshots_model_update: + for snapshot in snapshots: + snapshot.status = fields.SnapshotStatus.ERROR + snapshot.save() + + for snapshot in snapshots: + # Get reservations + try: + if CONF.no_snapshot_gb_quota: + reserve_opts = {'snapshots': -1} + else: + reserve_opts = { + 'snapshots': -1, + 'gigabytes': -snapshot.volume_size, + } + volume_ref = objects.Volume.get_by_id(context, + snapshot.volume_id) + QUOTAS.add_volume_type_opts(context, + reserve_opts, + volume_ref.volume_type_id) + reservations = QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) + + except Exception: + reservations = None + LOG.exception(_LE("Failed to update usages deleting snapshot")) + + self.db.volume_glance_metadata_delete_by_snapshot(context, + snapshot.id) + snapshot.destroy() + + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations, project_id=project_id) + + group_snapshot.destroy() + LOG.info(_LI("group_snapshot %s: deleted successfully"), + group_snapshot.id) + self._notify_about_group_snapshot_usage(context, group_snapshot, + "delete.end", + snapshots) + def update_migrated_volume(self, ctxt, volume, new_volume, volume_status): """Finalize migration process on backend device.""" model_update = None @@ -3219,7 +4134,7 @@ class VolumeManager(manager.SchedulerDependentManager): service = objects.Service.get_by_args( context, svc_host, - 'cinder-volume') + constants.VOLUME_BINARY) volumes = objects.VolumeList.get_all_by_host(context, self.host) exception_encountered = False @@ -3328,7 +4243,7 @@ class VolumeManager(manager.SchedulerDependentManager): service = objects.Service.get_by_args( context, svc_host, - 'cinder-volume') + constants.VOLUME_BINARY) service.disabled = True service.disabled_reason = "frozen" service.save() @@ -3361,7 +4276,7 @@ class VolumeManager(manager.SchedulerDependentManager): service = objects.Service.get_by_args( context, svc_host, - 'cinder-volume') + constants.VOLUME_BINARY) service.disabled = False service.disabled_reason = "" service.save() diff --git a/cinder/volume/qos_specs.py b/cinder/volume/qos_specs.py index 99f09dcc1..6f0d439d2 100644 --- a/cinder/volume/qos_specs.py +++ b/cinder/volume/qos_specs.py @@ -22,6 +22,7 @@ from oslo_log import log as logging from cinder import context from cinder import db from cinder import exception +from cinder import objects from cinder.i18n import _, _LE, _LW from cinder.volume import volume_types @@ -31,38 +32,6 @@ LOG = logging.getLogger(__name__) CONTROL_LOCATION = ['front-end', 'back-end', 'both'] -def _verify_prepare_qos_specs(specs, create=True): - """Check if 'consumer' value in qos specs is valid. - - Verify 'consumer' value in qos_specs is valid, raise - exception if not. Assign default value to 'consumer', which - is 'back-end' if input is empty. - - :params create a flag indicate if specs being verified is - for create. If it's false, that means specs is for update, - so that there's no need to add 'consumer' if that wasn't in - specs. - """ - - # Check control location, if it's missing in input, assign default - # control location: 'front-end' - if not specs: - specs = {} - # remove 'name' since we will handle that elsewhere. 
- if specs.get('name', None): - del specs['name'] - try: - if specs['consumer'] not in CONTROL_LOCATION: - msg = _("Valid consumer of QoS specs are: %s") % CONTROL_LOCATION - raise exception.InvalidQoSSpecs(reason=msg) - except KeyError: - # Default consumer is back-end, i.e Cinder volume service - if create: - specs['consumer'] = 'back-end' - - return specs - - def create(context, name, specs=None): """Creates qos_specs. @@ -71,23 +40,19 @@ def create(context, name, specs=None): 'total_iops_sec': 1000, 'total_bytes_sec': 1024000} """ - _verify_prepare_qos_specs(specs) + consumer = specs.get('consumer') + if consumer: + # If we need to modify specs, copy so we don't cause unintended + # consequences for the caller + specs = specs.copy() + del specs['consumer'] - values = dict(name=name, qos_specs=specs) + values = dict(name=name, consumer=consumer, specs=specs) LOG.debug("Dict for qos_specs: %s", values) - - try: - qos_specs_ref = db.qos_specs_create(context, values) - except db_exc.DBDataError: - msg = _('Error writing field to database') - LOG.exception(msg) - raise exception.Invalid(msg) - except db_exc.DBError: - LOG.exception(_LE('DB error:')) - raise exception.QoSSpecsCreateFailed(name=name, - qos_specs=specs) - return qos_specs_ref + qos_spec = objects.QualityOfServiceSpecs(context, **values) + qos_spec.create() + return qos_spec def update(context, qos_specs_id, specs): @@ -99,17 +64,29 @@ def update(context, qos_specs_id, specs): 'total_iops_sec': 500, 'total_bytes_sec': 512000,} """ - # need to verify specs in case 'consumer' is passed - _verify_prepare_qos_specs(specs, create=False) LOG.debug('qos_specs.update(): specs %s' % specs) + try: - res = db.qos_specs_update(context, qos_specs_id, specs) + qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, + qos_specs_id) + + if 'consumer' in specs: + qos_spec.consumer = specs['consumer'] + # If we need to modify specs, copy so we don't cause unintended + # consequences for the caller + specs = specs.copy() + del specs['consumer'] + + # Update any values in specs dict + qos_spec.specs.update(specs) + + qos_spec.save() except db_exc.DBError: LOG.exception(_LE('DB error:')) raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id, qos_specs=specs) - return res + return qos_spec def delete(context, qos_specs_id, force=False): @@ -126,15 +103,10 @@ def delete(context, qos_specs_id, force=False): msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) - # check if there is any entity associated with this qos specs - res = db.qos_specs_associations_get(context, qos_specs_id) - if res and not force: - raise exception.QoSSpecsInUse(specs_id=qos_specs_id) - elif res and force: - # remove all association - db.qos_specs_disassociate_all(context, qos_specs_id) + qos_spec = objects.QualityOfServiceSpecs.get_by_id( + context, qos_specs_id) - db.qos_specs_delete(context, qos_specs_id) + qos_spec.destroy(force) def delete_keys(context, qos_specs_id, keys): @@ -143,30 +115,42 @@ def delete_keys(context, qos_specs_id, keys): msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) - # make sure qos_specs_id is valid - get_qos_specs(context, qos_specs_id) - for key in keys: - db.qos_specs_item_delete(context, qos_specs_id, key) + qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, qos_specs_id) + + # Previous behavior continued to delete keys until it hit first unset one, + # so for now will mimic that. 
In the future it would be useful to have all + # or nothing deletion of keys (or at least delete all set keys), + # especially since order of keys from CLI to API is not preserved currently + try: + for key in keys: + try: + del qos_spec.specs[key] + except KeyError: + raise exception.QoSSpecsKeyNotFound( + specs_key=key, specs_id=qos_specs_id) + finally: + qos_spec.save() -def get_associations(context, specs_id): +def get_associations(context, qos_specs_id): """Get all associations of given qos specs.""" try: - # query returns a list of volume types associated with qos specs - associates = db.qos_specs_associations_get(context, specs_id) + types = objects.VolumeTypeList.get_all_types_for_qos(context, + qos_specs_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) msg = _('Failed to get all associations of ' - 'qos specs %s') % specs_id + 'qos specs %s') % qos_specs_id LOG.warning(msg) raise exception.CinderException(message=msg) result = [] - for vol_type in associates: - member = dict(association_type='volume_type') - member.update(dict(name=vol_type['name'])) - member.update(dict(id=vol_type['id'])) - result.append(member) + for vol_type in types: + result.append({ + 'association_type': 'volume_type', + 'name': vol_type.name, + 'id': vol_type.id + }) return result @@ -234,28 +218,18 @@ def disassociate_all(context, specs_id): def get_all_specs(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all non-deleted qos specs.""" - qos_specs = db.qos_specs_get_all(context, filters=filters, marker=marker, - limit=limit, offset=offset, - sort_keys=sort_keys, sort_dirs=sort_dirs) - return qos_specs + return objects.QualityOfServiceSpecsList.get_all( + context, filters=filters, marker=marker, limit=limit, offset=offset, + sort_keys=sort_keys, sort_dirs=sort_dirs) -def get_qos_specs(ctxt, id): +def get_qos_specs(ctxt, spec_id): """Retrieves single qos specs by id.""" - if id is None: + if spec_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) if ctxt is None: ctxt = context.get_admin_context() - return db.qos_specs_get(ctxt, id) - - -def get_qos_specs_by_name(context, name): - """Retrieves single qos specs by name.""" - if name is None: - msg = _("name cannot be None") - raise exception.InvalidQoSSpecs(reason=msg) - - return db.qos_specs_get_by_name(context, name) + return objects.QualityOfServiceSpecs.get_by_id(ctxt, spec_id) diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py index 1c0a6a82e..ee1d8fcf1 100644 --- a/cinder/volume/rpcapi.py +++ b/cinder/volume/rpcapi.py @@ -16,15 +16,14 @@ Client side of the volume RPC API. """ -from oslo_config import cfg from oslo_serialization import jsonutils +from cinder.common import constants from cinder import quota from cinder import rpc from cinder.volume import utils -CONF = cfg.CONF QUOTAS = quota.QUOTAS @@ -100,13 +99,26 @@ class VolumeAPI(rpc.RPCAPI): 2.0 - Remove 1.x compatibility 2.1 - Add get_manageable_volumes() and get_manageable_snapshots(). - 2.2 - Adds support for sending objects over RPC in manage_existing(). + 2.2 - Adds support for sending objects over RPC in manage_existing(). + 2.3 - Adds support for sending objects over RPC in + initialize_connection(). + 2.4 - Sends request_spec as object in create_volume(). + 2.5 - Adds create_group, delete_group, and update_group + 2.6 - Adds create_group_snapshot, delete_group_snapshot, and + create_group_from_src(). 
""" - RPC_API_VERSION = '2.2' - TOPIC = CONF.volume_topic + RPC_API_VERSION = '2.6' + TOPIC = constants.VOLUME_TOPIC BINARY = 'cinder-volume' + def _compat_ver(self, current, *legacy): + versions = (current,) + legacy + for version in versions[:-1]: + if self.client.can_send_version(version): + return version + return versions[-1] + def _get_cctxt(self, host, version): new_host = utils.get_volume_rpc_host(host) return self.client.prepare(server=new_host, version=version) @@ -147,12 +159,19 @@ class VolumeAPI(rpc.RPCAPI): def create_volume(self, ctxt, volume, host, request_spec, filter_properties, allow_reschedule=True): - request_spec_p = jsonutils.to_primitive(request_spec) - cctxt = self._get_cctxt(host, '2.0') - cctxt.cast(ctxt, 'create_volume', volume_id=volume.id, - request_spec=request_spec_p, - filter_properties=filter_properties, - allow_reschedule=allow_reschedule, volume=volume) + msg_args = {'volume_id': volume.id, 'request_spec': request_spec, + 'filter_properties': filter_properties, + 'allow_reschedule': allow_reschedule, + 'volume': volume, + } + version = '2.4' + if not self.client.can_send_version('2.4'): + # Send request_spec as dict + version = '2.0' + msg_args['request_spec'] = jsonutils.to_primitive(request_spec) + + cctxt = self._get_cctxt(host, version) + cctxt.cast(ctxt, 'create_volume', **msg_args) def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False): cctxt = self._get_cctxt(volume.host, '2.0') @@ -190,10 +209,15 @@ class VolumeAPI(rpc.RPCAPI): image_meta=image_meta) def initialize_connection(self, ctxt, volume, connector): - cctxt = self._get_cctxt(volume['host'], '2.0') - return cctxt.call(ctxt, 'initialize_connection', - volume_id=volume['id'], - connector=connector) + version = self._compat_ver('2.3', '2.0') + msg_args = {'volume_id': volume.id, 'connector': connector, + 'volume': volume} + + if version == '2.0': + del msg_args['volume'] + + cctxt = self._get_cctxt(volume['host'], version=version) + return cctxt.call(ctxt, 'initialize_connection', **msg_args) def terminate_connection(self, ctxt, volume, connector, force=False): cctxt = self._get_cctxt(volume['host'], '2.0') @@ -319,3 +343,39 @@ class VolumeAPI(rpc.RPCAPI): return cctxt.call(ctxt, 'get_manageable_snapshots', marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) + + def create_group(self, ctxt, group, host): + cctxt = self._get_cctxt(host, '2.5') + cctxt.cast(ctxt, 'create_group', + group=group) + + def delete_group(self, ctxt, group): + cctxt = self._get_cctxt(group.host, '2.5') + cctxt.cast(ctxt, 'delete_group', + group=group) + + def update_group(self, ctxt, group, add_volumes=None, + remove_volumes=None): + cctxt = self._get_cctxt(group.host, '2.5') + cctxt.cast(ctxt, 'update_group', + group=group, + add_volumes=add_volumes, + remove_volumes=remove_volumes) + + def create_group_from_src(self, ctxt, group, group_snapshot=None, + source_group=None): + cctxt = self._get_cctxt(group.host, '2.6') + cctxt.cast(ctxt, 'create_group_from_src', + group=group, + group_snapshot=group_snapshot, + source_group=source_group) + + def create_group_snapshot(self, ctxt, group_snapshot): + cctxt = self._get_cctxt(group_snapshot.group.host, '2.6') + cctxt.cast(ctxt, 'create_group_snapshot', + group_snapshot=group_snapshot) + + def delete_group_snapshot(self, ctxt, group_snapshot): + cctxt = self._get_cctxt(group_snapshot.group.host, '2.6') + cctxt.cast(ctxt, 'delete_group_snapshot', + group_snapshot=group_snapshot) diff --git a/cinder/volume/targets/fake.py 
b/cinder/volume/targets/fake.py index 17883dd79..97841bbac 100644 --- a/cinder/volume/targets/fake.py +++ b/cinder/volume/targets/fake.py @@ -16,9 +16,6 @@ from cinder.volume.targets import iscsi class FakeTarget(iscsi.ISCSITarget): VERSION = '0.1' - def __init__(self, *args, **kwargs): - super(FakeTarget, self).__init__(*args, **kwargs) - def _get_target_and_lun(self, context, volume): return(0, 0) diff --git a/cinder/volume/targets/iscsi.py b/cinder/volume/targets/iscsi.py index c165746a1..47d38786b 100644 --- a/cinder/volume/targets/iscsi.py +++ b/cinder/volume/targets/iscsi.py @@ -358,9 +358,6 @@ class SanISCSITarget(ISCSITarget): and local block devices when we create and manage our own targets. """ - def __init__(self, *args, **kwargs): - super(SanISCSITarget, self).__init__(*args, **kwargs) - @abc.abstractmethod def create_export(self, context, volume, volume_path): pass diff --git a/cinder/volume/targets/tgt.py b/cinder/volume/targets/tgt.py index ee3ab9cc2..e7f7377a1 100644 --- a/cinder/volume/targets/tgt.py +++ b/cinder/volume/targets/tgt.py @@ -45,9 +45,6 @@ class TgtAdm(iscsi.ISCSITarget): """) - def __init__(self, *args, **kwargs): - super(TgtAdm, self).__init__(*args, **kwargs) - def _get_target(self, iqn): (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True) lines = out.split('\n') diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py index 53fd2bdb7..eb87e95ed 100644 --- a/cinder/volume/utils.py +++ b/cinder/volume/utils.py @@ -16,7 +16,9 @@ import ast +import functools import math +import operator import re import time import uuid @@ -42,6 +44,7 @@ from cinder import objects from cinder import rpc from cinder import utils from cinder.volume import throttling +from cinder.volume import volume_types CONF = cfg.CONF @@ -241,6 +244,37 @@ def notify_about_consistencygroup_usage(context, group, event_suffix, usage_info) +def _usage_from_group(group_ref, **kw): + usage_info = dict(tenant_id=group_ref.project_id, + user_id=group_ref.user_id, + availability_zone=group_ref.availability_zone, + group_id=group_ref.id, + group_type=group_ref.group_type_id, + name=group_ref.name, + created_at=group_ref.created_at.isoformat(), + status=group_ref.status) + + usage_info.update(kw) + return usage_info + + +def notify_about_group_usage(context, group, event_suffix, + extra_usage_info=None, host=None): + if not host: + host = CONF.host + + if not extra_usage_info: + extra_usage_info = {} + + usage_info = _usage_from_group(group, + **extra_usage_info) + + rpc.get_notifier("group", host).info( + context, + 'group.%s' % event_suffix, + usage_info) + + def _usage_from_cgsnapshot(cgsnapshot, **kw): usage_info = dict( tenant_id=cgsnapshot.project_id, @@ -255,6 +289,21 @@ def _usage_from_cgsnapshot(cgsnapshot, **kw): return usage_info +def _usage_from_group_snapshot(group_snapshot, **kw): + usage_info = dict( + tenant_id=group_snapshot.project_id, + user_id=group_snapshot.user_id, + group_snapshot_id=group_snapshot.id, + name=group_snapshot.name, + group_id=group_snapshot.group_id, + group_type=group_snapshot.group_type_id, + created_at=group_snapshot.created_at.isoformat(), + status=group_snapshot.status) + + usage_info.update(kw) + return usage_info + + def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix, extra_usage_info=None, host=None): if not host: @@ -272,6 +321,23 @@ def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix, usage_info) +def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix, + extra_usage_info=None, 
host=None): + if not host: + host = CONF.host + + if not extra_usage_info: + extra_usage_info = {} + + usage_info = _usage_from_group_snapshot(group_snapshot, + **extra_usage_info) + + rpc.get_notifier("group_snapshot", host).info( + context, + 'group_snapshot.%s' % event_suffix, + usage_info) + + def _calculate_count(size_in_m, blocksize): # Check if volume_dd_blocksize is valid @@ -316,36 +382,36 @@ def check_for_odirect_support(src, dest, flag='oflag=direct'): def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize, sync=False, execute=utils.execute, ionice=None, sparse=False): + cmd = prefix[:] + + if ionice: + cmd.extend(('ionice', ionice)) + + blocksize, count = _calculate_count(size_in_m, blocksize) + cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % count, 'bs=%s' % blocksize)) + # Use O_DIRECT to avoid thrashing the system buffer cache - extra_flags = [] + odirect = False if check_for_odirect_support(srcstr, deststr, 'iflag=direct'): - extra_flags.append('iflag=direct') + cmd.append('iflag=direct') + odirect = True if check_for_odirect_support(srcstr, deststr, 'oflag=direct'): - extra_flags.append('oflag=direct') + cmd.append('oflag=direct') + odirect = True # If the volume is being unprovisioned then # request the data is persisted before returning, # so that it's not discarded from the cache. conv = [] - if sync and not extra_flags: + if sync and not odirect: conv.append('fdatasync') if sparse: conv.append('sparse') if conv: conv_options = 'conv=' + ",".join(conv) - extra_flags.append(conv_options) - - blocksize, count = _calculate_count(size_in_m, blocksize) - - cmd = ['dd', 'if=%s' % srcstr, 'of=%s' % deststr, - 'count=%d' % count, 'bs=%s' % blocksize] - cmd.extend(extra_flags) - - if ionice is not None: - cmd = ['ionice', ionice] + cmd - - cmd = prefix + cmd + cmd.append(conv_options) # Perform the copy start_time = timeutils.utcnow() @@ -488,6 +554,11 @@ def clear_volume(volume_size, volume_path, volume_clear=None, LOG.info(_LI("Performing secure delete on volume: %s"), volume_path) + if volume_clear == 'shred': + LOG.warning(_LW("volume_clear=shred has been deprecated and will " + "be removed in the next release. Clearing with dd.")) + volume_clear = 'zero' + # We pass sparse=False explicitly here so that zero blocks are not # skipped in order to clear the volume. if volume_clear == 'zero': @@ -496,26 +567,11 @@ def clear_volume(volume_size, volume_path, volume_clear=None, sync=True, execute=utils.execute, ionice=volume_clear_ionice, throttle=throttle, sparse=False) - elif volume_clear == 'shred': - clear_cmd = ['shred', '-n3'] - if volume_clear_size: - clear_cmd.append('-s%dMiB' % volume_clear_size) else: raise exception.InvalidConfigurationValue( option='volume_clear', value=volume_clear) - clear_cmd.append(volume_path) - start_time = timeutils.utcnow() - utils.execute(*clear_cmd, run_as_root=True) - duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) - - # NOTE(jdg): use a default of 1, mostly for unit test, but in - # some incredible event this is 0 (cirros image?) 
don't barf
-    if duration < 1:
-        duration = 1
-    LOG.info(_LI('Elapsed time for clear volume: %.2f sec'), duration)
-
 
 def supports_thin_provisioning():
     return brick_lvm.LVM.supports_thin_provisioning(
@@ -664,28 +720,85 @@ def read_proc_mounts():
         return mounts.readlines()
 
 
-def _extract_id(vol_name):
+def extract_id_from_volume_name(vol_name):
     regex = re.compile(
         CONF.volume_name_template.replace('%s', '(?P<uuid>.+)'))
     match = regex.match(vol_name)
     return match.group('uuid') if match else None
 
 
-def check_already_managed_volume(vol_name):
+def check_already_managed_volume(vol_id):
     """Check cinder db for already managed volume.
 
-    :param vol_name: volume name parameter
+    :param vol_id: volume id parameter
     :returns: bool -- return True, if db entry with specified
-        volume name exist, otherwise return False
+        volume id exists, otherwise return False
     """
-    vol_id = _extract_id(vol_name)
     try:
-        return (vol_id and uuid.UUID(vol_id, version=4) and
+        return (vol_id and isinstance(vol_id, six.string_types) and
+                uuid.UUID(vol_id, version=4) and
                 objects.Volume.exists(context.get_admin_context(), vol_id))
     except ValueError:
        return False
 
 
+def extract_id_from_snapshot_name(snap_name):
+    """Return a snapshot's ID from its name on the backend."""
+    regex = re.compile(
+        CONF.snapshot_name_template.replace('%s', '(?P<uuid>.+)'))
+    match = regex.match(snap_name)
+    return match.group('uuid') if match else None
+
+
+def paginate_entries_list(entries, marker, limit, offset, sort_keys,
+                          sort_dirs):
+    """Paginate a list of entries.
+
+    :param entries: list of dictionaries
+    :param marker: The last element previously returned
+    :param limit: The maximum number of items to return
+    :param offset: The number of items to skip from the marker or from the
+                   first element.
+    :param sort_keys: A list of keys in the dictionaries to sort by
+    :param sort_dirs: A list of sort directions, where each is either 'asc'
+                      or 'desc'
+    """
+    comparers = [(operator.itemgetter(key.strip()), multiplier)
+                 for (key, multiplier) in zip(sort_keys, sort_dirs)]
+
+    def comparer(left, right):
+        for fn, d in comparers:
+            left_val = fn(left)
+            right_val = fn(right)
+            if isinstance(left_val, dict):
+                left_val = sorted(left_val.values())[0]
+            if isinstance(right_val, dict):
+                right_val = sorted(right_val.values())[0]
+            if left_val == right_val:
+                continue
+            if d == 'asc':
+                return -1 if left_val < right_val else 1
+            else:
+                return -1 if left_val > right_val else 1
+        else:
+            return 0
+    sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer))
+
+    start_index = 0
+    if offset is None:
+        offset = 0
+    if marker:
+        start_index = -1
+        for i, entry in enumerate(sorted_entries):
+            if entry['reference'] == marker:
+                start_index = i + 1
+                break
+        if start_index < 0:
+            msg = _('marker not found: %s') % marker
+            raise exception.InvalidInput(reason=msg)
+    range_end = start_index + limit
+    return sorted_entries[start_index + offset:range_end + offset]
+
+
 def convert_config_string_to_dict(config_string):
     """Convert config file replication string to a dict. 
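For reference, a minimal sketch (not part of the patch, assuming a working cinder checkout) of how the paginate_entries_list() helper added above in cinder/volume/utils.py behaves: entries are plain dicts, the marker is matched against each entry's 'reference' value, and any sort direction other than 'asc' sorts descending.

    from cinder.volume.utils import paginate_entries_list

    # Fake manageable-volume entries; each needs a 'reference' value
    # because the helper resumes from a marker by comparing it against
    # entry['reference'].
    entries = [{'reference': {'source-name': 'vol-%d' % i}, 'size': i}
               for i in range(5)]

    # First page: two entries, largest first -> sizes [4, 3].
    page1 = paginate_entries_list(entries, marker=None, limit=2, offset=0,
                                  sort_keys=['size'], sort_dirs=['desc'])

    # Next page: resume after the last reference returned -> sizes [2, 1].
    page2 = paginate_entries_list(entries, marker=page1[-1]['reference'],
                                  limit=2, offset=0,
                                  sort_keys=['size'], sort_dirs=['desc'])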
@@ -708,3 +821,19 @@ def convert_config_string_to_dict(config_string):
                       {'config_string': config_string})
 
     return resultant_dict
+
+
+def create_encryption_key(context, key_manager, volume_type_id):
+    encryption_key_id = None
+    if volume_types.is_encrypted(context, volume_type_id):
+        volume_type_encryption = (
+            volume_types.get_volume_type_encryption(context,
+                                                    volume_type_id))
+        cipher = volume_type_encryption.cipher
+        length = volume_type_encryption.key_size
+        algorithm = cipher.split('-')[0] if cipher else None
+        encryption_key_id = key_manager.create_key(
+            context,
+            algorithm=algorithm,
+            length=length)
+    return encryption_key_id
diff --git a/cinder/volume/volume_types.py b/cinder/volume/volume_types.py
index cb68c7797..5657aef8f 100644
--- a/cinder/volume/volume_types.py
+++ b/cinder/volume/volume_types.py
@@ -33,6 +33,8 @@ from cinder import quota
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 QUOTAS = quota.QUOTAS
+ENCRYPTION_IGNORED_FIELDS = ['volume_type_id', 'created_at', 'updated_at',
+                             'deleted_at']
 
 
 def create(context,
@@ -90,9 +92,8 @@ def destroy(context, id):
     if id is None:
         msg = _("id cannot be None")
         raise exception.InvalidVolumeType(reason=msg)
-    else:
-        elevated = context if context.is_admin else context.elevated()
-        db.volume_type_destroy(elevated, id)
+    elevated = context if context.is_admin else context.elevated()
+    return db.volume_type_destroy(elevated, id)
 
 
 def get_all_types(context, inactive=0, filters=None, marker=None,
@@ -111,6 +112,12 @@ def get_all_types(context, inactive=0, filters=None, marker=None,
     return vol_types
 
 
+def get_all_types_by_group(context, group_id):
+    """Get all volume_types in a group."""
+    vol_types = db.volume_type_get_all_by_group(context, group_id)
+    return vol_types
+
+
 def get_volume_type(ctxt, id, expected_fields=None):
     """Retrieves single volume type by id."""
     if id is None:
@@ -209,6 +216,7 @@ def get_volume_type_encryption(context, volume_type_id):
 
 
 def get_volume_type_qos_specs(volume_type_id):
+    """Get all qos specs for given volume type."""
     ctxt = context.get_admin_context()
     res = db.volume_type_qos_specs_get(ctxt,
                                        volume_type_id)
@@ -251,8 +259,7 @@ def volume_types_diff(context, vol_type_id1, vol_type_id2):
     def _fix_encryption_specs(encryption):
         if encryption:
             encryption = dict(encryption)
-            for param in ['volume_type_id', 'created_at', 'updated_at',
-                          'deleted_at']:
+            for param in ENCRYPTION_IGNORED_FIELDS:
                 encryption.pop(param, None)
         return encryption
 
@@ -307,3 +314,19 @@ def volume_types_diff(context, vol_type_id1, vol_type_id2):
             all_equal = False
 
     return (diff, all_equal)
+
+
+def volume_types_encryption_changed(context, vol_type_id1, vol_type_id2):
+    """Return whether the encryption settings of two volume types differ."""
+    def _get_encryption(enc):
+        enc = dict(enc)
+        for param in ENCRYPTION_IGNORED_FIELDS:
+            enc.pop(param, None)
+        return enc
+
+    enc1 = get_volume_type_encryption(context, vol_type_id1)
+    enc2 = get_volume_type_encryption(context, vol_type_id2)
+
+    enc1_filtered = _get_encryption(enc1) if enc1 else None
+    enc2_filtered = _get_encryption(enc2) if enc2 else None
+    return enc1_filtered != enc2_filtered
diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py
index fb1bf8073..64bb4f249 100644
--- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py
+++ b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py
@@ -76,6 +76,9 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver):
 
     VERSION = "1.5"
 
+    # ThirdPartySystems wiki page
+    
CI_WIKI_NAME = "Brocade_OpenStack_CI" + def __init__(self, **kwargs): super(BrcdFCZoneDriver, self).__init__(**kwargs) self.sb_conn_map = {} diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py index 289fd7f0e..a52fd5a60 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py @@ -71,6 +71,9 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): VERSION = "1.1.0" + # ThirdPartySystems wiki name + CI_WIKI_NAME = "Cisco_ZM_CI" + def __init__(self, **kwargs): super(CiscoFCZoneDriver, self).__init__(**kwargs) self.configuration = kwargs.get('configuration', None) diff --git a/debian/changelog b/debian/changelog index 7075e241b..16485871a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,18 @@ +cinder (2:9.0.0~b3-1) experimental; urgency=medium + + * New upstream release. + * Fixed (build-)depends for this release. + * Fixed namespace list when generating cinder.conf. + * Using OpenStack's Gerrit as VCS URLs. + * Rebased nozfs patches. + * Added PYTHONPATH=. when building sphinx doc. + * Black list 3 unit tests that non-deterministically fail: + - test_volume.VolumeMigrationTestCase.test_retype_volume_migration_failed + - test_volume.VolumeMigrationTestCase.test_retype_volume_migration_bad_policy + - backup.test_backup.BackupTestCase.test_create_backup_with_temp_snapshot + + -- Thomas Goirand Wed, 14 Sep 2016 21:17:08 +0200 + cinder (2:9.0.0~b2-1) experimental; urgency=medium * New upstream release. diff --git a/debian/control b/debian/control index baf09382f..40104fe9a 100644 --- a/debian/control +++ b/debian/control @@ -17,21 +17,23 @@ Build-Depends-Indep: bandit (>= 0.13.2), python-anyjson, python-babel (>= 2.3.4), python-barbicanclient (>= 4.0.0), + python-castellan (>= 0.4.0), python-coverage, python-crypto, - python-decorator (>= 3.4.0), python-ddt (>= 1.0.1), + python-decorator (>= 3.4.0), python-enum34, python-eventlet (>= 0.18.4), python-fixtures (>= 3.0.0), - python-glanceclient (>= 1:2.0.0), + python-glanceclient (>= 1:2.3.0), python-googleapi (>= 1.4.2), python-greenlet, python-hacking (>= 0.10.0), python-httplib2, python-ipaddress (>= 1.0.7), python-iso8601 (>= 0.1.11), - python-keystoneclient (>= 1:1.7.0), + python-keystoneauth1 (>= 2.10.0), + python-keystoneclient (>= 1:2.0.0), python-keystonemiddleware (>= 4.0.0), python-lxml, python-migrate (>= 0.9.6), @@ -44,24 +46,25 @@ Build-Depends-Indep: bandit (>= 0.13.2), python-os-testr (>= 0.7.0), python-os-win (>= 0.2.3), python-oslo.concurrency (>= 3.8.0), - python-oslo.config (>= 1:3.12.0), - python-oslo.context (>= 2.4.0), - python-oslo.db (>= 4.1.0), + python-oslo.config (>= 1:3.14.0), + python-oslo.context (>= 2.9.0), + python-oslo.db (>= 4.10.0), python-oslo.i18n (>= 2.1.0), python-oslo.log (>= 2.0.0), python-oslo.messaging (>= 5.2.0), python-oslo.middleware (>= 3.0.0), + python-oslo.privsep (>= 1.9.0), python-oslo.policy (>= 1.9.0), python-oslo.reports (>= 1.0.0), - python-oslo.rootwrap (>= 2.0.0), + python-oslo.rootwrap (>= 5.0.0), python-oslo.serialization (>= 2.0.0), python-oslo.service (>= 1.10.0), - python-oslo.utils (>= 3.15.0), - python-oslo.versionedobjects (>= 1.9.1), - python-oslo.vmware (>= 1.16.0), + python-oslo.utils (>= 3.16.0), + python-oslo.versionedobjects (>= 1.13.0), + python-oslo.vmware (>= 2.11.0), python-oslosphinx (>= 2.5.0), python-oslotest (>= 1.10.0), - python-osprofiler (>= 1.3.0), + python-osprofiler (>= 1.4.0), python-paramiko (>= 
2.0), python-paste, python-pastedeploy, @@ -77,7 +80,7 @@ Build-Depends-Indep: bandit (>= 0.13.2), python-simplejson, python-six (>= 1.9.0), python-sqlalchemy (>= 1.0.10), - python-stevedore (>= 1.10.0), + python-stevedore (>= 1.16.0), python-swiftclient (>= 1:2.2.0), python-taskflow (>= 1.26.0), python-tempest-lib (>= 0.14.0), @@ -90,8 +93,8 @@ Build-Depends-Indep: bandit (>= 0.13.2), subunit, testrepository, Standards-Version: 3.9.8 -Vcs-Browser: https://anonscm.debian.org/cgit/openstack/cinder.git/ -Vcs-Git: https://anonscm.debian.org/git/openstack/cinder.git +Vcs-Browser: https://git.openstack.org/cgit/openstack/deb-cinder +Vcs-Git: https://git.openstack.org/openstack/deb-cinder Homepage: https://github.com/openstack/cinder Package: python-cinder @@ -99,18 +102,20 @@ Section: python Architecture: all Depends: python-babel (>= 2.3.4), python-barbicanclient (>= 4.0.0), + python-castellan (>= 0.4.0), python-ceph, python-crypto, python-decorator (>= 3.4.0), python-eventlet (>= 0.18.4), - python-glanceclient (>= 1:2.0.0), + python-glanceclient (>= 1:2.3.0), python-googleapi (>= 1.4.2), python-greenlet, - python-httplib2, python-hp3parclient, + python-httplib2, python-ipaddress (>= 1.0.7), python-iso8601 (>= 0.1.11), - python-keystoneclient (>= 1:1.7.0), + python-keystoneauth1 (>= 2.10.0), + python-keystoneclient (>= 1:2.0.0), python-keystonemiddleware (>= 4.0.0), python-lxml, python-migrate (>= 0.9.6), @@ -120,22 +125,23 @@ Depends: python-babel (>= 2.3.4), python-os-brick (>= 1.3.0), python-os-win (>= 0.2.3), python-oslo.concurrency (>= 3.8.0), - python-oslo.config (>= 1:3.12.0), - python-oslo.context (>= 2.4.0), - python-oslo.db (>= 4.1.0), + python-oslo.config (>= 1:3.14.0), + python-oslo.context (>= 2.9.0), + python-oslo.db (>= 4.10.0), python-oslo.i18n (>= 2.1.0), python-oslo.log (>= 2.0.0), python-oslo.messaging (>= 5.2.0), python-oslo.middleware (>= 3.0.0), python-oslo.policy (>= 1.9.0), + python-oslo.privsep (>= 1.9.0), python-oslo.reports (>= 1.0.0), - python-oslo.rootwrap (>= 2.0.0), + python-oslo.rootwrap (>= 5.0.0), python-oslo.serialization (>= 2.0.0), python-oslo.service (>= 1.10.0), - python-oslo.utils (>= 3.15.0), - python-oslo.versionedobjects (>= 1.9.1), - python-oslo.vmware (>= 1.16.0), - python-osprofiler (>= 1.3.0), + python-oslo.utils (>= 3.16.0), + python-oslo.versionedobjects (>= 1.13.0), + python-oslo.vmware (>= 2.11.0), + python-osprofiler (>= 1.4.0), python-paramiko (>= 2.0), python-paste, python-pastedeploy, @@ -151,7 +157,7 @@ Depends: python-babel (>= 2.3.4), python-simplejson, python-six (>= 1.9.0), python-sqlalchemy (>= 1.0.10), - python-stevedore (>= 1.10.0), + python-stevedore (>= 1.16.0), python-swiftclient (>= 1:2.2.0), python-taskflow (>= 1.26.0), python-tooz (>= 1.28.0), diff --git a/debian/patches/no-zfssa-tests.patch b/debian/patches/no-zfssa-tests.patch index e8003777c..9cf7f8c8f 100644 --- a/debian/patches/no-zfssa-tests.patch +++ b/debian/patches/no-zfssa-tests.patch @@ -3,8 +3,8 @@ Author: Thomas Goirand Forwarded: not-needed Last-Update: 2015-01-22 ---- a/cinder/tests/unit/test_zfssa.py 2016-03-04 03:58:20.261889747 +0000 -+++ /dev/null 2015-11-25 10:13:06.495404578 +0000 +--- a/cinder/tests/unit/volume/drivers/test_zfssa.py 2016-09-14 21:16:41.075013368 +0200 ++++ /dev/null 2016-09-01 16:08:01.266394396 +0200 @@ -1,1718 +0,0 @@ -# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. 
-# diff --git a/debian/patches/remove-zfssa-from-opts.py.patch b/debian/patches/remove-zfssa-from-opts.py.patch index 165817f0c..d96510537 100644 --- a/debian/patches/remove-zfssa-from-opts.py.patch +++ b/debian/patches/remove-zfssa-from-opts.py.patch @@ -1,12 +1,11 @@ -zigo@GPLHost:xen018401>_ ~/sources/openstack/newton/cinder/cinder cinder (debian/newton)$ cat ../remove-zfssa-from-opts.py.patch Description: Remove zfssa from opts.py Author: Thomas Goirand Forwarded: no -Last-Update: 2016-07-15 +Last-Update: 2016-09-14 ---- cinder-9.0.0~b2.orig/cinder/opts.py -+++ cinder-9.0.0~b2/cinder/opts.py -@@ -169,10 +169,6 @@ from cinder.volume.drivers.windows impor +--- cinder-9.0.0~b3.orig/cinder/opts.py ++++ cinder-9.0.0~b3/cinder/opts.py +@@ -171,10 +171,6 @@ from cinder.volume.drivers.windows impor cinder_volume_drivers_windows_windows from cinder.volume.drivers import xio as cinder_volume_drivers_xio from cinder.volume.drivers import zadara as cinder_volume_drivers_zadara @@ -17,15 +16,15 @@ Last-Update: 2016-07-15 from cinder.volume.drivers.zte import zte_ks as cinder_volume_drivers_zte_zteks from cinder.volume import manager as cinder_volume_manager from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver -@@ -281,7 +277,6 @@ def list_opts(): - cinder_volume_drivers_xio.XIO_OPTS, +@@ -282,7 +278,6 @@ def list_opts(): cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc. storwize_svc_fc_opts, + cinder_volume_drivers_falconstor_fsscommon.FSS_OPTS, - cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS, cinder_volume_driver.volume_opts, cinder_volume_driver.iser_opts, cinder_api_views_versions.versions_opts, -@@ -325,7 +320,6 @@ def list_opts(): +@@ -326,7 +321,6 @@ def list_opts(): cinder_volume_drivers_blockbridge.blockbridge_opts, [cinder_scheduler_scheduleroptions. 
scheduler_json_config_location_opt], diff --git a/debian/rules b/debian/rules index 6d7a12e03..e475ffa5b 100755 --- a/debian/rules +++ b/debian/rules @@ -14,7 +14,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) rm -rf .testrepository ; \ testr-python$$PYMAJOR init ; \ TEMP_REZ=`mktemp -t` ; \ - PYTHONPATH=$(CURDIR) PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'cinder\.tests\.unit\.(?!(.*test_volume\.VolumeTestCase\.test_create_delete_volume.*|.*test_volume\.ConsistencyGroupTestCase\.test_create_delete_cgsnapshot.*|.*test_volume\.VolumeMigrationTestCase\.test_retype_volume_driver_success.*))' | tee $$TEMP_REZ | subunit2pyunit ; \ + PYTHONPATH=$(CURDIR) PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'cinder\.tests\.unit\.(?!(.*test_volume\.VolumeTestCase\.test_create_delete_volume.*|.*test_volume\.ConsistencyGroupTestCase\.test_create_delete_cgsnapshot.*|.*test_volume\.VolumeMigrationTestCase\.test_retype_volume_driver_success.*|.*test_volume\.VolumeMigrationTestCase\.test_retype_volume_migration_failed.*|.*backup\.test_backup\.BackupTestCase\.test_create_backup_with_temp_snapshot.*|.*test_volume\.VolumeMigrationTestCase\.test_retype_volume_migration_bad_policy.*))' | tee $$TEMP_REZ | subunit2pyunit ; \ cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ rm -f $$TEMP_REZ ; \ testr-python$$PYMAJOR slowest ; \ @@ -39,6 +39,7 @@ override_dh_install: PYTHONPATH=$(CURDIR)/debian/tmp/usr/lib/python2.7/dist-packages oslo-config-generator \ --output-file $(CURDIR)/debian/cinder-common/usr/share/cinder-common/cinder.conf \ --wrap-width 80 \ + --namespace castellan.config \ --namespace cinder \ --namespace keystonemiddleware.auth_token \ --namespace oslo.config \ @@ -103,7 +104,7 @@ override_dh_auto_build: override_dh_sphinxdoc: ifeq (,$(findstring nodocs, $(DEB_BUILD_OPTIONS))) - python setup.py build_sphinx + PYTHONPATH=. python setup.py build_sphinx cp -rf doc/build/html $(CURDIR)/debian/cinder-doc/usr/share/doc/cinder-doc dh_sphinxdoc -O--buildsystem=python_distutils endif diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index ba789b5df..000000000 --- a/doc/Makefile +++ /dev/null @@ -1,97 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SPHINXSOURCE = source -PAPER = -BUILDDIR = build - -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) - -.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest - -.DEFAULT_GOAL = html - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -rm -rf cinder.sqlite - if [ -f .autogenerated ] ; then \ - cat .autogenerated | xargs rm ; \ - rm .autogenerated ; \ - fi - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cinder.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cinder.qhc" - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/README.rst b/doc/README.rst index 02fc8eb54..a2d171d1a 100644 --- a/doc/README.rst +++ b/doc/README.rst @@ -1,66 +1,33 @@ -================= -Building the docs -================= +======================= +Cinder Development Docs +======================= -Dependencies -============ +Files under this directory tree are used for generating the documentation +for the Cinder source code. 
-Sphinx_ - You'll need sphinx (the python one) and if you are - using the virtualenv you'll need to install it in the virtualenv - specifically so that it can load the cinder modules. +Developer documentation is built to: +http://docs.openstack.org/developer/cinder/ - :: +Tools +===== - pip install Sphinx +Sphinx + The Python Sphinx package is used to generate the documentation output. + Information on Sphinx, including formatting information for RST source + files, can be found in the `Sphinx online documentation + `_. -Graphviz_ +Graphviz Some of the diagrams are generated using the ``dot`` language - from Graphviz. - - :: - - sudo apt-get install graphviz - -.. _Sphinx: http://sphinx.pocoo.org - -.. _Graphviz: http://www.graphviz.org/ + from Graphviz. See the `Graphviz documentation `_ + for Graphviz and dot language usage information. -Use `make` -========== +Building Documentation +====================== -Just type make:: - - % make - -Look in the Makefile for more targets. - - -Manually -======== - - 1. Generate the code.rst file so that Sphinx will pull in our docstrings:: - - % ./generate_autodoc_index.sh > source/code.rst - - 2. Run `sphinx_build`:: - - % sphinx-build -b html source build/html - - -Use `tox` -========= - -The easiest way to build the docs and avoid dealing with all -dependencies is to let tox prepare a virtualenv and run the -build_sphinx target inside the virtualenv:: +Doc builds are performed using tox with the ``docs`` target:: % cd .. % tox -e docs - -The docs have been built -======================== - -Check out the `build` directory to find them. Yay! diff --git a/doc/ext/cinder_driverlist.py b/doc/ext/cinder_driverlist.py new file mode 100644 index 000000000..5a7a61926 --- /dev/null +++ b/doc/ext/cinder_driverlist.py @@ -0,0 +1,24 @@ +# Copyright 2016 Dell Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from cinder import utils + + +def setup(app): + print('** Generating driver list...') + rv = utils.execute('./tools/generate_driver_list.py', ['docs']) + print(rv[0]) + diff --git a/doc/ext/cinder_todo.py b/doc/ext/cinder_todo.py deleted file mode 100644 index bdd64532a..000000000 --- a/doc/ext/cinder_todo.py +++ /dev/null @@ -1,103 +0,0 @@ -# This is a hack of the builtin todo extension, to make the todo_list -# more user friendly - -from sphinx.ext.todo import * -import re - - -def _(s): - return s - - -def process_todo_nodes(app, doctree, fromdocname): - if not app.config['todo_include_todos']: - for node in doctree.traverse(todo_node): - node.parent.remove(node) - - # Replace all todolist nodes with a list of the collected todos. - # Augment each todo with a backlink to the original location. 
- env = app.builder.env - - if not hasattr(env, 'todo_all_todos'): - env.todo_all_todos = [] - - # remove the item that was added in the constructor, since I'm tired of - # reading through docutils for the proper way to construct an empty list - lists = [] - for i in range(5): - lists.append(nodes.bullet_list("", nodes.Text('', ''))) - lists[i].remove(lists[i][0]) - lists[i]['classes'].append('todo_list') - - for node in doctree.traverse(todolist): - if not app.config['todo_include_todos']: - node.replace_self([]) - continue - - for todo_info in env.todo_all_todos: - para = nodes.paragraph() - filename = env.doc2path(todo_info['docname'], base=None) - - # Create a reference - newnode = nodes.reference('', '') - - line_info = todo_info['lineno'] - link = _('%(filename)s, line %(line_info)d') % locals() - innernode = nodes.emphasis(link, link) - newnode['refdocname'] = todo_info['docname'] - - try: - newnode['refuri'] = app.builder.get_relative_uri( - fromdocname, todo_info['docname']) - newnode['refuri'] += '#' + todo_info['target']['refid'] - except NoUri: - # ignore if no URI can be determined, e.g. for LaTeX output - pass - - newnode.append(innernode) - para += newnode - para['classes'].append('todo_link') - - todo_entry = todo_info['todo'] - - env.resolve_references(todo_entry, todo_info['docname'], - app.builder) - - item = nodes.list_item('', para) - todo_entry[1]['classes'].append('details') - - comment = todo_entry[1] - - m = re.match(r"^P(\d)", comment.astext()) - priority = 5 - if m: - priority = int(m.group(1)) - if priority < 0: - priority = 1 - if priority > 5: - priority = 5 - - item['classes'].append('todo_p' + str(priority)) - todo_entry['classes'].append('todo_p' + str(priority)) - - item.append(comment) - - lists[priority - 1].insert(0, item) - - node.replace_self(lists) - - -def setup(app): - app.add_config_value('todo_include_todos', False, False) - - app.add_node(todolist) - app.add_node(todo_node, - html=(visit_todo_node, depart_todo_node), - latex=(visit_todo_node, depart_todo_node), - text=(visit_todo_node, depart_todo_node)) - - app.add_directive('todo', Todo) - app.add_directive('todolist', TodoList) - app.connect('doctree-read', process_todos) - app.connect('doctree-resolved', process_todo_nodes) - app.connect('env-purge-doc', purge_todos) diff --git a/doc/source/conf.py b/doc/source/conf.py index f70900170..f47fc2fee 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,3 +1,15 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + # cinder documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # @@ -15,6 +27,16 @@ import subprocess import sys import warnings +from cinder import objects + + +# NOTE(geguileo): Sphinx will fail to generate the documentation if we are +# using decorators from any OVO in cinder.objects, because the OVOs are only +# added to the cinder.objects namespace when the CLI programs are run. 
So we +# need to run it here as well to avoid failures like: +# AttributeError: 'module' object has no attribute 'Volume' +objects.register_all() + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -29,16 +51,17 @@ sys.path.insert(0, os.path.abspath('./')) # or your custom ones. extensions = ['sphinx.ext.autodoc', - 'ext.cinder_todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'oslosphinx', 'stevedore.sphinxext', 'oslo_config.sphinxconfiggen', + 'ext.cinder_driverlist', ] -config_generator_config_file = '../../cinder/config/cinder-config-generator.conf' +config_generator_config_file = ( + '../../cinder/config/cinder-config-generator.conf') sample_config_basename = '_static/cinder' # autodoc generation is a bit aggressive and a nuisance @@ -63,7 +86,7 @@ else: source_suffix = '.rst' # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' @@ -84,13 +107,13 @@ version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [ @@ -104,10 +127,10 @@ exclude_trees = [] # The reST default role (used for this markup: `text`) to use # for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). @@ -143,26 +166,26 @@ man_pages = [ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -171,9 +194,9 @@ html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
-#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", - "-n1"] + "-n1"] try: html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] @@ -183,34 +206,34 @@ except Exception: # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'cinderdoc' @@ -219,10 +242,10 @@ htmlhelp_basename = 'cinderdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass @@ -234,17 +257,17 @@ latex_documents = [ # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True diff --git a/doc/source/devref/api.apache.rst b/doc/source/devref/api.apache.rst index e83bae671..57145f4dc 100644 --- a/doc/source/devref/api.apache.rst +++ b/doc/source/devref/api.apache.rst @@ -34,4 +34,5 @@ Cinder's primary configuration file (etc/cinder.conf) and the PasteDeploy config Access Control -------------- -If you are running with a Linux kernel security module enabled (for example SELinux or AppArmor), make sure that the configuration file has the appropriate context to access the linked file. \ No newline at end of file +If you are running with a Linux kernel security module enabled (for example SELinux or AppArmor), make sure that the configuration file has the appropriate context to access the linked file.
+ diff --git a/doc/source/devref/api_microversion_dev.rst b/doc/source/devref/api_microversion_dev.rst index 9f8956829..750878409 100644 --- a/doc/source/devref/api_microversion_dev.rst +++ b/doc/source/devref/api_microversion_dev.rst @@ -186,7 +186,7 @@ In the controller class:: def my_api_method(self, req, id): .... method_1 ... - @wsgi.Controller.api_version("3.4") # noqa + @my_api_method.api_version("3.4") def my_api_method(self, req, id): .... method_2 ... @@ -194,10 +194,15 @@ If a caller specified ``3.1``, ``3.2`` or ``3.3`` (or received the default of ``3.1``) they would see the result from ``method_1``, ``3.4`` or later ``method_2``. -It is vital that the two methods have the same name, so the second of -them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. The -two methods may be different in any kind of semantics (schema -validation, return values, response codes, etc) +We could use the ``wsgi.Controller.api_version`` decorator on the second +``my_api_method`` as well, but then we would have to add ``# noqa`` to that +line to avoid failing flake8's ``F811`` rule. So the recommended approach is +to use the ``api_version`` decorator from the first method that is defined, as +illustrated by the example above, and then use the ``my_api_method.api_version`` +decorator for subsequent API versions of the same method. + +The two methods may be different in any kind of semantics (schema validation, +return values, response codes, etc.). A method with only small changes between versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -301,7 +306,7 @@ method test, you just need to add the ``OpenStack-API-Version`` header, for example:: req = fakes.HTTPRequest.blank('/testable/url/endpoint') - req.headers = {'OpenStack-API-Version': 'volume 3.2'} + req.headers['OpenStack-API-Version'] = 'volume 3.6' req.api_version_request = api_version.APIVersionRequest('3.6') controller = controller.TestableController() diff --git a/doc/source/devref/attach_detach_conventions.rst b/doc/source/devref/attach_detach_conventions.rst index b894c9a9f..480d12b60 100644 --- a/doc/source/devref/attach_detach_conventions.rst +++ b/doc/source/devref/attach_detach_conventions.rst @@ -100,7 +100,7 @@ We call this info the model_update and it's used to update vital target information associated with the volume in the Cinder database. driver.initialize_connection -*************************** +**************************** Now that we've actually built a target and persisted the important bits of information associated with it, we're ready to actually assign diff --git a/doc/source/devref/auth.rst b/doc/source/devref/auth.rst index 99993ecf8..166a8bfd0 100644 --- a/doc/source/devref/auth.rst +++ b/doc/source/devref/auth.rst @@ -96,9 +96,9 @@ Relationship of US eAuth to RBAC Typical implementations of US eAuth authentication systems are structured as follows:: [ MS Active Directory or other federated LDAP user store ] - --> backends to… + --> backends to… [ SUN Identity Manager or other SAML Policy Controller ] - --> maps URLs to groups… + --> maps URLs to groups… [ Apache Policy Agent in front of eAuth-secured Web Application ] In more ideal implementations, the remainder of the application-specific account information is stored either in extended schema on the LDAP server itself, via the use of a translucent LDAP proxy, or in an independent datastore keyed off of the UID provided via SAML assertion.
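The recommended microversion pattern from the api_microversion_dev.rst change above, pulled together in one place. This is a minimal sketch only, assuming cinder's ``wsgi.Controller`` conventions; ``_old_behavior`` and ``_new_behavior`` are hypothetical helpers standing in for the real method bodies::

    from cinder.api.openstack import wsgi


    class ExampleController(wsgi.Controller):

        @wsgi.Controller.api_version("3.1", "3.3")
        def my_api_method(self, req, id):
            # Serves requests with microversions 3.1 through 3.3.
            return self._old_behavior(req, id)

        @my_api_method.api_version("3.4")
        def my_api_method(self, req, id):
            # Same method name; the decorator obtained from the first
            # definition registers this variant for 3.4 and later, so no
            # ``# noqa`` marker is needed to silence flake8's F811 rule.
            return self._new_behavior(req, id)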
diff --git a/doc/source/devref/cinder.rst b/doc/source/devref/cinder.rst index c820b4f69..fe791bbf0 100644 --- a/doc/source/devref/cinder.rst +++ b/doc/source/devref/cinder.rst @@ -1,6 +1,6 @@ .. Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. + Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -69,7 +69,7 @@ The :mod:`cinder.test` Module :noindex: :members: :undoc-members: - :show-inheritance: + :show-inheritance: The :mod:`cinder.utils` Module diff --git a/doc/source/devref/drivers.rst b/doc/source/devref/drivers.rst index ddd9f08ce..d4698cbce 100644 --- a/doc/source/devref/drivers.rst +++ b/doc/source/devref/drivers.rst @@ -21,6 +21,20 @@ Cinder exposes an API to users to interact with different storage backend solutions. The following are standards across all drivers for Cinder services to properly interact with a driver. +Basic attributes +---------------- + +There are some basic attributes that all driver classes should have: + +* VERSION: Driver version in string format. No naming convention is imposed, + although semantic versioning is recommended. +* CI_WIKI_NAME: Must be the exact name of the `ThirdPartySystems wiki page + <https://wiki.openstack.org/wiki/ThirdPartySystems>`_. This is used by our + tooling system to associate jobs to drivers and track their CI reporting + status correctly. + +The tooling system will also use the name and docstring of the driver class. + Minimum Features ---------------- @@ -50,7 +64,6 @@ provided by a driver. * driver_version * free_capacity_gb -* reserved_percentage * storage_protocol * total_capacity_gb * vendor_name @@ -58,8 +71,9 @@ provided by a driver. **NOTE:** If the driver is unable to provide a value for free_capacity_gb or total_capacity_gb, keywords can be provided instead. Please use 'unknown' if -the array cannot report the value or 'infinite' if the array has no upper -limit. +the backend cannot report the value or 'infinite' if the backend has no upper +limit. However, it is recommended to report real values, as the Cinder scheduler +assigns the lowest weight to any storage backend reporting 'unknown' or 'infinite'. Feature Enforcement ------------------- @@ -82,3 +96,63 @@ tests. The details for the required volume driver interfaces can be found in the ``cinder/interface/volume_*_driver.py`` source. +Driver Development Documentation +-------------------------------- + +The LVM driver is our reference for all new driver implementations. The +information below can provide additional documentation for the methods that +volume drivers need to implement. + +Base Driver Interface +````````````````````` +The methods documented below are the minimum required interface for a volume +driver to support. All methods from this interface must be implemented +in order to be an official Cinder volume driver. + +.. automodule:: cinder.interface.volume_driver + :members: + + +Snapshot Interface +`````````````````` +Another required interface for a volume driver to be fully compatible is the +ability to create and manage snapshots. Due to legacy constraints, this +interface is not included in the base driver interface above. + +Work is being done to address those legacy issues. Once that is complete, this +interface will be merged with the base driver interface. + +..
automodule:: cinder.interface.volume_snapshot_driver + :members: + + +Manage/Unmanage Support +``````````````````````` +An optional feature a volume backend can support is the ability to manage +existing volumes or unmanage volumes - keep the volume on the storage backend +but no longer manage it through Cinder. + +To support this functionality, volume drivers must implement these methods: + +.. automodule:: cinder.interface.volume_management_driver + :members: + + +Manage/Unmanage Snapshot Support +```````````````````````````````` +In addition to the ability to manage and unmanage volumes, Cinder backend +drivers may also support managing and unmanaging volume snapshots. These +additional methods must be implemented to support these operations. + +.. automodule:: cinder.interface.volume_snapshotmanagement_driver + :members: + +Volume Consistency Groups +````````````````````````` +Some storage backends support the ability to group volumes and create write +consistent snapshots across the group. In order to support these operations, +the following interface must be implemented by the driver. + +.. automodule:: cinder.interface.volume_consistencygroup_driver + :members: + diff --git a/doc/source/devref/fakes.rst b/doc/source/devref/fakes.rst index b225ff2ac..741bc7b03 100644 --- a/doc/source/devref/fakes.rst +++ b/doc/source/devref/fakes.rst @@ -1,6 +1,6 @@ .. Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. + Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/doc/source/devref/i18n.rst b/doc/source/devref/i18n.rst index 68416e182..a829fe2ad 100644 --- a/doc/source/devref/i18n.rst +++ b/doc/source/devref/i18n.rst @@ -1,23 +1,37 @@ Internationalization ==================== -cinder uses `gettext `_ so that + +For internationalization guidelines, see the +`oslo.i18n documentation <http://docs.openstack.org/developer/oslo.i18n/>`_. +The information below can be used to get started. + +Cinder uses `gettext `_ so that user-facing strings such as log messages appear in the appropriate language in different locales. To use gettext, make sure that the strings passed to the logger are wrapped -in a ``_()`` function call. For example:: +in a ``_Lx()`` function call. For example:: - LOG.info(_("block_device_mapping %s") % block_device_mapping) + LOG.info(_LI("block_device_mapping %s"), block_device_mapping) + +There are a few different _() translation markers, depending on the logging +level of the text: + +- _LI() - Used for INFO level log messages +- _LW() - Used for WARNING level log messages +- _LE() - Used for ERROR level log messages (this includes LOG.exception) +- _() - Used for any exception messages, including strings used for both + logging and exceptions. Do not use ``locals()`` for formatting messages because: + 1. It is not as clear as using explicit dicts. 2. It could produce hidden errors during refactoring. 3. Changing the name of a variable causes a change in the message. 4. It creates a lot of otherwise unused variables. -If you do not follow the project conventions, your code may cause the -LocalizationTestCase.test_multiple_positional_format_placeholders test to fail -in cinder/tests/test_localization.py. +If you do not follow the project conventions, your code may cause pep8 hacking +check failures.
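A quick illustration of the marker conventions listed above. This is a sketch only, assuming the usual ``cinder.i18n`` imports and an existing ``LOG`` object and ``volume`` variable::

    from cinder import exception
    from cinder.i18n import _, _LE, _LI, _LW

    LOG.info(_LI("Created volume %s"), volume.id)
    LOG.warning(_LW("Volume %s is already attached"), volume.id)
    LOG.error(_LE("Failed to delete volume %s"), volume.id)
    # _() is reserved for messages that end up in exceptions.
    raise exception.InvalidVolume(reason=_("volume %s is busy") % volume.id)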
For translation to work properly, the top level scripts for Cinder need to first do the following before any Cinder modules are imported:: @@ -25,6 +39,9 @@ to first do the following before any Cinder modules are imported:: from cinder import i18n i18n.enable_lazy() +Note: this should _only_ be called from top level scripts - no library code +or common modules should call this method. + Any files that use _() for translation then must have the following lines:: @@ -34,3 +51,4 @@ If the above code is missing, it may result in an error that looks like:: NameError: name '_' is not defined + diff --git a/doc/source/devref/migration.rst b/doc/source/devref/migration.rst index 52dfb8c23..b295c5c11 100644 --- a/doc/source/devref/migration.rst +++ b/doc/source/devref/migration.rst @@ -54,7 +54,7 @@ the CLI:: cinder migrate [--force-host-copy [<True|False>]] [--lock-volume [<True|False>]] - <volume> <host> + <volume> <host> Mandatory arguments: <volume> ID of volume to migrate. <host> Destination host. The format of host is - + back-end. If the back-end does not have specified pools, 'POOL' needs to be set with the same name as 'backend'. - + Optional arguments: --force-host-copy [<True|False>] Enables or disables generic host-based force- diff --git a/doc/source/devref/threading.rst b/doc/source/devref/threading.rst index c48d353e0..9476c3545 100644 --- a/doc/source/devref/threading.rst +++ b/doc/source/devref/threading.rst @@ -28,9 +28,9 @@ in the long-running code path. The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: - from eventlet import greenthread - ... - greenthread.sleep(0) + from eventlet import greenthread + ... + greenthread.sleep(0) MySQL access and eventlet diff --git a/doc/source/drivers.rst b/doc/source/drivers.rst deleted file mode 100644 index 44ee4c2ab..000000000 --- a/doc/source/drivers.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================== - Available Drivers -=================== - -.. list-plugins:: oslo_messaging.notify.drivers - :detailed: diff --git a/doc/source/index.rst b/doc/source/index.rst index 0fee7585e..190b802f3 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -44,9 +44,20 @@ Developer Docs database_architecture scheduler-filters scheduler-weights - drivers oslo-middleware +Drivers +======= + +Cinder maintains drivers for volume backends, backup targets, and fibre +channel zone manager fabric types. The list of available drivers can be +found here: + +.. toctree:: + :maxdepth: 1 + + drivers + API Extensions ============== @@ -60,11 +71,6 @@ Sample Configuration File sample_config -Outstanding Documentation Tasks -=============================== - -..
todolist:: - Indices and tables ================== diff --git a/etc/cinder/policy.json b/etc/cinder/policy.json index 02440a625..881837205 100644 --- a/etc/cinder/policy.json +++ b/etc/cinder/policy.json @@ -10,6 +10,7 @@ "volume:get": "rule:admin_or_owner", "volume:get_all": "rule:admin_or_owner", "volume:get_volume_metadata": "rule:admin_or_owner", + "volume:create_volume_metadata": "rule:admin_or_owner", "volume:delete_volume_metadata": "rule:admin_or_owner", "volume:update_volume_metadata": "rule:admin_or_owner", "volume:get_volume_admin_metadata": "rule:admin_api", @@ -91,6 +92,7 @@ "backup:restore": "rule:admin_or_owner", "backup:backup-import": "rule:admin_api", "backup:backup-export": "rule:admin_api", + "backup:update": "rule:admin_or_owner", "snapshot_extension:snapshot_actions:update_snapshot_status": "", "snapshot_extension:snapshot_manage": "rule:admin_api", @@ -108,8 +110,29 @@ "consistencygroup:get_cgsnapshot": "group:nobody", "consistencygroup:get_all_cgsnapshots": "group:nobody", + "group:group_types_manage": "rule:admin_api", + "group:group_types_specs": "rule:admin_api", + "group:access_group_types_specs": "rule:admin_api", + "group:group_type_access": "rule:admin_or_owner", + + "group:create" : "", + "group:delete": "rule:admin_or_owner", + "group:update": "rule:admin_or_owner", + "group:get": "rule:admin_or_owner", + "group:get_all": "rule:admin_or_owner", + + "group:create_group_snapshot": "", + "group:delete_group_snapshot": "rule:admin_or_owner", + "group:update_group_snapshot": "rule:admin_or_owner", + "group:get_group_snapshot": "rule:admin_or_owner", + "group:get_all_group_snapshots": "rule:admin_or_owner", + "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api", "message:delete": "rule:admin_or_owner", "message:get": "rule:admin_or_owner", - "message:get_all": "rule:admin_or_owner" + "message:get_all": "rule:admin_or_owner", + + "clusters:get": "rule:admin_api", + "clusters:get_all": "rule:admin_api", + "clusters:update": "rule:admin_api" } diff --git a/etc/cinder/rootwrap.d/volume.filters b/etc/cinder/rootwrap.d/volume.filters index 925df2d44..db642f3a0 100644 --- a/etc/cinder/rootwrap.d/volume.filters +++ b/etc/cinder/rootwrap.d/volume.filters @@ -65,10 +65,6 @@ lvconvert: CommandFilter, lvconvert, root # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... iscsiadm: CommandFilter, iscsiadm, root -# cinder/volume/drivers/lvm.py: 'shred', '-n3' -# cinder/volume/drivers/lvm.py: 'shred', '-n0', '-z', '-s%dMiB' -shred: CommandFilter, shred, root - # cinder/volume/utils.py: utils.temporary_chown(path, 0) chown: CommandFilter, chown, root diff --git a/releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml b/releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml new file mode 100644 index 000000000..21b56a6c5 --- /dev/null +++ b/releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added support for the use of live volume in place of + standard replication in the Dell SC driver. \ No newline at end of file diff --git a/releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml b/releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml new file mode 100644 index 000000000..fa002c67e --- /dev/null +++ b/releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml @@ -0,0 +1,11 @@ +--- +features: + - Added a new config option `scheduler_weight_handler`. 
This is a global + option which specifies how the scheduler should choose from a list of + weighted pools. By default the existing weigher is used, which always + chooses the highest weight. + - Added a new weight handler `StochasticHostWeightHandler`. This weight + handler chooses pools randomly, where the random probabilities are + proportional to the weights, so higher weighted pools are chosen more + frequently, but not all the time. This weight handler spreads new + volumes across available pools more fairly (see the selection sketch below). diff --git a/releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml b/releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml new file mode 100644 index 000000000..0a36494a2 --- /dev/null +++ b/releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Projects with the admin role are now allowed to operate + on the quotas of all other projects. diff --git a/releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml b/releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml new file mode 100644 index 000000000..9a65c19c2 --- /dev/null +++ b/releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml @@ -0,0 +1,3 @@ +--- +features: + - The Huawei driver now supports an optimal path for backing up snapshots \ No newline at end of file diff --git a/releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml b/releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml new file mode 100644 index 000000000..3abca4d25 --- /dev/null +++ b/releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added REST API to update backup name and description. diff --git a/releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml b/releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml new file mode 100644 index 000000000..5612d2e69 --- /dev/null +++ b/releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml @@ -0,0 +1,3 @@ +--- +features: + - Report pools in volume stats for Block Device Driver. diff --git a/releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml b/releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml new file mode 100644 index 000000000..9c6ef506b --- /dev/null +++ b/releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Volumes created on NetApp cDOT and 7mode storage systems now + report 'multiattach' capability. They have always supported such a + capability, but did not report it to Cinder. diff --git a/releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml b/releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml new file mode 100644 index 000000000..895151661 --- /dev/null +++ b/releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml @@ -0,0 +1,20 @@ +--- +prelude: > + Everything in Cinder's release notes related to the High Availability + Active-Active effort (prefixed with "HA A-A:") is work in progress and + should not be used in production until it has been completed and the + appropriate release note has been issued stating its readiness for + production. + +features: + - "HA A-A: Add cluster configuration option to allow grouping hosts that + share the same backend configurations and should work in Active-Active + fashion." + - "HA A-A: Updated manage command to display cluster information on service + listings." + - "HA A-A: Added cluster subcommand in manage command to list, remove, and + rename clusters."
+ - "HA A-A: Added clusters API endpoints for cluster related operations (index, + detail, show, enable/disable). Index and detail accept filtering by + `name`, `binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down + status (`is_up`) as URL parameters. Also added their respective policies." diff --git a/releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml b/releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml new file mode 100644 index 000000000..8ccc773d0 --- /dev/null +++ b/releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml @@ -0,0 +1,6 @@ +--- +features: + - Separate create and update rules for volume metadata. +upgrade: + - If policy for update volume metadata is modified in a desired way + it's needed to add a desired rule for create volume metadata. diff --git a/releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml b/releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml new file mode 100644 index 000000000..2805a323d --- /dev/null +++ b/releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml @@ -0,0 +1,13 @@ +--- +features: + - Capabilites List for Datera Volume Drivers + - Extended Volume-Type Support for Datera Volume Drivers + - Naming convention change for Datera Volume Drivers + - Volume Manage/Unmanage support for Datera Volume Drivers + - BoolOpt datera_debug_override_num_replicas was added for Datera Volume + Drivers +deprecations: + - datera_num_replicas IntOpt was changed to a VolumeType + Extra Spec -- DF:replica_count + - datera_acl_allow_all BoolOpt was changed to a VolumeType + Extra Spec -- DF:acl_allow_all diff --git a/releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml b/releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml new file mode 100644 index 000000000..99129b7c9 --- /dev/null +++ b/releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added using etags to avoid the lost update problem during deleting volume metadata. diff --git a/releasenotes/notes/deprecated-kaminario_nodedup_substring-659a47acea97ef5a.yaml b/releasenotes/notes/deprecated-kaminario_nodedup_substring-659a47acea97ef5a.yaml new file mode 100644 index 000000000..3ae9edf31 --- /dev/null +++ b/releasenotes/notes/deprecated-kaminario_nodedup_substring-659a47acea97ef5a.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - Deprecated Kaminario iSCSI and FC Cinder drivers 'kaminario_nodedup_substring' + option in favour of 'kaminario:thin_prov_type' in extra-specs. + diff --git a/releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml b/releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml new file mode 100644 index 000000000..ac2f7cf97 --- /dev/null +++ b/releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml @@ -0,0 +1,6 @@ +--- +features: + - Configuration options for the DRBD driver that will be + applied to DRBD resources; the default values should + be okay for most installations. + diff --git a/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml b/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml new file mode 100644 index 000000000..595889209 --- /dev/null +++ b/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added backend driver for FalconStor FreeStor. 
+ diff --git a/releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml b/releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml new file mode 100644 index 000000000..63656e83a --- /dev/null +++ b/releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml @@ -0,0 +1,4 @@ +--- +features: + - Introduced generic volume groups and added create/ + delete/update/list/show APIs for groups. diff --git a/releasenotes/notes/group-snapshots-36264409bbb8850c.yaml b/releasenotes/notes/group-snapshots-36264409bbb8850c.yaml new file mode 100644 index 000000000..032ad189c --- /dev/null +++ b/releasenotes/notes/group-snapshots-36264409bbb8850c.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added create/delete APIs for group snapshots and + an API to create group from source. diff --git a/releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml b/releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml new file mode 100644 index 000000000..601cddc71 --- /dev/null +++ b/releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added group type and group specs APIs. diff --git a/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml b/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml new file mode 100644 index 000000000..6ae575112 --- /dev/null +++ b/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml @@ -0,0 +1,7 @@ +upgrade: + - HNAS drivers have new configuration paths. Users should now use + ``cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS driver + and ``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for HNAS + iSCSI driver. +deprecations: + - The old HNAS drivers configuration paths have been marked for deprecation. diff --git a/releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml b/releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml new file mode 100644 index 000000000..5c8298fb0 --- /dev/null +++ b/releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added manage/unmanage snapshot support to the HNAS NFS driver. diff --git a/releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml b/releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml new file mode 100644 index 000000000..e64d53af9 --- /dev/null +++ b/releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - HNAS drivers will now read configuration from cinder.conf. +deprecations: + - The XML configuration file used by the HNAS drivers is now + deprecated and will no longer be used in the future. Please + use cinder.conf for all driver configuration. diff --git a/releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml b/releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml new file mode 100644 index 000000000..137a779d3 --- /dev/null +++ b/releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for reporting pool disk type in Huawei + driver. diff --git a/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml b/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml new file mode 100644 index 000000000..b3bfba0f2 --- /dev/null +++ b/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for hybrid aggregates to the NetApp cDOT drivers. 
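Tying the HNAS notes above together: with the refactored driver paths and cinder.conf-based configuration, a back end section would reference the new class locations. A hypothetical fragment, where the section and back end names are illustrative only::

    [hnas-nfs-1]
    volume_backend_name = hnas-nfs-1
    volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver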
+ diff --git a/releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml b/releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml new file mode 100644 index 000000000..318307013 --- /dev/null +++ b/releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml @@ -0,0 +1,4 @@ +--- +features: + - Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers. + diff --git a/releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml b/releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml new file mode 100644 index 000000000..da53724a2 --- /dev/null +++ b/releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml @@ -0,0 +1,5 @@ +--- +features: + - Added support for querying volumes filtered by group_id + using 'group_id' optional URL parameter. + For example, "volumes/detail?group_id={consistency_group_id}". diff --git a/releasenotes/notes/kaminario-cinder-driver-15f2208655590d0a.yaml b/releasenotes/notes/kaminario-cinder-driver-15f2208655590d0a.yaml new file mode 100644 index 000000000..2fbf3ff36 --- /dev/null +++ b/releasenotes/notes/kaminario-cinder-driver-15f2208655590d0a.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add manage/unmanage and extra-specs feature in Kaminario iSCSI and FC Cinder drivers. + diff --git a/releasenotes/notes/kaminario-cinder-driver-435faa025b2f6df4.yaml b/releasenotes/notes/kaminario-cinder-driver-435faa025b2f6df4.yaml new file mode 100644 index 000000000..ca97ae8c6 --- /dev/null +++ b/releasenotes/notes/kaminario-cinder-driver-435faa025b2f6df4.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add replication feature in Kaminario iSCSI and FC Cinder drivers. + diff --git a/releasenotes/notes/kaminario-cinder-driver-ac522bc73e27faad.yaml b/releasenotes/notes/kaminario-cinder-driver-ac522bc73e27faad.yaml new file mode 100644 index 000000000..b67837b14 --- /dev/null +++ b/releasenotes/notes/kaminario-cinder-driver-ac522bc73e27faad.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add retype feature in Kaminario iSCSI and FC Cinder drivers. + diff --git a/releasenotes/notes/kaminario-cinder-driver-bug-1612602-8209c921e59d9dda.yaml b/releasenotes/notes/kaminario-cinder-driver-bug-1612602-8209c921e59d9dda.yaml new file mode 100644 index 000000000..953769cf3 --- /dev/null +++ b/releasenotes/notes/kaminario-cinder-driver-bug-1612602-8209c921e59d9dda.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - Fixed _delete_volume_replica and _get_replica_status in + Kaminario K2 iSCSI and FC Cinder drivers with different source + and target K2 arrays while testing replication. + Removed hard-coding of RPO and fixed volume_type.name issue. + diff --git a/releasenotes/notes/kaminario-cinder-driver-bug-1616329-c91bb4e747d14f3b.yaml b/releasenotes/notes/kaminario-cinder-driver-bug-1616329-c91bb4e747d14f3b.yaml new file mode 100644 index 000000000..9b5073a18 --- /dev/null +++ b/releasenotes/notes/kaminario-cinder-driver-bug-1616329-c91bb4e747d14f3b.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - Fixed retyping 'dedup volume without replication' <-> + 'nodedup volume without replication' after replication failback and + removing replication for a replicated volume in Kaminario iSCSI and + FC Cinder Drivers. 
diff --git a/releasenotes/notes/kaminario-concurrency-bug-e0b899a42383660c.yaml b/releasenotes/notes/kaminario-concurrency-bug-e0b899a42383660c.yaml new file mode 100644 index 000000000..6935b08fe --- /dev/null +++ b/releasenotes/notes/kaminario-concurrency-bug-e0b899a42383660c.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - Fixed a concurrency issue in the K2 iSCSI and FC Cinder drivers + caused by possible race conditions between volume attach and detach + operations, and by a limitation of Kaminario K2 iSCSI and + FC arrays on concurrent operations. diff --git a/releasenotes/notes/kaminario-failback-support-072295f8d13589d3.yaml b/releasenotes/notes/kaminario-failback-support-072295f8d13589d3.yaml new file mode 100644 index 000000000..92927859c --- /dev/null +++ b/releasenotes/notes/kaminario-failback-support-072295f8d13589d3.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Added missing replication failback support in Kaminario iSCSI and + FC Cinder drivers. diff --git a/releasenotes/notes/kaminario-snapshot-auto-delete-ff789c57fa34a7a0.yaml b/releasenotes/notes/kaminario-snapshot-auto-delete-ff789c57fa34a7a0.yaml new file mode 100644 index 000000000..409c26cbc --- /dev/null +++ b/releasenotes/notes/kaminario-snapshot-auto-delete-ff789c57fa34a7a0.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - Add "is_auto_deleteable=False" to the snapshot properties + in Kaminario K2 iSCSI and FC Cinder drivers, in order to + avoid auto deleting the snapshot. + diff --git a/releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml b/releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml new file mode 100644 index 000000000..94ae8b3be --- /dev/null +++ b/releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml @@ -0,0 +1,4 @@ +--- +features: + - The v2 API extensions os-volume-manage and os-snapshot-manage have been + mapped to the v3 resources manageable_volumes and manageable_snapshots. diff --git a/releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml b/releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml new file mode 100644 index 000000000..16864456d --- /dev/null +++ b/releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml @@ -0,0 +1,8 @@ +--- +features: + - Added host-level (whole back end replication - v2.1) replication support + to the NetApp cDOT drivers (iSCSI, FC, NFS). +upgrade: + - While configuring NetApp cDOT back ends, new configuration options + ('replication_device' and 'netapp_replication_aggregate_map') must be + added in order to use the host-level failover feature. diff --git a/releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml b/releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml new file mode 100644 index 000000000..4852b5d7c --- /dev/null +++ b/releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added Cinder consistency group support for the NetApp NFS driver. diff --git a/releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml b/releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml new file mode 100644 index 000000000..584c04300 --- /dev/null +++ b/releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml @@ -0,0 +1,4 @@ +--- +features: + - Support force backup of in-use Cinder volumes + for Nimble Storage.
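For the NetApp cDOT replication note above, both new options go into the back end section of cinder.conf. A hedged sketch; the back end and aggregate names are made up, and the exact value formats should be checked against the NetApp driver documentation::

    [cdot-iscsi-a]
    # ... usual NetApp cDOT back end options ...
    replication_device = backend_id:cdot-iscsi-b
    netapp_replication_aggregate_map = backend_id:cdot-iscsi-b,aggr_a1:aggr_b1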
diff --git a/releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml b/releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml new file mode 100644 index 000000000..a4fbf456d --- /dev/null +++ b/releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml @@ -0,0 +1,9 @@ +--- +issues: + - When running Nova Compute and Cinder Volume or Backup services on the same + host, they must use a shared lock directory to avoid rare race conditions + that can cause volume operation failures (primarily attach/detach of + volumes). This is done by setting the "lock_path" to the same directory + in the "oslo_concurrency" section of nova.conf and cinder.conf. This issue + affects all previous releases utilizing os-brick and shared operations + on hosts between Nova Compute and Cinder data services. \ No newline at end of file diff --git a/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml b/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml index b93f003c1..25b955e37 100644 --- a/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml +++ b/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml @@ -1,7 +1,7 @@ --- features: - New config option for Pure Storage volume drivers pure_eradicate_on_delete. - When enabled will permanantly eradicate data instead of placing into + When enabled, it will permanently eradicate data instead of placing it into a pending eradication state. fixes: - Allow for eradicating Pure Storage volumes, snapshots, and pgroups when diff --git a/releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml b/releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml new file mode 100644 index 000000000..fdb208595 --- /dev/null +++ b/releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Fix issue with PureFCDriver where a partially case-sensitive comparison of + connector wwpns could cause initialize_connection to fail by attempting + to create a duplicate Purity host. diff --git a/releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml b/releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml new file mode 100644 index 000000000..a2bed8cc9 --- /dev/null +++ b/releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add get_manageable_volumes and get_manageable_snapshots implementations for + Pure Storage Volume Drivers. diff --git a/releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml b/releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml new file mode 100644 index 000000000..9ad9eac86 --- /dev/null +++ b/releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Fix 'No space left' errors from the dd command when users set the config + item volume_clear_size to a value larger than the size of a volume. diff --git a/releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml b/releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml new file mode 100644 index 000000000..b2469b694 --- /dev/null +++ b/releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - The config options 'scheduler_topic', 'volume_topic' + and 'backup_topic' have been removed without a + deprecation period as these had never worked + correctly.
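The os-brick note above boils down to one setting repeated in two files. A minimal sketch, with an arbitrary example path::

    # In both nova.conf and cinder.conf on the shared host:
    [oslo_concurrency]
    lock_path = /var/lib/openstack/lock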
diff --git a/releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml b/releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml new file mode 100644 index 000000000..33e5d582c --- /dev/null +++ b/releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml @@ -0,0 +1,15 @@ +--- +features: + - The xiv_ds8k driver now supports IBM XIV, Spectrum + Accelerate, FlashSystem A9000, FlashSystem A9000R + and DS8000 storage systems, and was renamed to IBM + Storage Driver for OpenStack. The changes include text + changes, file names, names of cinder.conf flags, and + names of the proxy classes. +upgrade: + - Users of the IBM Storage Driver, previously known as + the IBM XIV/DS8K driver, upgrading from Mitaka or + previous releases, need to reconfigure the relevant + cinder.conf entries. In most cases the change is just + removal of the xiv-ds8k field prefix, but for details + see the driver documentation. diff --git a/releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml b/releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml new file mode 100644 index 000000000..caec27364 --- /dev/null +++ b/releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml @@ -0,0 +1,4 @@ +--- +features: + - Allow retyping volumes between different encryption types, including + changes from unencrypted types to encrypted types and vice versa. diff --git a/releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml b/releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml new file mode 100644 index 000000000..ce4727b9d --- /dev/null +++ b/releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml @@ -0,0 +1,9 @@ +--- +features: + - Added support for oversubscription in thin provisioning in the + ScaleIO driver. + Volumes should have extra_specs with the key provisioning:type + set to either 'thick' or 'thin'. + max_oversubscription_ratio can be defined by the global config option or, + specifically for ScaleIO, with the config option sio_max_over_subscription_ratio. + The maximum oversubscription ratio supported at the moment is 10.0. \ No newline at end of file diff --git a/releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml b/releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml new file mode 100644 index 000000000..b11c2c9de --- /dev/null +++ b/releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added supported driver checks on all drivers. diff --git a/releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml b/releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml new file mode 100644 index 000000000..6113dfb0c --- /dev/null +++ b/releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml @@ -0,0 +1,15 @@ +--- +prelude: > + The default key manager interface in Cinder was + deprecated and the Castellan key manager interface + library is now used instead. For more information + about Castellan, please see + http://docs.openstack.org/developer/castellan/ . +upgrade: + - If using the key manager, the configuration details + should be updated to reflect the Castellan-specific + configuration options. +deprecations: + - All barbican and keymgr config options in Cinder are + now deprecated. All of these options are moved to + the key_manager section for the Castellan library.
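As a sketch of the Castellan-style configuration the notes above call for; the ``api_class`` path matches Castellan's Barbican key manager layout of the time, but verify it against the Castellan documentation before relying on it::

    [key_manager]
    api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager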
diff --git a/releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml b/releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml new file mode 100644 index 000000000..e31e34aad --- /dev/null +++ b/releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added support for vhd and vhdx disk-formats for volume upload-to-image. diff --git a/releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml b/releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml new file mode 100644 index 000000000..58b99575f --- /dev/null +++ b/releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added iSCSI multipathing support to the VMAX driver. diff --git a/releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml b/releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml new file mode 100644 index 000000000..c31f05012 --- /dev/null +++ b/releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added oversubscription support in the VMAX driver. diff --git a/releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml b/releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml new file mode 100644 index 000000000..374f8edb3 --- /dev/null +++ b/releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added QoS support for the VMAX driver. diff --git a/releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml b/releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml new file mode 100644 index 000000000..d09afc650 --- /dev/null +++ b/releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml @@ -0,0 +1,4 @@ +--- +features: + - A new API to display the volumes summary. This summary API displays the + total number of volumes and the total size of volumes in GB. diff --git a/releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml b/releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml new file mode 100644 index 000000000..7363172df --- /dev/null +++ b/releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added snapshot manage/unmanage support to the EMC XtremIO driver.
diff --git a/requirements.txt b/requirements.txt index 2b144e450..baa64a6b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,32 +11,34 @@ greenlet>=0.3.2 # MIT httplib2>=0.7.5 # MIT iso8601>=0.1.11 # MIT ipaddress>=1.0.7;python_version<'3.3' # PSF +keystoneauth1>=2.10.0 # Apache-2.0 keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0 lxml>=2.3 # BSD oauth2client>=1.5.0 # Apache-2.0 -oslo.config>=3.12.0 # Apache-2.0 +oslo.config>=3.14.0 # Apache-2.0 oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.context>=2.4.0 # Apache-2.0 -oslo.db>=4.1.0 # Apache-2.0 +oslo.context>=2.9.0 # Apache-2.0 +oslo.db>=4.10.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.messaging>=5.2.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.policy>=1.9.0 # Apache-2.0 +oslo.privsep>=1.9.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 -oslo.rootwrap>=2.0.0 # Apache-2.0 +oslo.rootwrap>=5.0.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.15.0 # Apache-2.0 -oslo.versionedobjects>=1.9.1 # Apache-2.0 -osprofiler>=1.3.0 # Apache-2.0 +oslo.utils>=3.16.0 # Apache-2.0 +oslo.versionedobjects>=1.13.0 # Apache-2.0 +osprofiler>=1.4.0 # Apache-2.0 paramiko>=2.0 # LGPLv2.1+ Paste # MIT PasteDeploy>=1.5.0 # MIT pycrypto>=2.6 # Public Domain pyparsing>=2.0.1 # MIT python-barbicanclient>=4.0.0 # Apache-2.0 -python-glanceclient>=2.0.0 # Apache-2.0 -python-keystoneclient!=1.8.0,!=2.1.0,>=1.7.0 # Apache-2.0 +python-glanceclient!=2.4.0,>=2.3.0 # Apache-2.0 +python-keystoneclient!=2.1.0,>=2.0.0 # Apache-2.0 python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 pytz>=2013.6 # MIT @@ -50,12 +52,13 @@ simplejson>=2.2.0 # MIT six>=1.9.0 # MIT SQLAlchemy<1.1.0,>=1.0.10 # MIT sqlalchemy-migrate>=0.9.6 # Apache-2.0 -stevedore>=1.10.0 # Apache-2.0 +stevedore>=1.16.0 # Apache-2.0 suds-jurko>=0.6 # LGPLv3+ WebOb>=1.2.3 # MIT oslo.i18n>=2.1.0 # Apache-2.0 -oslo.vmware>=1.16.0 # Apache-2.0 -os-brick!=1.4.0,>=1.3.0 # Apache-2.0 +oslo.vmware>=2.11.0 # Apache-2.0 +os-brick>=1.3.0 # Apache-2.0 os-win>=0.2.3 # Apache-2.0 tooz>=1.28.0 # Apache-2.0 google-api-python-client>=1.4.2 # Apache-2.0 +castellan>=0.4.0 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg index 1768fa624..f704bbd90 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,7 +5,7 @@ description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org -home-page = http://www.openstack.org/ +home-page = http://docs.openstack.org/developer/cinder/ classifier = Environment :: OpenStack Intended Audience :: Information Technology diff --git a/test-requirements.txt b/test-requirements.txt index 6636087f7..7fb4e7c5f 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,7 +11,7 @@ ddt>=1.0.1 # MIT fixtures>=3.0.0 # Apache-2.0/BSD mock>=2.0 # BSD mox3>=0.7.0 # Apache-2.0 -os-api-ref>=0.1.0 # Apache-2.0 +os-api-ref>=1.0.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 sphinx!=1.3b1,<1.3,>=1.2.1 # BSD python-subunit>=0.0.18 # Apache-2.0/BSD @@ -22,5 +22,5 @@ testscenarios>=0.4 # Apache-2.0/BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 os-testr>=0.7.0 # Apache-2.0 tempest-lib>=0.14.0 # Apache-2.0 -bandit>=1.0.1 # Apache-2.0 +bandit>=1.1.0 # Apache-2.0 reno>=1.8.0 # Apache2 diff --git a/tools/enable-pre-commit-hook.sh b/tools/enable-pre-commit-hook.sh index ecb01eab8..d4f1ba3f5 100755 --- a/tools/enable-pre-commit-hook.sh +++ b/tools/enable-pre-commit-hook.sh @@ -1,6 +1,6 @@ #!/bin/sh -# Copyright 2011 OpenStack LLC +# Copyright 2011 OpenStack Foundation # # Licensed under the 
Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/tools/generate_driver_list.py b/tools/generate_driver_list.py index d7531b46f..91848acd7 100755 --- a/tools/generate_driver_list.py +++ b/tools/generate_driver_list.py @@ -14,43 +14,127 @@ """Generate list of cinder drivers""" +import argparse +import os + from cinder.interface import util -def format_description(desc): +parser = argparse.ArgumentParser(prog="generate_driver_list") + +parser.add_argument("--format", default='str', choices=['str', 'dict'], + help="Output format type") + +# Keep backwards compatibility with the gate-docs test +# The tests pass ['docs'] on the cmdln, but it's never been used. +parser.add_argument("output_list", default=None, nargs='?') + +CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/" + + +class Output(object): + + def __init__(self, base_dir, output_list): + # At this point we don't care what was passed in, just a trigger + # to write this out to the doc tree for now + self.driver_file = None + if output_list: + self.driver_file = open( + '%s/doc/source/drivers.rst' % base_dir, 'w+') + self.driver_file.write('===================\n') + self.driver_file.write('Available Drivers\n') + self.driver_file.write('===================\n\n') + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + if self.driver_file: + self.driver_file.close() + + def write(self, text): + if self.driver_file: + self.driver_file.write('%s\n' % text) + else: + print(text) + + +def format_description(desc, output): desc = desc or '' lines = desc.rstrip('\n').split('\n') for line in lines: - print(' %s' % line) + output.write(' %s' % line) -def print_drivers(drivers, config_name): - # for driver in drivers.sort(key=lambda x: x.class_fqn): +def print_drivers(drivers, config_name, output): for driver in sorted(drivers, key=lambda x: x.class_fqn): - print(driver.class_name) - print('-' * len(driver.class_name)) + output.write(driver.class_name) + output.write('-' * len(driver.class_name)) if driver.version: - print('* Version: %s' % driver.version) - print('* %s=%s' % (config_name, driver.class_fqn)) - print('* Description:') - format_description(driver.desc) - print('') - print('') + output.write('* Version: %s' % driver.version) + output.write('* %s=%s' % (config_name, driver.class_fqn)) + if driver.ci_wiki_name: + output.write('* CI info: %s%s' % (CI_WIKI_ROOT, + driver.ci_wiki_name)) + output.write('* Description:') + format_description(driver.desc, output) + output.write('') + output.write('') + + +def output_str(cinder_root, args): + with Output(cinder_root, args.output_list) as output: + output.write('Volume Drivers') + output.write('==============') + print_drivers(util.get_volume_drivers(), 'volume_driver', output) + + output.write('Backup Drivers') + output.write('==============') + print_drivers(util.get_backup_drivers(), 'backup_driver', output) + + output.write('FC Zone Manager Drivers') + output.write('=======================') + print_drivers(util.get_fczm_drivers(), 'zone_driver', output) + + +def collect_driver_info(driver): + """Build the dictionary that describes this driver.""" + + info = {'name': driver.class_name, + 'version': driver.version, + 'fqn': driver.class_fqn, + 'description': driver.desc, + 'ci_wiki_name': driver.ci_wiki_name} + + return info + + +def output_dict(): + + import pprint + driver_list = [] + drivers = util.get_volume_drivers() + for driver in drivers:
driver_list.append(collect_driver_info(driver)) + + pprint.pprint(driver_list) def main(): - print('VOLUME DRIVERS') - print('==============') - print_drivers(util.get_volume_drivers(), 'volume_driver') + tools_dir = os.path.dirname(os.path.abspath(__file__)) + cinder_root = os.path.dirname(tools_dir) + cur_dir = os.getcwd() + os.chdir(cinder_root) + args = parser.parse_args() - print('BACKUP DRIVERS') - print('==============') - print_drivers(util.get_backup_drivers(), 'backup_driver') - - print('FC ZONE MANAGER DRIVERS') - print('=======================') - print_drivers(util.get_fczm_drivers(), 'zone_driver') + try: + if args.format == 'str': + output_str(cinder_root, args) + elif args.format == 'dict': + output_dict() + finally: + os.chdir(cur_dir) if __name__ == '__main__': main() diff --git a/tools/lintstack.py b/tools/lintstack.py index 5f8cdcf5e..1d989105f 100755 --- a/tools/lintstack.py +++ b/tools/lintstack.py @@ -59,6 +59,11 @@ ignore_messages = [ # during runtime. "Class 'ConsistencyGroup' has no '__table__' member", "Class 'Cgsnapshot' has no '__table__' member", + + # NOTE(xyang): this error message is for code [E1120] when checking if + # there are already 'groups' entries in 'quota_classes' in DB migration + # (078_add_groups_and_group_volume_type_mapping_table). + "No value passed for parameter 'functions' in function call", ] # Note(maoy): We ignore cinder.tests for now due to high false @@ -87,12 +92,24 @@ objects_ignore_messages = [ "Module 'cinder.objects' has no 'CGSnapshotList' member", "Module 'cinder.objects' has no 'ConsistencyGroup' member", "Module 'cinder.objects' has no 'ConsistencyGroupList' member", + "Module 'cinder.objects' has no 'QualityOfServiceSpecs' member", + "Module 'cinder.objects' has no 'QualityOfServiceSpecsList' member", + "Module 'cinder.objects' has no 'RequestSpec' member", "Module 'cinder.objects' has no 'Service' member", "Module 'cinder.objects' has no 'ServiceList' member", "Module 'cinder.objects' has no 'Snapshot' member", "Module 'cinder.objects' has no 'SnapshotList' member", "Module 'cinder.objects' has no 'Volume' member", "Module 'cinder.objects' has no 'VolumeList' member", + "Module 'cinder.objects' has no 'VolumeProperties' member", + "Module 'cinder.objects' has no 'VolumeType' member", + "Module 'cinder.objects' has no 'VolumeTypeList' member", + "Module 'cinder.objects' has no 'Group' member", + "Module 'cinder.objects' has no 'GroupList' member", + "Module 'cinder.objects' has no 'GroupSnapshot' member", + "Module 'cinder.objects' has no 'GroupSnapshotList' member", + "Class 'Group' has no '__table__' member", + "Class 'GroupSnapshot' has no '__table__' member", ] objects_ignore_modules = ["cinder/objects/"] @@ -174,8 +191,12 @@ class LintOutput(object): def review_str(self): return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" - "%(code)s: %(message)s" % self.__dict__) - + "%(code)s: %(message)s" % + {'filename': self.filename, + 'lineno': self.lineno, + 'line_content': self.line_content, + 'code': self.code, + 'message': self.message}) class ErrorKeys(object): diff --git a/tox.ini b/tox.ini index a5e538915..56d7aa1a5 100644 --- a/tox.ini +++ b/tox.ini @@ -34,23 +34,25 @@ passenv = *_proxy *_PROXY # the API Ref to developer.openstack.org.
whitelist_externals = rm deps = -r{toxinidir}/test-requirements.txt -install_command = pip install -U --force-reinstall {opts} {packages} commands = rm -rf api-ref/build - sphinx-build -W -b html -d api-ref/build/doctrees/v1 api-ref/v1/source api-ref/build/html/v1 - sphinx-build -W -b html -d api-ref/build/doctrees/v2 api-ref/v2/source api-ref/build/html/v2 + sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html/ [testenv:releasenotes] -# NOTE(jaegerandi): This target does not use constraints because -# upstream infra does not yet support it. Once that's fixed, we can -# drop the install_command. -install_command = pip install -U --force-reinstall {opts} {packages} commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:functional] setenv = OS_TEST_PATH = ./cinder/tests/functional +[testenv:functional-py34] +[testenv:functional-py35] +basepython= + py34: python3.4 + py35: python3.5 +setenv = + {[testenv:functional]setenv} + [testenv:compliance] setenv = OS_TEST_PATH = ./cinder/tests/compliance @@ -77,10 +79,6 @@ commands = bash tools/lintstack.sh [testenv:cover] # Also do not run test_coverage_ext tests while gathering coverage as those # tests conflict with coverage. -# NOTE(jaegerandi): This target does not use constraints because -# upstream infra does not yet support it. Once that's fixed, we can -# drop the install_command. -install_command = pip install -U --force-reinstall {opts} {packages} commands = python setup.py testr --coverage \ --testr-args='^(?!.*test.*coverage).*$' @@ -97,14 +95,13 @@ envdir = {toxworkdir}/pep8 commands = python cinder/config/generate_cinder_opts.py [testenv:venv] -# NOTE(jaegerandi): This target does not use constraints because -# upstream infra does not yet support it. Once that's fixed, we can -# drop the install_command. -install_command = pip install -U --force-reinstall {opts} {packages} commands = {posargs} [testenv:docs] -commands = python setup.py build_sphinx +commands = + python setup.py build_sphinx + rm -rf api-ref/build + sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html/ [testenv:gendriverlist] sitepackages = False @@ -121,7 +118,7 @@ commands = bandit -r cinder -n5 -x tests -ll # E251 unexpected spaces around keyword / parameter equals # reason: no improvement in readability ignore = E251 -exclude = .git,.venv,.tox,dist,tools,doc,*egg,build +exclude = .git,.venv,.tox,dist,tools,doc/ext,*egg,build max-complexity=30 [hacking] @@ -133,5 +130,5 @@ import_exceptions = cinder.i18n # determining missing packages # this also means that pip-missing-reqs must be installed separately, outside # of the requirements.txt files -deps = pip_missing_reqs +deps = pip_check_reqs>=2.0.1 commands = pip-missing-reqs -d --ignore-file=cinder/tests/* cinder
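Usage of the reworked driver list tool, following the argparse definition in the generate_driver_list.py diff above::

    % ./tools/generate_driver_list.py                  # print the list to stdout
    % ./tools/generate_driver_list.py --format dict    # pprint a list of dicts
    % ./tools/generate_driver_list.py docs             # write doc/source/drivers.rst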