Stop cinderlib development

Remove files from master, as development will no longer occur there.
The stable branches continue to be supported while they are in
Maintained status.

Updated the README to indicate this change.

Depends-on: Ib186ac5830e5920e264d79be946995e63e960426
Depends-on: I081cd363117671eaab6a3193094d5872f9820354
Depends-on: If2b9a82cddb20543b176ee22765049db257c89b9
Depends-on: I1143e5e5ccf8103e386fe1ce614a554e7f152d9a
Change-Id: I4722b869033ad1bd357e36c4a258b6d3ea61f5d6
Brian Rosmaita 2023-12-10 12:57:04 -05:00
parent ee1d86c058
commit f165c6ff5e
125 changed files with 13 additions and 12255 deletions

.gitignore

@@ -1,73 +0,0 @@
# Byte-compiled / optimized / DLL files
.*
!.gitignore
!.testr.conf
!.stestr.conf
!.zuul.yaml
!.travis.yml
.*.sw?
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
cover/
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
# PyBuilder
target/
# pyenv python configuration file
.python-version
# Temp directory, for example for the LVM file, our custom config, etc.
temp/
cinder-lioadm
local-upper-constraints.txt

.stestr.conf

@@ -1,3 +0,0 @@
[DEFAULT]
test_path=${OS_TEST_PATH:-./cinderlib/tests/unit}
top_dir=./

.zuul.yaml

@@ -1,142 +0,0 @@
- project:
vars:
ensure_tox_version: '<4'
queue: integrated
templates:
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
jobs:
- openstack-tox-pep8
- cinderlib-tox-py38
- cinderlib-tox-py39
- cinderlib-tox-py310
# TODO: make this voting when cinderlib opens for 2024.1 development
- cinderlib-tox-py311:
voting: false
- cinderlib-lvm-functional
- cinderlib-ceph-functional
# NOTE: when cinderlib opens for 2024.1 development, use the parent
# job instead
- cinderlib-os-brick-src-tempest-lvm-lio-barbican-2023.2
gate:
jobs:
- openstack-tox-pep8
- cinderlib-tox-py38
- cinderlib-tox-py39
- cinderlib-tox-py310
- cinderlib-lvm-functional
- cinderlib-ceph-functional
# NOTE: when cinderlib opens for 2024.1 development, use the parent
# job instead
- cinderlib-os-brick-src-tempest-lvm-lio-barbican-2023.2
post:
jobs:
- publish-openstack-python-branch-tarball
- job:
name: cinderlib-tox-py38
parent: openstack-tox-py38
required-projects:
- name: openstack/os-brick
override-checkout: stable/2023.2
- name: openstack/cinder
override-checkout: stable/2023.2
- name: openstack/requirements
override-checkout: stable/2023.2
- job:
name: cinderlib-tox-py39
parent: openstack-tox-py39
required-projects:
- name: openstack/os-brick
override-checkout: stable/2023.2
- name: openstack/cinder
override-checkout: stable/2023.2
- name: openstack/requirements
override-checkout: stable/2023.2
- job:
name: cinderlib-tox-py310
parent: openstack-tox-py310
required-projects:
- name: openstack/os-brick
override-checkout: stable/2023.2
- name: openstack/cinder
override-checkout: stable/2023.2
- name: openstack/requirements
override-checkout: stable/2023.2
- job:
name: cinderlib-tox-py311
parent: openstack-tox-py311
required-projects:
- name: openstack/os-brick
override-checkout: stable/2023.2
- name: openstack/cinder
override-checkout: stable/2023.2
- name: openstack/requirements
override-checkout: stable/2023.2
- job:
name: cinderlib-functional
parent: openstack-tox-functional-with-sudo
required-projects:
- name: openstack/os-brick
override-checkout: stable/2023.2
- name: openstack/cinder
override-checkout: stable/2023.2
- name: openstack/requirements
override-checkout: stable/2023.2
pre-run: playbooks/required-projects-bindeps.yaml
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^releasenotes/.*$
- job:
name: cinderlib-lvm-functional
parent: cinderlib-functional
pre-run: playbooks/setup-lvm.yaml
nodeset: centos-9-stream
vars:
tox_environment:
# Workaround for https://github.com/pypa/pip/issues/6264
PIP_OPTIONS: "--no-use-pep517"
CL_FTEST_MEMORY_PERSISTENCE: "false"
# These come from great-great-grandparent tox job
NOSE_WITH_HTML_OUTPUT: 1
NOSE_HTML_OUT_FILE: nose_results.html
NOSE_WITH_XUNIT: 1
# The Ceph job tests cinderlib without unnecessary libraries
- job:
name: cinderlib-ceph-functional
parent: cinderlib-functional
pre-run: playbooks/setup-ceph.yaml
# TODO: move back to centos as soon as Ceph packages are available
nodeset: ubuntu-focal
vars:
tox_environment:
CL_FTEST_CFG: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}/cinderlib/tests/functional/ceph.yaml"
# These come from great-great-grandparent tox job
NOSE_WITH_HTML_OUTPUT: 1
NOSE_HTML_OUT_FILE: nose_results.html
NOSE_WITH_XUNIT: 1
- job:
name: cinderlib-os-brick-src-tempest-lvm-lio-barbican-2023.2
parent: os-brick-src-tempest-lvm-lio-barbican
description: |
Use this job during the phase when cinderlib master is still
the development branch for the previous cinder release. When
cinderlib master and cinder master are the development branches
for the *same* release, you should use the parent job directly
in the check and gate, above.
override-checkout: stable/2023.2
# NOTE: while the cinderlib stable/2023.2 branch does not exist,
# zuul will fall back to using cinderlib master, which is the
# behavior we want.

CONTRIBUTING.rst

@@ -1,19 +0,0 @@
The source repository for this project can be found at:
https://opendev.org/openstack/cinderlib
Pull requests submitted through GitHub are not monitored.
To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:
https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
Bugs should be filed on Launchpad:
https://bugs.launchpad.net/cinderlib
For more specific information about contributing to this repository, see the
cinder contributor guide:
https://docs.openstack.org/cinderlib/latest/contributor/contributing.html

@@ -1,49 +0,0 @@
The Cinder Library, also known as cinderlib, is a Python library that
leverages the Cinder project to provide an object-oriented abstraction around
Cinder's storage drivers, allowing them to be used directly without running
any of the Cinder services or surrounding services such as Keystone, MySQL,
or RabbitMQ.

* Free software: Apache Software License 2.0
* Documentation: https://docs.openstack.org/cinderlib/latest/

The library is intended for developers who only need the basic CRUD
functionality of the drivers and don't need all the additional features
Cinder provides, such as quotas, replication, multi-tenancy, migrations,
retyping, scheduling, backups, authorization, authentication, the REST API,
etc.

The library was originally created as an external project, so it didn't have
the broad range of backend testing Cinder does, and only a limited number of
drivers were validated at the time. Drivers should work out of the box, and
we'll keep a list of drivers that have added the cinderlib functional tests to
the driver gates, confirming they work and ensuring they will keep working.
Features
--------

* Use a Cinder driver without running a DBMS, Message broker, or Cinder
  service.
* Use multiple simultaneous drivers in the same application.
* Basic operations support (see the sketch after this list):

  - Create volume
  - Delete volume
  - Extend volume
  - Clone volume
  - Create snapshot
  - Delete snapshot
  - Create volume from snapshot
  - Connect volume
  - Disconnect volume
  - Local attach
  - Local detach
  - Validate connector
  - Extra Specs for specific backend functionality.
  - Backend QoS
  - Multi-pool support

* Metadata persistence plugins:

  - Stateless: Caller stores JSON serialization.
  - Database: Metadata is stored in a database: MySQL, PostgreSQL, SQLite...
  - Custom plugin: Caller provides a module to store metadata and cinderlib
    calls it when necessary.
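
A minimal sketch of the basic CRUD operations listed above; it assumes an LVM
backend whose volume group ``cinder-volumes`` already exists on the host, and
it uses the default stateless metadata persistence::

    import cinderlib as cl

    # Initialize the library (defaults to stateless metadata persistence).
    cl.setup()

    # Configure a backend with the same options its cinder.conf section
    # would use.
    lvm = cl.Backend(
        volume_backend_name='lvm',
        volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
        volume_group='cinder-volumes',
        target_protocol='iscsi',
        target_helper='lioadm',
    )

    vol = lvm.create_volume(size=1, name='demo')  # size is in GB
    snap = vol.create_snapshot()
    snap.delete()
    vol.delete()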

HACKING.rst

@@ -1,53 +0,0 @@
Cinderlib Style Commandments
============================
- Step 1: Read the OpenStack Style Commandments
https://docs.openstack.org/hacking/latest/
- Step 2: Read on
Cinder Specific Commandments
----------------------------
- [N314] Check for vi editor configuration in source files.
- [N322] Ensure default arguments are not mutable.
- [N323] Add check for explicit import of _() to ensure proper translation.
- [N325] str() and unicode() cannot be used on an exception. Remove or use six.text_type().
- [N336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs.
- [C301] timeutils.utcnow() from oslo_utils should be used instead of datetime.now().
- [C302] six.text_type should be used instead of unicode.
- [C303] Ensure that there are no 'print()' statements in code that is being committed.
- [C304] Enforce no use of LOG.audit messages. LOG.info should be used instead.
- [C305] Prevent use of deprecated contextlib.nested.
- [C306] timeutils.strtime() must not be used (deprecated).
- [C307] LOG.warn is deprecated. Enforce use of LOG.warning.
- [C308] timeutils.isotime() must not be used (deprecated).
- [C309] Unit tests should not perform logging.
- [C310] Check for improper use of logging format arguments.
- [C311] Check for proper naming and usage in option registration.
- [C312] Validate that logs are not translated.
- [C313] Check that assertTrue(value) is used and not assertEqual(True, value).
General
-------
- Use 'raise' instead of 'raise e' to preserve the original traceback of the
  exception being reraised::

      except Exception as e:
          ...
          raise e  # BAD

      except Exception:
          ...
          raise  # OKAY
Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.
For more information on creating unit tests and utilizing the testing
infrastructure in OpenStack Cinder, please see
https://docs.openstack.org/cinder/latest/contributor/testing.html

LICENSE

@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

README.rst

@@ -1,78 +1,19 @@
Cinder Library
==============
.. image:: https://img.shields.io/pypi/v/cinderlib.svg
   :target: https://pypi.python.org/pypi/cinderlib
.. image:: https://img.shields.io/pypi/pyversions/cinderlib.svg
   :target: https://pypi.python.org/pypi/cinderlib
.. image:: https://img.shields.io/:license-apache-blue.svg
   :target: http://www.apache.org/licenses/LICENSE-2.0

This project is no longer being developed. Previous releases will
continue to be supported under the schedule outlined in the
`OpenStack Stable Branches Policy
<https://docs.openstack.org/project-team-guide/stable-branches.html>`_.

While stable branches exist, you will be able to see them here,
but they will be deleted as they reach End of Life.

The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with ``git checkout HEAD^1``.
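
For example::

    git clone https://opendev.org/openstack/cinderlib
    cd cinderlib
    git checkout HEAD^1
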
Introduction
------------
The Cinder Library, also known as cinderlib, is a Python library that
leverages the Cinder project to provide an object-oriented abstraction around
Cinder's storage drivers, allowing them to be used directly without running
any of the Cinder services or surrounding services such as Keystone, MySQL,
or RabbitMQ.

* Free software: Apache Software License 2.0
* Documentation: https://docs.openstack.org/cinderlib/latest/

The library is intended for developers who only need the basic CRUD
functionality of the drivers and don't need all the additional features
Cinder provides, such as quotas, replication, multi-tenancy, migrations,
retyping, scheduling, backups, authorization, authentication, the REST API,
etc.

The library was originally created as an external project, so it didn't have
the broad range of backend testing Cinder does, and only a limited number of
drivers were validated at the time. Drivers should work out of the box, and
we'll keep a list of drivers that have added the cinderlib functional tests to
the driver gates, confirming they work and ensuring they will keep working.
Features
--------

* Use a Cinder driver without running a DBMS, Message broker, or Cinder
  service.
* Use multiple simultaneous drivers in the same application.
* Basic operations support:

  - Create volume
  - Delete volume
  - Extend volume
  - Clone volume
  - Create snapshot
  - Delete snapshot
  - Create volume from snapshot
  - Connect volume
  - Disconnect volume
  - Local attach
  - Local detach
  - Validate connector
  - Extra Specs for specific backend functionality.
  - Backend QoS
  - Multi-pool support

* Metadata persistence plugins:

  - Stateless: Caller stores JSON serialization.
  - Database: Metadata is stored in a database: MySQL, PostgreSQL, SQLite...
  - Custom plugin: Caller provides a module to store metadata and cinderlib
    calls it when necessary.
Demo
----
.. raw:: html

   <a href="https://asciinema.org/a/TcTR7Lu7jI0pEsd9ThEn01l7n?autoplay=1"
   target="_blank"><img
   src="https://asciinema.org/a/TcTR7Lu7jI0pEsd9ThEn01l7n.png"/></a>
.. _GIGO: https://en.wikipedia.org/wiki/Garbage_in,_garbage_out
.. _official project documentation: https://readthedocs.org/projects/cinderlib/badge/?version=latest
.. _OpenStack's Cinder volume driver configuration documentation: https://docs.openstack.org/cinder/latest/configuration/block-storage/volume-drivers.html
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.

babel.cfg

@@ -1,2 +0,0 @@
[python: **.py]

bindep.txt

@@ -1,44 +0,0 @@
# This is a cross-platform list tracking distribution packages needed for
# install and tests;
# see https://docs.openstack.org/infra/bindep/ for additional information.
build-essential [platform:dpkg test]
gcc [platform:rpm test]
python3 [platform:redhat test]
python3-devel [platform:redhat test]
# gettext and graphviz are needed by doc builds only. For transition,
# have them in both doc and test.
# TODO(jaegerandi): Remove test once infra scripts are updated.
gettext [!platform:suse doc test]
gettext-runtime [platform:suse doc test]
graphviz [doc test]
# for pdf-docs
fonts-liberation [doc platform:dpkg]
latexmk [doc platform:dpkg]
librsvg2-bin [doc platform:dpkg]
sg3-utils [platform:dpkg]
texlive-latex-base [doc platform:dpkg]
texlive-latex-extra [doc platform:dpkg]
texlive-xetex [doc platform:dpkg]
texlive-fonts-recommended [doc platform:dpkg]
xindy [doc platform:dpkg]
latexmk [doc platform:rpm]
librsvg2-tools [doc platform:rpm]
python3-sphinxcontrib-svg2pdfconverter-common [doc platform:rpm]
sg3_utils [platform:rpm]
texlive [doc platform:rpm]
texlive-capt-of [doc platform:rpm]
texlive-fncychap [doc platform:rpm]
texlive-framed [doc platform:rpm]
texlive-needspace [doc platform:rpm]
texlive-pdftex [doc platform:rpm]
texlive-polyglossia [doc platform:rpm]
texlive-tabulary [doc platform:rpm]
texlive-titlesec [doc platform:rpm]
texlive-upquote [doc platform:rpm]
texlive-wrapfig [doc platform:rpm]
texlive-xetex [doc platform:rpm]
texlive-xindy [doc platform:rpm]

cinderlib/__init__.py

@@ -1,53 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
# For python 3.8 and later
import importlib.metadata as importlib_metadata
except ImportError:
# For everyone else
import importlib_metadata
from os_brick.initiator import connector
from cinderlib import _fake_packages # noqa F401
from cinderlib import cinderlib
from cinderlib import objects
from cinderlib import serialization
try:
__version__ = importlib_metadata.version('cinderlib')
except importlib_metadata.PackageNotFoundError:
__version__ = '0.0.0'
DEFAULT_PROJECT_ID = objects.DEFAULT_PROJECT_ID
DEFAULT_USER_ID = objects.DEFAULT_USER_ID
Volume = objects.Volume
Snapshot = objects.Snapshot
Connection = objects.Connection
KeyValue = objects.KeyValue
load = serialization.load
json = serialization.json
jsons = serialization.jsons
dump = serialization.dump
dumps = serialization.dumps
setup = cinderlib.setup
Backend = cinderlib.Backend
# This gets reassigned on initialization by nos_brick.init
get_connector_properties = connector.get_connector_properties
list_supported_drivers = cinderlib.Backend.list_supported_drivers

cinderlib/_fake_packages.py

@@ -1,169 +0,0 @@
# Copyright (c) 2019, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake unnecessary packages
There are many packages that are automatically imported when loading cinder
modules, and are used for normal Cinder operation, but they are not necessary
for cinderlib's execution. One example of this happening is when cinderlib
loads a module to get configuration options but won't execute any of the code
present in that module.
This module fakes these packages providing the following benefits:
- Faster load times
- Reduced memory footprint
- Distributions can create a cinderlib package with fewer dependencies.
"""
try:
# Only present and needed in Python >= 3.4
from importlib import machinery
except ImportError:
pass
import logging
import sys
import types
from oslo_config import cfg
__all__ = ['faker']
PACKAGES = [
'glanceclient', 'novaclient', 'swiftclient', 'barbicanclient', 'cursive',
'keystoneauth1', 'keystonemiddleware', 'keystoneclient', 'castellan',
'oslo_reports', 'oslo_policy', 'oslo_messaging', 'osprofiler', 'paste',
'oslo_middleware', 'webob', 'pyparsing', 'routes', 'jsonschema', 'os_win',
'oauth2client', 'oslo_upgradecheck', 'googleapiclient', 'pastedeploy',
]
_DECORATOR_CLASSES = (types.FunctionType, types.MethodType)
LOG = logging.getLogger(__name__)
class _FakeObject(object):
"""Generic fake object: Iterable, Class, decorator, etc."""
def __init__(self, *args, **kwargs):
self.__key_value__ = {}
def __len__(self):
return len(self.__key_value__)
def __contains__(self, key):
return key in self.__key_value__
def __iter__(self):
return iter(self.__key_value__)
def __mro_entries__(self, bases):
return (self.__class__,)
def __setitem__(self, key, value):
self.__key_value__[key] = value
def _new_instance(self, class_name):
attrs = {'__module__': self.__module__ + '.' + self.__class__.__name__}
return type(class_name, (self.__class__,), attrs)()
# No need to define __class_getitem__, as __getitem__ has the priority
def __getitem__(self, key):
if key in self.__key_value__:
return self.__key_value__[key]
return self._new_instance(key)
def __getattr__(self, key):
return self._new_instance(key)
def __call__(self, *args, **kw):
# If we are a decorator return the method that we are decorating
if args and isinstance(args[0], _DECORATOR_CLASSES):
return args[0]
return self
def __repr__(self):
return self.__qualname__
class Faker(object):
"""Fake Finder and Loader for whole packages."""
def __init__(self, packages):
self.faked_modules = []
self.packages = packages
def _fake_module(self, name):
"""Dynamically create a module as close as possible to a real one."""
LOG.debug('Faking %s', name)
attributes = {
'__doc__': None,
'__name__': name,
'__file__': name,
'__loader__': self,
'__builtins__': __builtins__,
'__package__': name.rsplit('.', 1)[0] if '.' in name else None,
'__repr__': lambda self: self.__name__,
'__getattr__': lambda self, name: (
type(name, (_FakeObject,), {'__module__': self.__name__})()),
}
keys = ['__doc__', '__name__', '__file__', '__builtins__',
'__package__']
# Path only present at the package level
if '.' not in name:
attributes['__path__'] = [name]
keys.append('__path__')
# We only want to show some of our attributes
attributes.update(__dict__={k: attributes[k] for k in keys},
__dir__=lambda self: keys)
# Create the class and instantiate it
module_class = type(name, (types.ModuleType,), attributes)
self.faked_modules.append(name)
return module_class(name)
def find_module(self, fullname, path=None):
"""Find a module and return a Loader if it's one of ours or None."""
package = fullname.split('.')[0]
# If it's one of ours, then we are the loader
if package in self.packages:
return self
return None
def load_module(self, fullname):
"""Create a new Fake module if it's not already present."""
if fullname in sys.modules:
return sys.modules[fullname]
sys.modules[fullname] = self._fake_module(fullname)
return sys.modules[fullname]
def find_spec(self, fullname, path=None, target=None):
"""Return our spec it it's one of our packages or None."""
if self.find_module(fullname):
return machinery.ModuleSpec(fullname,
self,
is_package='.' not in fullname)
return None
def create_module(self, spec):
"""Fake a module."""
return self._fake_module(spec.name)
# cinder.quota_utils manually imports keystone_authtoken config group, so we
# create a fake one to avoid failure.
cfg.CONF.register_opts([cfg.StrOpt('fake')], group='keystone_authtoken')
# Create faker and add it to the list of Finders
faker = Faker(PACKAGES)
sys.meta_path.insert(0, faker)
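
A sketch of the module's observable effect, assuming cinderlib is installed
(importing the module registers the faker in ``sys.meta_path``)::

    import cinderlib._fake_packages  # noqa: F401

    # 'keystoneclient' is listed in PACKAGES, so this import succeeds even
    # when the real python-keystoneclient is not installed.
    import keystoneclient.v3.client as client_mod

    # Attribute access yields permissive _FakeObject instances, and calling
    # them returns the object itself, so module-level client code keeps
    # working without the real dependency.
    fake_client = client_mod.Client(session=None)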

cinderlib/bin/venv-privsep-helper

@@ -1,10 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copy of oslo.privsep's privsep-helper that uses the virtual env python
# instead of a hardcoded Python version
import re
import sys
from oslo_privsep.daemon import helper_main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(helper_main())

cinderlib/cinderlib.py

@@ -1,594 +0,0 @@
# Copyright (c) 2017, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import glob
import json as json_lib
import logging
import multiprocessing
import os
import shutil
from cinder import coordination
from cinder.db import api as db_api
from cinder import objects as cinder_objects
# We need this here until we remove from cinder/volume/manager.py:
# VA_LIST = objects.VolumeAttachmentList
cinder_objects.register_all() # noqa
from cinder.interface import util as cinder_interface_util
import cinder.privsep
from cinder import utils
from cinder.volume import configuration
from cinder.volume import manager # noqa We need to import config options
import os_brick.privileged
from oslo_config import cfg
from oslo_log import log as oslo_logging
from oslo_privsep import priv_context
from oslo_utils import importutils
import urllib3
import cinderlib
from cinderlib import objects
from cinderlib import persistence
from cinderlib import serialization
from cinderlib import utils as cinderlib_utils
__all__ = ['setup', 'Backend']
LOG = logging.getLogger(__name__)
class Backend(object):
"""Representation of a Cinder Driver.
User facing attributes are:
- __init__
- json
- jsons
- load
- stats
- create_volume
- global_setup
- validate_connector
"""
backends = {}
global_initialization = False
# Some drivers try to access the DB directly for extra specs on creation.
# With this dictionary the DB class can get the necessary data
_volumes_inflight = {}
def __new__(cls, volume_backend_name, **driver_cfg):
# Prevent redefinition of an already initialized backend on the same
# persistence storage with a different configuration.
backend = Backend.backends.get(volume_backend_name)
if backend:
# If we are instantiating the same backend return the one we have
# saved (singleton pattern).
if driver_cfg == backend._original_driver_cfg:
return backend
raise ValueError('Backend named %s already exists with a different'
' configuration' % volume_backend_name)
return super(Backend, cls).__new__(cls)
def __init__(self, volume_backend_name, **driver_cfg):
if not self.global_initialization:
self.global_setup()
# Instance already initialized
if volume_backend_name in Backend.backends:
return
# Save the original config before we add the backend name and template
# the values.
self._original_driver_cfg = driver_cfg.copy()
driver_cfg['volume_backend_name'] = volume_backend_name
conf = self._get_backend_config(driver_cfg)
self._apply_backend_workarounds(conf)
self.driver = importutils.import_object(
conf.volume_driver,
configuration=conf,
db=self.persistence.db,
host='%s@%s' % (cfg.CONF.host, volume_backend_name),
cluster_name=None, # We don't use cfg.CONF.cluster for now
active_backend_id=None) # No failover for now
# do_setup and check_for_setup_error were merged into setup in Yoga.
# First try the old interface, and if it fails, try the new one.
try:
self.driver.do_setup(objects.CONTEXT)
self.driver.check_for_setup_error()
except AttributeError:
self.driver.setup(objects.CONTEXT)
self.driver.init_capabilities()
self.driver.set_throttle()
self.driver.set_initialized()
self._driver_cfg = driver_cfg
self._volumes = None
# Some drivers don't implement the caching correctly. Populate cache
# with data retrieved in init_capabilities.
stats = self.driver.capabilities.copy()
stats.pop('properties', None)
stats.pop('vendor_prefix', None)
self._stats = self._transform_legacy_stats(stats)
self._pool_names = tuple(pool['pool_name'] for pool in stats['pools'])
Backend.backends[volume_backend_name] = self
@property
def pool_names(self):
return self._pool_names
def __repr__(self):
return '<cinderlib.Backend %s>' % self.id
def __getattr__(self, name):
return getattr(self.driver, name)
@property
def id(self):
return self._driver_cfg['volume_backend_name']
@property
def volumes(self):
if self._volumes is None:
self._volumes = self.persistence.get_volumes(backend_name=self.id)
return self._volumes
def volumes_filtered(self, volume_id=None, volume_name=None):
return self.persistence.get_volumes(backend_name=self.id,
volume_id=volume_id,
volume_name=volume_name)
def _transform_legacy_stats(self, stats):
"""Convert legacy stats to new stats with pools key."""
# Fill pools for legacy driver reports
if stats and 'pools' not in stats:
pool = stats.copy()
pool['pool_name'] = self.id
for key in ('driver_version', 'shared_targets',
'sparse_copy_volume', 'storage_protocol',
'vendor_name', 'volume_backend_name'):
pool.pop(key, None)
stats['pools'] = [pool]
return stats
def stats(self, refresh=False):
# Some drivers don't implement the caching correctly, so we implement
# it ourselves.
if refresh:
stats = self.driver.get_volume_stats(refresh=refresh)
self._stats = self._transform_legacy_stats(stats)
return self._stats
def create_volume(self, size, name='', description='', bootable=False,
**kwargs):
vol = objects.Volume(self, size=size, name=name,
description=description, bootable=bootable,
**kwargs)
vol.create()
return vol
def _volume_removed(self, volume):
i, vol = cinderlib_utils.find_by_id(volume.id, self._volumes)
if vol:
del self._volumes[i]
@classmethod
def _start_creating_volume(cls, volume):
cls._volumes_inflight[volume.id] = volume
def _volume_created(self, volume):
if self._volumes is not None:
self._volumes.append(volume)
self._volumes_inflight.pop(volume.id, None)
def validate_connector(self, connector_dict):
"""Raise exception if missing info for volume's connect call."""
self.driver.validate_connector(connector_dict)
@classmethod
def set_persistence(cls, persistence_config):
if not hasattr(cls, 'project_id'):
raise Exception('set_persistence can only be called after '
'cinderlib has been configured')
cls.persistence = persistence.setup(persistence_config)
objects.setup(cls.persistence, Backend, cls.project_id, cls.user_id,
cls.non_uuid_ids)
for backend in cls.backends.values():
backend.driver.db = cls.persistence.db
# Replace the standard DB implementation instance with the one from
# the persistence plugin.
db_api.IMPL = cls.persistence.db
@classmethod
def _set_cinder_config(cls, host, locks_path, cinder_config_params):
"""Setup the parser with all the known Cinder configuration."""
cfg.CONF.set_default('state_path', os.getcwd())
cfg.CONF.set_default('lock_path', '$state_path', 'oslo_concurrency')
cfg.CONF.version = cinderlib.__version__
if locks_path:
cfg.CONF.oslo_concurrency.lock_path = locks_path
cfg.CONF.coordination.backend_url = 'file://' + locks_path
if host:
cfg.CONF.host = host
cls._validate_and_set_options(cinder_config_params)
# Replace command line arg parser so we ignore caller's args
cfg._CachedArgumentParser.parse_args = lambda *a, **kw: None
@classmethod
def _validate_and_set_options(cls, kvs, group=None):
"""Validate options and substitute references."""
# Dynamically loading the driver triggers adding the specific
# configuration options to the backend_defaults section
if kvs.get('volume_driver'):
driver_ns = kvs['volume_driver'].rsplit('.', 1)[0]
__import__(driver_ns)
group = group or 'backend_defaults'
for k, v in kvs.items():
try:
# set_override does the validation
cfg.CONF.set_override(k, v, group)
except cfg.NoSuchOptError:
# RBD keyring may be removed from the Cinder RBD driver, but
# the functionality will remain for cinderlib usage only, so we
# do the validation manually in that case.
# NOTE: Templating won't work on the rbd_keyring_conf, but it's
# unlikely to be needed.
if k == 'rbd_keyring_conf':
if v and not isinstance(v, str):
raise ValueError('%s must be a string' % k)
else:
# Don't fail on unknown variables, behave like cinder
LOG.warning('Unknown config option %s', k)
oslo_group = getattr(cfg.CONF, str(group), cfg.CONF)
# Now that we have validated/templated everything set updated values
for k, v in kvs.items():
kvs[k] = getattr(oslo_group, k, v)
# For global configuration we leave the overrides, but for drivers we
# don't, to prevent cross-driver config pollination. The cfg will be
# set as an attribute of the configuration that's passed to the driver.
if group:
for k in kvs.keys():
try:
cfg.CONF.clear_override(k, group, clear_cache=True)
except cfg.NoSuchOptError:
pass
def _get_backend_config(self, driver_cfg):
# Create the group for the backend
backend_name = driver_cfg['volume_backend_name']
cfg.CONF.register_group(cfg.OptGroup(backend_name))
# Validate and set config options
self._validate_and_set_options(driver_cfg)
backend_group = getattr(cfg.CONF, backend_name)
for key, value in driver_cfg.items():
setattr(backend_group, key, value)
# Return the Configuration that will be passed to the driver
config = configuration.Configuration([], config_group=backend_name)
return config
@classmethod
def global_setup(cls, file_locks_path=None, root_helper='sudo',
suppress_requests_ssl_warnings=True, disable_logs=True,
non_uuid_ids=False, output_all_backend_info=False,
project_id=None, user_id=None, persistence_config=None,
fail_on_missing_backend=True, host=None,
**cinder_config_params):
# Global setup can only be set once
if cls.global_initialization:
raise Exception('Already setup')
cls.im_root = os.getuid() == 0
cls.fail_on_missing_backend = fail_on_missing_backend
cls.project_id = project_id
cls.user_id = user_id
cls.non_uuid_ids = non_uuid_ids
cls.set_persistence(persistence_config)
cls._set_cinder_config(host, file_locks_path, cinder_config_params)
serialization.setup(cls)
cls._set_logging(disable_logs)
cls._set_priv_helper(root_helper)
coordination.COORDINATOR.start()
if suppress_requests_ssl_warnings:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
urllib3.disable_warnings(
urllib3.exceptions.InsecurePlatformWarning)
cls.global_initialization = True
cls.output_all_backend_info = output_all_backend_info
def _apply_backend_workarounds(self, config):
"""Apply workarounds for drivers that do bad stuff."""
if 'netapp' in config.volume_driver:
# Workaround NetApp's weird replication stuff that makes it reload
# config sections in get_backend_configuration. OK since we don't
# support replication.
cfg.CONF.list_all_sections = lambda: config.volume_backend_name
@classmethod
def _set_logging(cls, disable_logs):
if disable_logs:
logging.Logger.disabled = property(lambda s: True,
lambda s, x: None)
return
oslo_logging.setup(cfg.CONF, 'cinder')
logging.captureWarnings(True)
@classmethod
def _set_priv_helper(cls, root_helper):
# If we are using a virtual environment then the rootwrap config files
# should be within the environment and not under /etc/cinder/
venv = os.environ.get('VIRTUAL_ENV')
if (venv and not cfg.CONF.rootwrap_config.startswith(venv) and
not os.path.exists(cfg.CONF.rootwrap_config)):
# We need to remove the absolute path (initial '/') to generate the
# config path under the virtualenv for the join to work.
wrap_path = cfg.CONF.rootwrap_config[1:]
venv_wrap_file = os.path.join(venv, wrap_path)
venv_wrap_dir = os.path.dirname(venv_wrap_file)
# In virtual environments our rootwrap config file is no longer
# '/etc/cinder/rootwrap.conf'. We have 2 possible roots, it's
# either the virtualenv's directory or where our sources are if
# we have installed cinder as editable.
# For editable we need to copy the files into the virtualenv if we
# haven't copied them before.
if not utils.__file__.startswith(venv):
# If we haven't copied the files yet
if not os.path.exists(venv_wrap_file):
editable_link = glob.glob(os.path.join(
venv, 'lib/python*/site-packages/cinder.egg-link'))
with open(editable_link[0], 'r') as f:
cinder_source_path = f.read().split('\n')[0]
cinder_source_etc = os.path.join(cinder_source_path,
'etc/cinder')
shutil.copytree(cinder_source_etc, venv_wrap_dir)
# For venvs we need to update configured filters_path and exec_dirs
parser = configparser.ConfigParser()
parser.read(venv_wrap_file)
# Change contents if we haven't done it already
if not parser['DEFAULT']['filters_path'].startswith(venv_wrap_dir):
parser['DEFAULT']['filters_path'] = os.path.join(venv_wrap_dir,
'rootwrap.d')
parser['DEFAULT']['exec_dirs'] = (
os.path.join(venv, 'bin,') +
parser['DEFAULT']['exec_dirs'])
with open(venv_wrap_file, 'w') as f:
parser.write(f)
# Don't use set_override because it doesn't work as it should
cfg.CONF.rootwrap_config = venv_wrap_file
# The default root helper in both Cinder and privsep is sudo, so there
# is nothing to do in those cases.
if root_helper != 'sudo':
# Get the current helper (usually 'sudo cinder-rootwrap
# <CONF.rootwrap_config>') and replace the sudo part
original_helper = utils.get_root_helper()
# If we haven't already set the helper
if root_helper not in original_helper:
new_helper = original_helper.replace('sudo', root_helper)
utils.get_root_helper = lambda: new_helper
# Initialize privsep's context to not use 'sudo'
priv_context.init(root_helper=[root_helper])
# When using privsep from the system we need to replace the
# privsep-helper with our own to use the virtual env libraries.
if venv and not priv_context.__file__.startswith(venv):
# Use importlib.resources to support PEP 302-based import hooks.
# importlib.resources was added in Python 3.7, but 'files' only in 3.9
# and namespace package support in 3.10, so stdlib use requires 3.10.
import sys
if sys.version_info[:2] >= (3, 10):
from importlib.resources import files
else:
from importlib_resources import files
privhelper = files('cinderlib.bin').joinpath('venv-privsep-helper')
cmd = f'{root_helper} {privhelper}'
# Change default of the option instead of the value of the
# different contexts
for opt in priv_context.OPTS:
if opt.name == 'helper_command':
opt.default = cmd
break
# Don't use server/client mode when running as root
client_mode = not cls.im_root
cinder.privsep.sys_admin_pctxt.set_client_mode(client_mode)
os_brick.privileged.default.set_client_mode(client_mode)
@property
def config(self):
if self.output_all_backend_info:
return self._driver_cfg
return {'volume_backend_name': self._driver_cfg['volume_backend_name']}
def _serialize(self, property_name):
result = [getattr(volume, property_name) for volume in self.volumes]
# We only need to output the full backend configuration once
if self.output_all_backend_info:
backend = {'volume_backend_name': self.id}
for volume in result:
volume['backend'] = backend
return {'class': type(self).__name__,
'backend': self.config,
'volumes': result}
@property
def json(self):
return self._serialize('json')
@property
def dump(self):
return self._serialize('dump')
@property
def jsons(self):
return json_lib.dumps(self.json)
@property
def dumps(self):
return json_lib.dumps(self.dump)
@classmethod
def load(cls, json_src, save=False):
backend = Backend.load_backend(json_src['backend'])
volumes = json_src.get('volumes')
if volumes:
backend._volumes = [objects.Volume.load(v, save) for v in volumes]
return backend
@classmethod
def load_backend(cls, backend_data):
backend_name = backend_data['volume_backend_name']
if backend_name in cls.backends:
return cls.backends[backend_name]
if len(backend_data) > 1:
return cls(**backend_data)
if cls.fail_on_missing_backend:
raise Exception('Backend not present in system or json.')
return backend_name
def refresh(self):
if self._volumes is not None:
self._volumes = None
self.volumes
@staticmethod
def list_supported_drivers(output_version=1):
"""Returns dictionary with driver classes names as keys.
The output of the method changes from version to version, so we can
pass the output_version parameter to specify which version we are
expecting.
Version 1: Original output intended for human consumption, where all
dictionary values are strings.
Version 2: Improved version intended for automated consumption.
- type is now a dictionary with detailed information
- Values retain their types, so we'll no longer get 'None'
or 'False'.
"""
def get_vars(obj):
return {k: v for k, v in vars(obj).items()
if not k.startswith('_')}
def get_strs(obj):
return {k: str(v) for k, v in vars(obj).items()
if not k.startswith('_')}
def convert_oslo_config(oslo_option, output_version):
if output_version != 2:
return get_strs(oslo_option)
res = get_vars(oslo_option)
type_class = res['type']
res['type'] = get_vars(oslo_option.type)
res['type']['type_class'] = type_class
return res
def fix_cinderlib_options(driver_dict, output_version):
# The rbd_keyring_conf option is deprecated and will be removed for
# Cinder, because it's a security vulnerability there (OSSN-0085),
# but it isn't for cinderlib, since the user of the library already
# has access to all the credentials, and cinderlib needs it to work
# with RBD, so we need to make sure that the config option is
# there whether it's reported as deprecated or removed from Cinder.
RBD_KEYRING_CONF = cfg.StrOpt('rbd_keyring_conf',
default='',
help='Path to the ceph keyring file')
if driver_dict['class_name'] != 'RBDDriver':
return
rbd_opt = convert_oslo_config(RBD_KEYRING_CONF, output_version)
for opt in driver_dict['driver_options']:
if opt['dest'] == 'rbd_keyring_conf':
opt.clear()
opt.update(rbd_opt)
break
else:
driver_dict['driver_options'].append(rbd_opt)
def list_drivers(queue, output_version):
cwd = os.getcwd()
# Go to the parent directory where Cinder is installed
os.chdir(utils.__file__.rsplit(os.sep, 2)[0])
try:
drivers = cinder_interface_util.get_volume_drivers()
mapping = {d.class_name: vars(d) for d in drivers}
for driver in mapping.values():
driver.pop('cls', None)
if 'driver_options' in driver:
driver['driver_options'] = [
convert_oslo_config(opt, output_version)
for opt in driver['driver_options']
]
fix_cinderlib_options(driver, output_version)
finally:
os.chdir(cwd)
queue.put(mapping)
if not (1 <= output_version <= 2):
raise ValueError('Acceptable versions are 1 and 2')
# Use a different process to avoid having all driver classes loaded in
# memory during our execution.
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=list_drivers,
args=(queue, output_version))
p.start()
result = queue.get()
p.join()
return result
setup = Backend.global_setup
# Used by serialization.load
objects.Backend = Backend
# Needed if we use serialization.load before initializing cinderlib
objects.Object.backend_class = Backend
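
A minimal sketch of the module-level entry points defined above; the
``persistence_config`` follows the database persistence plugin format, and
the SQLite path is only illustrative::

    import cinderlib as cl

    # Keep volume and snapshot metadata in a local SQLite file instead of
    # making the caller store JSON serializations.
    cl.setup(persistence_config={'storage': 'db',
                                 'connection': 'sqlite:///cinderlib.sqlite'})

    # Version 2 output keeps the options' Python types and expands 'type'
    # into a detailed dictionary, as described in the docstring above.
    drivers = cl.Backend.list_supported_drivers(output_version=2)
    print(sorted(drivers))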

cinderlib/cmd/cinder_cfg_to_python.py

@@ -1,65 +0,0 @@
#!/bin/env python
# Copyright (c) 2017, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generate Python code to initialize cinderlib based on Cinder config file
This tool generates Python code to instantiate backends using a cinder.conf
file.
It supports multiple backends as defined in enabled_backends.
This program uses the oslo.config module to load configuration options instead
of using configparser directly because drivers will need variables to have the
right type (string, list, integer...), and the types are defined in the code
using oslo.config.
cinder-cfg-to-python cinder.conf cinderlib-conf.py
If no output is provided it will use stdout, and if we also don't provide an
input file, it will default to /etc/cinder/cinder.conf.
"""
import sys
from cinderlib.cmd import cinder_to_yaml
def _to_str(value):
if isinstance(value, str):
return '"' + value + '"'
return value
def convert(source, dest):
config = cinder_to_yaml.convert(source)
result = ['import cinderlib as cl']
for backend in config['backends']:
name = backend['volume_backend_name']
name = name.replace(' ', '_').replace('-', '_')
cfg = ', '.join('%s=%s' % (k, _to_str(v)) for k, v in backend.items())
result.append('%s = cl.Backend(%s)' % (name, cfg))
with open(dest, 'w') as f:
f.write('\n\n'.join(result) + '\n')
def main():
source = '/etc/cinder/cinder.conf' if len(sys.argv) < 2 else sys.argv[1]
dest = '/dev/stdout' if len(sys.argv) < 3 else sys.argv[2]
convert(source, dest)
if __name__ == '__main__':
main()
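
For illustration, given an assumed ``cinder.conf`` with a single LVM backend
named ``lvm``, the generated file would look roughly like this (one
``cl.Backend(...)`` assignment per enabled backend)::

    import cinderlib as cl

    lvm = cl.Backend(volume_backend_name="lvm", volume_driver="cinder.volume.drivers.lvm.LVMVolumeDriver", volume_group="cinder-volumes")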

cinderlib/cmd/cinder_to_yaml.py

@@ -1,70 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import path
import yaml
import configparser
from cinder.cmd import volume
volume.objects.register_all() # noqa
from cinder.volume import configuration as config
from cinder.volume import manager
def convert(cinder_source, yaml_dest=None):
result_cfgs = []
if not path.exists(cinder_source):
raise Exception("Cinder config file %s doesn't exist" % cinder_source)
# Manually parse the Cinder configuration file so we know which options are
# set.
parser = configparser.ConfigParser()
parser.read(cinder_source)
enabled_backends = parser.get('DEFAULT', 'enabled_backends')
backends = [name.strip() for name in enabled_backends.split(',') if name]
volume.CONF(('--config-file', cinder_source), project='cinder')
for backend in backends:
options_present = parser.options(backend)
# Dynamically loading the driver triggers adding the specific
# configuration options to the backend_defaults section
cfg = config.Configuration(manager.volume_backend_opts,
config_group=backend)
driver_ns = cfg.volume_driver.rsplit('.', 1)[0]
__import__(driver_ns)
# Use the backend_defaults section to extract the configuration for
# options that are present in the backend section and add them to
# the backend section.
opts = volume.CONF._groups['backend_defaults']._opts
known_present_options = [opt for opt in options_present if opt in opts]
volume_opts = [opts[option]['opt'] for option in known_present_options]
cfg.append_config_values(volume_opts)
# Now retrieve the options that are set in the configuration file.
result_cfgs.append({option: cfg.safe_get(option)
for option in known_present_options})
result = {'backends': result_cfgs}
if yaml_dest:
# Write the YAML to the destination
with open(yaml_dest, 'w') as f:
yaml.dump(result, f)
return result
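
A sketch of calling the converter directly, under the same assumed
single-backend ``cinder.conf``; the returned dictionary mirrors what is
dumped to YAML::

    from cinderlib.cmd import cinder_to_yaml

    result = cinder_to_yaml.convert('/etc/cinder/cinder.conf')
    # e.g. {'backends': [{'volume_backend_name': 'lvm',
    #                     'volume_driver':
    #                         'cinder.volume.drivers.lvm.LVMVolumeDriver',
    #                     'volume_group': 'cinder-volumes'}]}
    print(result['backends'][0]['volume_backend_name'])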

cinderlib/exception.py

@@ -1,37 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
NotFound = exception.NotFound
VolumeNotFound = exception.VolumeNotFound
SnapshotNotFound = exception.SnapshotNotFound
ConnectionNotFound = exception.VolumeAttachmentNotFound
InvalidVolume = exception.InvalidVolume
class InvalidPersistence(Exception):
__msg = 'Invalid persistence storage: %s.'
def __init__(self, name):
super(InvalidPersistence, self).__init__(self.__msg % name)
class NotLocal(Exception):
__msg = "Volume %s doesn't seem to be attached locally."
def __init__(self, name):
super(NotLocal, self).__init__(self.__msg % name)
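
A tiny sketch of how these exception wrappers behave (the volume name is
hypothetical)::

    from cinderlib import exception

    try:
        raise exception.NotLocal('vol-demo')
    except exception.NotLocal as exc:
        # Prints: Volume vol-demo doesn't seem to be attached locally.
        print(exc)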

cinderlib/objects.py

@@ -1,996 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json as json_lib
import sys
import uuid
from cinder import context
from cinder import exception as cinder_exception
from cinder import objects as cinder_objs
from cinder.objects import base as cinder_base_ovo
from cinder.volume import volume_utils as volume_utils
from os_brick import exception as brick_exception
from os_brick import initiator as brick_initiator
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from cinderlib import exception
from cinderlib import utils
LOG = logging.getLogger(__name__)
DEFAULT_PROJECT_ID = 'cinderlib'
DEFAULT_USER_ID = 'cinderlib'
BACKEND_NAME_SNAPSHOT_FIELD = 'progress'
CONNECTIONS_OVO_FIELD = 'volume_attachment'
GB = 1024 ** 3
# This cannot go in the setup method because cinderlib object classes need
# the OVOs registered at import time in order to set OVO_CLASS.
cinder_objs.register_all()
class KeyValue(object):
def __init__(self, key=None, value=None):
self.key = key
self.value = value
def __eq__(self, other):
return (self.key, self.value) == (other.key, other.value)
class Object(object):
"""Base class for our resource representation objects."""
SIMPLE_JSON_IGNORE = tuple()
DEFAULT_FIELDS_VALUES = {}
LAZY_PROPERTIES = tuple()
backend_class = None
CONTEXT = context.RequestContext(user_id=DEFAULT_USER_ID,
project_id=DEFAULT_PROJECT_ID,
is_admin=True,
overwrite=False)
def _get_backend(self, backend_name_or_obj):
if isinstance(backend_name_or_obj, str):
try:
return self.backend_class.backends[backend_name_or_obj]
except KeyError:
if self.backend_class.fail_on_missing_backend:
raise
return backend_name_or_obj
def __init__(self, backend, **fields_data):
self.backend = self._get_backend(backend)
__ovo = fields_data.get('__ovo')
if __ovo:
self._ovo = __ovo
else:
self._ovo = self._create_ovo(**fields_data)
# Store a reference to the cinderlib obj in the OVO for serialization
self._ovo._cl_obj_ = self
@classmethod
def setup(cls, persistence_driver, backend_class, project_id, user_id,
non_uuid_ids):
cls.persistence = persistence_driver
cls.backend_class = backend_class
# Set the global context if we aren't using the default
project_id = project_id or DEFAULT_PROJECT_ID
user_id = user_id or DEFAULT_USER_ID
if (project_id != cls.CONTEXT.project_id or
user_id != cls.CONTEXT.user_id):
cls.CONTEXT.user_id = user_id
cls.CONTEXT.project_id = project_id
Volume.DEFAULT_FIELDS_VALUES['user_id'] = user_id
Volume.DEFAULT_FIELDS_VALUES['project_id'] = project_id
# Configure OVOs to support non_uuid_ids
if non_uuid_ids:
for ovo_name in cinder_base_ovo.CinderObjectRegistry.obj_classes():
ovo_cls = getattr(cinder_objs, ovo_name)
if 'id' in ovo_cls.fields:
ovo_cls.fields['id'] = cinder_base_ovo.fields.StringField()
def _to_primitive(self):
"""Return custom cinderlib data for serialization."""
return None
def _create_ovo(self, **fields_data):
# The base are the default values we define on our own classes
fields_values = self.DEFAULT_FIELDS_VALUES.copy()
# Apply the values defined by the caller
fields_values.update(fields_data)
# We support manually setting the id, so set only if not already set
# or if set to None
if not fields_values.get('id'):
fields_values['id'] = self.new_uuid()
# Set non set field values based on OVO's default value and on whether
# it is nullable or not.
for field_name, field in self.OVO_CLASS.fields.items():
if field.default != cinder_base_ovo.fields.UnspecifiedDefault:
fields_values.setdefault(field_name, field.default)
elif field.nullable:
fields_values.setdefault(field_name, None)
if ('created_at' in self.OVO_CLASS.fields and
not fields_values.get('created_at')):
fields_values['created_at'] = timeutils.utcnow()
return self.OVO_CLASS(context=self.CONTEXT, **fields_values)
@property
def json(self):
return self.to_json(simplified=False)
def to_json(self, simplified=True):
visited = set()
if simplified:
for field in self.SIMPLE_JSON_IGNORE:
if self._ovo.obj_attr_is_set(field):
visited.add(id(getattr(self._ovo, field)))
ovo = self._ovo.obj_to_primitive(visited=visited)
return {'class': type(self).__name__,
# If no driver loaded, just return the name of the backend
'backend': getattr(self.backend, 'config',
{'volume_backend_name': self.backend}),
'ovo': ovo}
@property
def jsons(self):
return self.to_jsons(simplified=False)
def to_jsons(self, simplified=True):
json_data = self.to_json(simplified)
return json_lib.dumps(json_data, separators=(',', ':'))
def _only_ovo_data(self, ovo):
if isinstance(ovo, dict):
if 'versioned_object.data' in ovo:
value = ovo['versioned_object.data']
if list(value.keys()) == ['objects']:
return self._only_ovo_data(value['objects'])
key = ovo['versioned_object.name'].lower()
return {key: self._only_ovo_data(value)}
for key in ovo.keys():
ovo[key] = self._only_ovo_data(ovo[key])
if isinstance(ovo, list) and ovo:
return [self._only_ovo_data(e) for e in ovo]
return ovo
def to_dict(self):
json_ovo = self.json
return self._only_ovo_data(json_ovo['ovo'])
@property
def dump(self):
# Make sure we load lazy loading properties
for lazy_property in self.LAZY_PROPERTIES:
getattr(self, lazy_property)
return self.json
@property
def dumps(self):
return json_lib.dumps(self.dump, separators=(',', ':'))
def __repr__(self):
backend = self.backend
if isinstance(self.backend, self.backend_class):
backend = backend.id
return ('<cinderlib.%s object %s on backend %s>' %
(type(self).__name__, self.id, backend))
@classmethod
def load(cls, json_src, save=False):
backend = cls.backend_class.load_backend(json_src['backend'])
ovo = cinder_base_ovo.CinderObject.obj_from_primitive(json_src['ovo'],
cls.CONTEXT)
return cls._load(backend, ovo, save=save)
@staticmethod
def new_uuid():
return str(uuid.uuid4())
def __getattr__(self, name):
if name == '_ovo':
raise AttributeError('Attribute _ovo is not yet set')
return getattr(self._ovo, name)
def _raise_with_resource(self):
exc_info = sys.exc_info()
exc_info[1].resource = self
if exc_info[1].__traceback__ is not exc_info[2]:
raise exc_info[1].with_traceback(exc_info[2])
raise exc_info[1]
class NamedObject(Object):
def __init__(self, backend, **fields_data):
if 'description' in fields_data:
fields_data['display_description'] = fields_data.pop('description')
if 'name' in fields_data:
fields_data['display_name'] = fields_data.pop('name')
super(NamedObject, self).__init__(backend, **fields_data)
@property
def name(self):
return self._ovo.display_name
@property
def description(self):
return self._ovo.display_description
@property
def name_in_storage(self):
return self._ovo.name
class LazyVolumeAttr(object):
LAZY_PROPERTIES = ('volume',)
_volume = None
def __init__(self, volume):
if volume:
self._volume = volume
# Ensure circular reference is set
self._ovo.volume = volume._ovo
self._ovo.volume_id = volume._ovo.id
elif self._ovo.obj_attr_is_set('volume'):
self._volume = Volume._load(self.backend, self._ovo.volume)
@property
def volume(self):
# Lazy loading
if self._volume is None:
self._volume = Volume.get_by_id(self.volume_id)
self._ovo.volume = self._volume._ovo
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
self._ovo.volume = value._ovo
def refresh(self):
last_self = self.get_by_id(self.id)
if self._volume is not None:
last_self.volume
vars(self).clear()
vars(self).update(vars(last_self))
class Volume(NamedObject):
OVO_CLASS = cinder_objs.Volume
SIMPLE_JSON_IGNORE = ('snapshots', 'volume_attachment')
DEFAULT_FIELDS_VALUES = {
'size': 1,
'user_id': Object.CONTEXT.user_id,
'project_id': Object.CONTEXT.project_id,
'status': 'creating',
'attach_status': 'detached',
'metadata': {},
'admin_metadata': {},
'glance_metadata': {},
}
LAZY_PROPERTIES = ('snapshots', 'connections')
_ignore_keys = ('id', CONNECTIONS_OVO_FIELD, 'snapshots', 'volume_type')
def __init__(self, backend_or_vol, pool_name=None, **kwargs):
# Accept backend name for convenience
if isinstance(backend_or_vol, str):
backend_name = backend_or_vol
backend_or_vol = self._get_backend(backend_or_vol)
elif isinstance(backend_or_vol, self.backend_class):
backend_name = backend_or_vol.id
elif isinstance(backend_or_vol, Volume):
backend_str, pool = backend_or_vol._ovo.host.split('#')
backend_name = backend_str.split('@')[-1]
pool_name = pool_name or pool
for key in backend_or_vol._ovo.fields:
if (backend_or_vol._ovo.obj_attr_is_set(key) and
key not in self._ignore_keys):
kwargs.setdefault(key, getattr(backend_or_vol._ovo, key))
if backend_or_vol.volume_type:
kwargs.setdefault('extra_specs',
backend_or_vol.volume_type.extra_specs)
if backend_or_vol.volume_type.qos_specs:
kwargs.setdefault(
'qos_specs',
backend_or_vol.volume_type.qos_specs.specs)
backend_or_vol = backend_or_vol.backend
if '__ovo' not in kwargs:
kwargs[CONNECTIONS_OVO_FIELD] = (
cinder_objs.VolumeAttachmentList(context=self.CONTEXT))
kwargs['snapshots'] = (
cinder_objs.SnapshotList(context=self.CONTEXT))
self._snapshots = []
self._connections = []
qos_specs = kwargs.pop('qos_specs', None)
extra_specs = kwargs.pop('extra_specs', {})
super(Volume, self).__init__(backend_or_vol, **kwargs)
self._populate_data()
self.local_attach = None
# If we overwrote the host, then we ignore pool_name and don't set a
# default value or copy the one from the source either.
if 'host' not in kwargs and '__ovo' not in kwargs:
# TODO(geguileo): Add pool support
pool_name = pool_name or backend_or_vol.pool_names[0]
self._ovo.host = ('%s@%s#%s' %
(cfg.CONF.host, backend_name, pool_name))
if qos_specs or extra_specs:
if qos_specs:
qos_specs = cinder_objs.QualityOfServiceSpecs(
id=self.id, name=self.id,
consumer='back-end', specs=qos_specs)
qos_specs_id = self.id
else:
qos_specs = qos_specs_id = None
self._ovo.volume_type = cinder_objs.VolumeType(
context=self.CONTEXT,
is_public=True,
id=self.id,
name=self.id,
qos_specs_id=qos_specs_id,
extra_specs=extra_specs,
qos_specs=qos_specs)
self._ovo.volume_type_id = self.id
@property
def snapshots(self):
# Lazy loading
if self._snapshots is None:
self._snapshots = self.persistence.get_snapshots(volume_id=self.id)
for snap in self._snapshots:
snap.volume = self
ovos = [snap._ovo for snap in self._snapshots]
self._ovo.snapshots = cinder_objs.SnapshotList(objects=ovos)
self._ovo.obj_reset_changes(('snapshots',))
return self._snapshots
@property
def connections(self):
# Lazy loading
if self._connections is None:
# Check if the driver has already lazy loaded it using OVOs
if self._ovo.obj_attr_is_set(CONNECTIONS_OVO_FIELD):
conns = [Connection(None, volume=self, __ovo=ovo)
for ovo
in getattr(self._ovo, CONNECTIONS_OVO_FIELD).objects]
# Retrieve data from persistence storage
else:
conns = self.persistence.get_connections(volume_id=self.id)
for conn in conns:
conn.volume = self
ovos = [conn._ovo for conn in conns]
setattr(self._ovo, CONNECTIONS_OVO_FIELD,
cinder_objs.VolumeAttachmentList(objects=ovos))
self._ovo.obj_reset_changes((CONNECTIONS_OVO_FIELD,))
self._connections = conns
return self._connections
@classmethod
def get_by_id(cls, volume_id):
result = cls.persistence.get_volumes(volume_id=volume_id)
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result[0]
@classmethod
def get_by_name(cls, volume_name):
return cls.persistence.get_volumes(volume_name=volume_name)
def _populate_data(self):
if self._ovo.obj_attr_is_set('snapshots'):
self._snapshots = []
for snap_ovo in self._ovo.snapshots:
# Set circular reference
snap_ovo.volume = self._ovo
Snapshot._load(self.backend, snap_ovo, self)
else:
self._snapshots = None
if self._ovo.obj_attr_is_set(CONNECTIONS_OVO_FIELD):
self._connections = []
for conn_ovo in getattr(self._ovo, CONNECTIONS_OVO_FIELD):
# Set circular reference
conn_ovo.volume = self._ovo
Connection._load(self.backend, conn_ovo, self)
else:
self._connections = None
@classmethod
def _load(cls, backend, ovo, save=None):
vol = cls(backend, __ovo=ovo)
if save:
vol.save()
if vol._snapshots:
for s in vol._snapshots:
s.obj_reset_changes()
s.save()
if vol._connections:
for c in vol._connections:
c.obj_reset_changes()
c.save()
return vol
def create(self):
self.backend._start_creating_volume(self)
try:
model_update = self.backend.driver.create_volume(self._ovo)
self._ovo.status = 'available'
if model_update:
self._ovo.update(model_update)
self.backend._volume_created(self)
except Exception:
self._ovo.status = 'error'
self._raise_with_resource()
finally:
self.save()
def _snapshot_removed(self, snapshot):
# The snapshot instance in memory could be out of sync and not be
# identical, so check by ID.
i, snap = utils.find_by_id(snapshot.id, self._snapshots)
if snap:
del self._snapshots[i]
i, ovo = utils.find_by_id(snapshot.id, self._ovo.snapshots.objects)
if ovo:
del self._ovo.snapshots.objects[i]
def _connection_removed(self, connection):
# The connection instance in memory could be out of sync and not be
# identical, so check by ID.
i, conn = utils.find_by_id(connection.id, self._connections)
if conn:
del self._connections[i]
ovo_conns = getattr(self._ovo, CONNECTIONS_OVO_FIELD).objects
i, ovo_conn = utils.find_by_id(connection.id, ovo_conns)
if ovo_conn:
del ovo_conns[i]
def delete(self):
if self.snapshots:
msg = 'Cannot delete volume %s with snapshots' % self.id
raise exception.InvalidVolume(reason=msg)
try:
self.backend.driver.delete_volume(self._ovo)
self.persistence.delete_volume(self)
self.backend._volume_removed(self)
self._ovo.status = 'deleted'
except Exception:
self._ovo.status = 'error_deleting'
self.save()
self._raise_with_resource()
def extend(self, size):
volume = self._ovo
volume.previous_status = volume.status
volume.status = 'extending'
try:
self.backend.driver.extend_volume(volume, size)
volume.size = size
volume.status = volume.previous_status
volume.previous_status = None
except Exception:
volume.status = 'error'
self._raise_with_resource()
finally:
self.save()
if volume.status == 'in-use' and self.local_attach:
return self.local_attach.extend()
# Must return size in bytes
return size * GB
def clone(self, **new_vol_attrs):
new_vol_attrs['source_volid'] = self.id
new_vol = Volume(self, **new_vol_attrs)
self.backend._start_creating_volume(new_vol)
try:
model_update = self.backend.driver.create_cloned_volume(
new_vol._ovo, self._ovo)
new_vol._ovo.status = 'available'
if model_update:
new_vol._ovo.update(model_update)
self.backend._volume_created(new_vol)
except Exception:
new_vol._ovo.status = 'error'
new_vol._raise_with_resource()
finally:
new_vol.save()
return new_vol
def create_snapshot(self, name='', description='', **kwargs):
snap = Snapshot(self, name=name, description=description, **kwargs)
try:
snap.create()
finally:
if self._snapshots is not None:
self._snapshots.append(snap)
self._ovo.snapshots.objects.append(snap._ovo)
return snap
def attach(self):
connector_dict = volume_utils.brick_get_connector_properties(
self.backend.configuration.use_multipath_for_image_xfer,
self.backend.configuration.enforce_multipath_for_image_xfer)
conn = self.connect(connector_dict)
try:
conn.attach()
except Exception:
self.disconnect(conn)
raise
return conn
def detach(self, force=False, ignore_errors=False):
if not self.local_attach:
raise exception.NotLocal(self.id)
exc = brick_exception.ExceptionChainer()
conn = self.local_attach
try:
conn.detach(force, ignore_errors, exc)
except Exception:
if not force:
raise
with exc.context(force, 'Unable to disconnect'):
conn.disconnect(force)
if exc and not ignore_errors:
raise exc
def connect(self, connector_dict, **ovo_fields):
model_update = self.backend.driver.create_export(self.CONTEXT,
self._ovo,
connector_dict)
if model_update:
self._ovo.update(model_update)
self.save()
try:
conn = Connection.connect(self, connector_dict, **ovo_fields)
if self._connections is not None:
self._connections.append(conn)
ovo_conns = getattr(self._ovo, CONNECTIONS_OVO_FIELD).objects
ovo_conns.append(conn._ovo)
self._ovo.status = 'in-use'
self.save()
except Exception:
self._remove_export()
self._raise_with_resource()
return conn
def _disconnect(self, connection):
self._remove_export()
self._connection_removed(connection)
if not self.connections:
self._ovo.status = 'available'
self.save()
def disconnect(self, connection, force=False):
connection._disconnect(force)
self._disconnect(connection)
def cleanup(self):
for attach in self.connections:
attach.detach()
self._remove_export()
def _remove_export(self):
self.backend.driver.remove_export(self._context, self._ovo)
def refresh(self):
last_self = self.get_by_id(self.id)
if self._snapshots is not None:
last_self.snapshots
if self._connections is not None:
last_self.connections
last_self.local_attach = self.local_attach
vars(self).clear()
vars(self).update(vars(last_self))
def save(self):
self.persistence.set_volume(self)
class Connection(Object, LazyVolumeAttr):
"""Cinderlib Connection info that maps to VolumeAttachment.
On Pike we don't have the connector field on the VolumeAttachment ORM
instance so we use the connection_info to store everything.
We'll have a dictionary:
{'conn': connection info,
 'connector': connector dictionary,
 'device': result of connect_volume}
"""
OVO_CLASS = cinder_objs.VolumeAttachment
SIMPLE_JSON_IGNORE = ('volume',)
@classmethod
def connect(cls, volume, connector, **kwargs):
conn_info = volume.backend.driver.initialize_connection(
volume._ovo, connector)
conn = cls(volume.backend,
connector=connector,
volume=volume,
status='attached',
connection_info={'conn': conn_info},
**kwargs)
conn.connector_info = connector
conn.save()
return conn
@staticmethod
def _is_multipathed_conn(kwargs):
# Priority:
# - kwargs['use_multipath']
# - Multipath in connector_dict in kwargs or _ovo
# - Detect from connection_info data from OVO in kwargs
if 'use_multipath' in kwargs:
return kwargs['use_multipath']
connector = kwargs.get('connector') or {}
conn_info = kwargs.get('connection_info') or {}
if '__ovo' in kwargs:
ovo = kwargs['__ovo']
conn_info = conn_info or ovo.connection_info or {}
connector = connector or conn_info.get('connector') or {}
if 'multipath' in connector:
return connector['multipath']
# If multipathed not defined autodetect based on connection info
conn_info = conn_info.get('conn', {}).get('data', {})
iscsi_mp = 'target_iqns' in conn_info and 'target_portals' in conn_info
fc_mp = not isinstance(conn_info.get('target_wwn', ''), str)
return iscsi_mp or fc_mp
def __init__(self, *args, **kwargs):
self.use_multipath = self._is_multipathed_conn(kwargs)
scan_attempts = brick_initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT
self.scan_attempts = kwargs.pop('device_scan_attempts', scan_attempts)
volume = kwargs.pop('volume', None)
self._connector = None
super(Connection, self).__init__(*args, **kwargs)
LazyVolumeAttr.__init__(self, volume)
# Attributes could be coming from __ovo, so we need to do this after
# all the initialization.
data = self.conn_info.get('data', {})
if not (self._ovo.obj_attr_is_set('attach_mode') and self.attach_mode):
self._ovo.attach_mode = data.get('access_mode', 'rw')
if data:
data['access_mode'] = self.attach_mode
@property
def conn_info(self):
conn_info = self._ovo.connection_info
if conn_info:
return conn_info.get('conn')
return {}
@conn_info.setter
def conn_info(self, value):
if not value:
self._ovo.connection_info = None
return
if self._ovo.connection_info is None:
self._ovo.connection_info = {}
# access_mode in the connection_info is set on __init__, here we ensure
# it's also set whenever we change the connection_info out of __init__.
if 'data' in value:
mode = value['data'].setdefault('access_mode', self.attach_mode)
# Keep attach_mode in sync.
self._ovo.attach_mode = mode
self._ovo.connection_info['conn'] = value
@property
def protocol(self):
return self.conn_info.get('driver_volume_type')
@property
def connector_info(self):
if self.connection_info:
return self.connection_info.get('connector')
return None
@connector_info.setter
def connector_info(self, value):
if self._ovo.connection_info is None:
self._ovo.connection_info = {}
self.connection_info['connector'] = value
# Since we are changing the dictionary the OVO won't detect the change
self._changed_fields.add('connection_info')
@property
def device(self):
if self.connection_info:
return self.connection_info.get('device')
return None
@device.setter
def device(self, value):
if value:
self.connection_info['device'] = value
else:
self.connection_info.pop('device', None)
# Since we are changing the dictionary the OVO won't detect the change
self._changed_fields.add('connection_info')
@property
def path(self):
device = self.device
if not device:
return None
return device['path']
@property
def connector(self):
if not self._connector:
if not self.conn_info:
return None
self._connector = volume_utils.brick_get_connector(
self.protocol,
use_multipath=self.use_multipath,
device_scan_attempts=self.scan_attempts,
# NOTE(geguileo): afaik only remotefs uses the connection info
conn=self.conn_info,
do_local_attach=True)
return self._connector
@property
def attached(self):
return bool(self.device)
@property
def connected(self):
return bool(self.conn_info)
@classmethod
def _load(cls, backend, ovo, volume=None, save=False):
# We let the __init__ method set the _volume if it exists
conn = cls(backend, __ovo=ovo, volume=volume)
if save:
conn.save()
# Restore circular reference only if we have all the elements
if conn._volume:
utils.add_by_id(conn, conn._volume._connections)
connections = getattr(conn._volume._ovo,
CONNECTIONS_OVO_FIELD).objects
utils.add_by_id(conn._ovo, connections)
return conn
def _disconnect(self, force=False):
self.backend.driver.terminate_connection(self.volume._ovo,
self.connector_info,
force=force)
self.conn_info = None
self._ovo.status = 'detached'
self.persistence.delete_connection(self)
def disconnect(self, force=False):
self._disconnect(force)
self.volume._disconnect(self)
def device_attached(self, device):
self.device = device
self.save()
def attach(self):
device = self.connector.connect_volume(self.conn_info['data'])
self.device_attached(device)
try:
if self.connector.check_valid_device(self.path):
error_msg = None
else:
error_msg = ('Unable to access the backend storage via path '
'%s.' % self.path)
except Exception:
error_msg = ('Could not validate device %s. There may be missing '
'packages on your host.' % self.path)
LOG.exception(error_msg)
if error_msg:
# Prepare exception while we still have the value of the path
exc = cinder_exception.DeviceUnavailable(
path=self.path, attach_info=self._ovo.connection_info,
reason=error_msg)
self.detach(force=True, ignore_errors=True)
raise exc
if self._volume:
self.volume.local_attach = self
def detach(self, force=False, ignore_errors=False, exc=None):
if not exc:
exc = brick_exception.ExceptionChainer()
with exc.context(force, 'Disconnect failed'):
self.connector.disconnect_volume(self.conn_info['data'],
self.device,
force=force,
ignore_errors=ignore_errors)
if not exc or ignore_errors:
if self._volume:
self.volume.local_attach = None
self.device = None
self.save()
self._connector = None
if exc and not ignore_errors:
raise exc
@classmethod
def get_by_id(cls, connection_id):
result = cls.persistence.get_connections(connection_id=connection_id)
if not result:
msg = 'id=%s' % connection_id
raise exception.ConnectionNotFound(filter=msg)
return result[0]
@property
def backend(self):
if self._backend is None and hasattr(self, '_volume'):
self._backend = self.volume.backend
return self._backend
@backend.setter
def backend(self, value):
self._backend = value
def save(self):
self.persistence.set_connection(self)
def extend(self):
return self.connector.extend_volume(self.conn_info['data'])
class Snapshot(NamedObject, LazyVolumeAttr):
OVO_CLASS = cinder_objs.Snapshot
SIMPLE_JSON_IGNORE = ('volume',)
DEFAULT_FIELDS_VALUES = {
'status': 'creating',
'metadata': {},
}
def __init__(self, volume, **kwargs):
param_backend = self._get_backend(kwargs.pop('backend', None))
if '__ovo' in kwargs:
backend = kwargs['__ovo'][BACKEND_NAME_SNAPSHOT_FIELD]
else:
kwargs.setdefault('user_id', volume.user_id)
kwargs.setdefault('project_id', volume.project_id)
kwargs['volume_id'] = volume.id
kwargs['volume_size'] = volume.size
kwargs['volume_type_id'] = volume.volume_type_id
kwargs['volume'] = volume._ovo
if volume:
backend = volume.backend.id
kwargs[BACKEND_NAME_SNAPSHOT_FIELD] = backend
else:
backend = param_backend and param_backend.id
if not (backend or param_backend):
raise ValueError('Backend not provided')
if backend and param_backend and param_backend.id != backend:
raise ValueError("Multiple backends provided and they don't match")
super(Snapshot, self).__init__(backend=param_backend or backend,
**kwargs)
LazyVolumeAttr.__init__(self, volume)
@classmethod
def _load(cls, backend, ovo, volume=None, save=False):
# We let the __init__ method set the _volume if it exists
snap = cls(volume, backend=backend, __ovo=ovo)
if save:
snap.save()
# Restore circular reference only if we have all the elements
if snap._volume:
utils.add_by_id(snap, snap._volume._snapshots)
utils.add_by_id(snap._ovo, snap._volume._ovo.snapshots.objects)
return snap
def create(self):
try:
model_update = self.backend.driver.create_snapshot(self._ovo)
self._ovo.status = 'available'
if model_update:
self._ovo.update(model_update)
except Exception:
self._ovo.status = 'error'
self._raise_with_resource()
finally:
self.save()
def delete(self):
try:
self.backend.driver.delete_snapshot(self._ovo)
self.persistence.delete_snapshot(self)
self._ovo.status = 'deleted'
except Exception:
self._ovo.status = 'error_deleting'
self.save()
self._raise_with_resource()
if self._volume is not None:
self._volume._snapshot_removed(self)
def create_volume(self, **new_vol_params):
new_vol_params.setdefault('size', self.volume_size)
new_vol_params['snapshot_id'] = self.id
new_vol = Volume(self.volume, **new_vol_params)
self.backend._start_creating_volume(new_vol)
try:
model_update = self.backend.driver.create_volume_from_snapshot(
new_vol._ovo, self._ovo)
new_vol._ovo.status = 'available'
if model_update:
new_vol._ovo.update(model_update)
self.backend._volume_created(new_vol)
except Exception:
new_vol._ovo.status = 'error'
new_vol._raise_with_resource()
finally:
new_vol.save()
return new_vol
@classmethod
def get_by_id(cls, snapshot_id):
result = cls.persistence.get_snapshots(snapshot_id=snapshot_id)
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result[0]
@classmethod
def get_by_name(cls, snapshot_name):
return cls.persistence.get_snapshots(snapshot_name=snapshot_name)
def save(self):
self.persistence.set_snapshot(self)
setup = Object.setup
CONTEXT = Object.CONTEXT
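
A minimal end-to-end sketch of the objects defined above; `lvm` is assumed to be an already configured cinderlib Backend (backend setup lives outside this module):

# Minimal lifecycle sketch; `lvm` is an assumed, already configured Backend.
from cinderlib.objects import Volume

vol = Volume(lvm, size=1, name='demo')
vol.create()                       # calls driver.create_volume and saves
snap = vol.create_snapshot('s1')   # Snapshot.create under the hood
conn = vol.attach()                # connect + local attach via os-brick
print(conn.path)                   # local block device path, if attached
vol.detach()
snap.delete()                      # volumes with snapshots can't be deleted
vol.delete()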

View File

@ -1,65 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
from stevedore import driver
from cinderlib import exception
from cinderlib.persistence import base
DEFAULT_STORAGE = 'memory'
def setup(config):
"""Setup persistence to be used in cinderlib.
By default memory persistence will be used, but there are other mechanisms
available, as well as ways to plug in custom ones:
- Persistence plugins: the plugin mechanism uses Python entry points under
the cinderlib.persistence.storage namespace, and cinderlib comes with 3
different mechanisms, "memory", "dbms", and "memory_dbms". To use any of
these, pass the string name in the storage parameter and any other
configuration as keyword arguments.
- Passing a class that inherits from PersistenceDriverBase as storage
parameter and initialization parameters as keyword arguments.
- Passing an instance that inherits from PersistenceDriverBase as storage
parameter.
"""
if config is None:
config = {}
else:
config = config.copy()
# Default configuration is using memory storage
storage = config.pop('storage', None) or DEFAULT_STORAGE
if isinstance(storage, base.PersistenceDriverBase):
return storage
if inspect.isclass(storage) and issubclass(storage,
base.PersistenceDriverBase):
return storage(**config)
if not isinstance(storage, str):
raise exception.InvalidPersistence(storage)
persistence_driver = driver.DriverManager(
namespace='cinderlib.persistence.storage',
name=storage,
invoke_on_load=True,
invoke_kwds=config,
)
return persistence_driver.driver
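
To make the three options above concrete, a hedged sketch (the sqlite URL is an assumption and the custom class is deliberately skeletal):

# Hedged sketch of the three configuration styles; the sqlite URL is an
# illustrative assumption.
from cinderlib.persistence import base, setup

plugin = setup({'storage': 'dbms', 'connection': 'sqlite:///cl.sqlite'})

class MyPersistence(base.PersistenceDriverBase):
    """Skeleton only; real drivers implement the get_*/set_*/delete_* API."""

p_from_class = setup({'storage': MyPersistence})
p_from_instance = setup({'storage': MyPersistence()})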

View File

@ -1,259 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(geguileo): Probably a good idea not to depend on cinder.cmd.volume
# having all the other imports as they could change.
from cinder import objects
from cinder.objects import base as cinder_base_ovo
from oslo_utils import timeutils
from oslo_versionedobjects import fields
import cinderlib
from cinderlib import serialization
class PersistenceDriverBase(object):
"""Provide Metadata Persistency for our resources.
This class will be used to store new resources as they are created,
updated, and removed, as well as provide a mechanism for users to retrieve
volumes, snapshots, and connections.
"""
def __init__(self, **kwargs):
pass
@property
def db(self):
raise NotImplementedError()
def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
raise NotImplementedError()
def get_snapshots(self, snapshot_id=None, snapshot_name=None,
volume_id=None):
raise NotImplementedError()
def get_connections(self, connection_id=None, volume_id=None):
raise NotImplementedError()
def get_key_values(self, key):
raise NotImplementedError()
def set_volume(self, volume):
self.reset_change_tracker(volume)
if volume.volume_type:
volume.volume_type.obj_reset_changes()
if volume.volume_type.qos_specs_id:
volume.volume_type.qos_specs.obj_reset_changes()
def set_snapshot(self, snapshot):
self.reset_change_tracker(snapshot)
def set_connection(self, connection):
self.reset_change_tracker(connection)
def set_key_value(self, key_value):
pass
def delete_volume(self, volume):
self._set_deleted(volume)
self.reset_change_tracker(volume)
def delete_snapshot(self, snapshot):
self._set_deleted(snapshot)
self.reset_change_tracker(snapshot)
def delete_connection(self, connection):
self._set_deleted(connection)
self.reset_change_tracker(connection)
def delete_key_value(self, key):
pass
def _set_deleted(self, resource):
resource._ovo.deleted = True
resource._ovo.deleted_at = timeutils.utcnow()
if hasattr(resource._ovo, 'status'):
resource._ovo.status = 'deleted'
def reset_change_tracker(self, resource, fields=None):
if isinstance(fields, str):
fields = (fields,)
resource._ovo.obj_reset_changes(fields)
def get_changed_fields(self, resource):
# NOTE(geguileo): We don't use cinder_obj_get_changes to prevent
# recursion to children OVO which we are not interested and may result
# in circular references.
result = {key: getattr(resource._ovo, key)
for key in resource._changed_fields
if not isinstance(resource.fields[key], fields.ObjectField)}
if getattr(resource._ovo, 'volume_type', None):
if ('qos_specs' in resource.volume_type._changed_fields and
resource.volume_type.qos_specs):
result['qos_specs'] = resource._ovo.volume_type.qos_specs.specs
if ('extra_specs' in resource.volume_type._changed_fields and
resource.volume_type.extra_specs):
result['extra_specs'] = resource._ovo.volume_type.extra_specs
return result
def get_fields(self, resource):
result = {
key: getattr(resource._ovo, key)
for key in resource.fields
if (resource._ovo.obj_attr_is_set(key) and
key not in getattr(resource, 'obj_extra_fields', []) and not
isinstance(resource.fields[key], fields.ObjectField))
}
if getattr(resource._ovo, 'volume_type_id', None):
result['extra_specs'] = resource._ovo.volume_type.extra_specs
if resource._ovo.volume_type.qos_specs_id:
result['qos_specs'] = resource._ovo.volume_type.qos_specs.specs
return result
class DB(object):
"""Replacement for DB access methods.
This will serve as replacement for methods used by:
- Drivers
- OVOs' get_by_id and save methods
- DB implementation
Data will be retrieved using the persistence driver we set up.
"""
GET_METHODS_PER_DB_MODEL = {
objects.Volume.model: 'volume_get',
objects.VolumeType.model: 'volume_type_get',
objects.Snapshot.model: 'snapshot_get',
objects.QualityOfServiceSpecs.model: 'qos_specs_get',
}
def __init__(self, persistence_driver):
self.persistence = persistence_driver
# Replace get_by_id OVO methods with something that will return
# expected data
objects.Volume.get_by_id = self.volume_get
objects.Snapshot.get_by_id = self.snapshot_get
objects.VolumeAttachmentList.get_all_by_volume_id = \
self.__connections_get
# Disable saving in OVOs
for ovo_name in cinder_base_ovo.CinderObjectRegistry.obj_classes():
ovo_cls = getattr(objects, ovo_name)
ovo_cls.save = lambda *args, **kwargs: None
def __connections_get(self, context, volume_id):
# Used by drivers to lazy load volume_attachment
connections = self.persistence.get_connections(volume_id=volume_id)
ovos = [conn._ovo for conn in connections]
result = objects.VolumeAttachmentList(objects=ovos)
return result
def __volume_get(self, volume_id, as_ovo=True):
in_memory = volume_id in cinderlib.Backend._volumes_inflight
if in_memory:
vol = cinderlib.Backend._volumes_inflight[volume_id]
else:
vol = self.persistence.get_volumes(volume_id)[0]
vol_result = vol._ovo if as_ovo else vol
return in_memory, vol_result
def volume_get(self, context, volume_id, *args, **kwargs):
return self.__volume_get(volume_id)[1]
def snapshot_get(self, context, snapshot_id, *args, **kwargs):
return self.persistence.get_snapshots(snapshot_id)[0]._ovo
def volume_type_get(self, context, id, inactive=False,
expected_fields=None):
if id in cinderlib.Backend._volumes_inflight:
vol = cinderlib.Backend._volumes_inflight[id]
else:
vol = self.persistence.get_volumes(id)[0]
if not vol._ovo.volume_type_id:
return None
return vol_type_to_dict(vol._ovo.volume_type)
# Our volume type name is the same as the id and the volume name
def _volume_type_get_by_name(self, context, name, session=None):
return self.volume_type_get(context, name)
def qos_specs_get(self, context, qos_specs_id, inactive=False):
if qos_specs_id in cinderlib.Backend._volumes_inflight:
vol = cinderlib.Backend._volumes_inflight[qos_specs_id]
else:
vol = self.persistence.get_volumes(qos_specs_id)[0]
if not vol._ovo.volume_type_id:
return None
return vol_type_to_dict(vol._ovo.volume_type)['qos_specs']
@classmethod
def image_volume_cache_get_by_volume_id(cls, context, volume_id):
return None
def get_by_id(self, context, model, id, *args, **kwargs):
method = getattr(self, self.GET_METHODS_PER_DB_MODEL[model])
return method(context, id)
def volume_get_all_by_host(self, context, host, filters=None):
backend_name = host.split('#')[0].split('@')[1]
result = self.persistence.get_volumes(backend_name=backend_name)
return [vol._ovo for vol in result]
def _volume_admin_metadata_get(self, context, volume_id, session=None):
vol = self.volume_get(context, volume_id)
return vol.admin_metadata
def _volume_admin_metadata_update(self, context, volume_id, metadata,
delete, session=None, add=True,
update=True):
vol_in_memory, vol = self.__volume_get(volume_id, as_ovo=False)
changed = False
if delete:
remove = set(vol.admin_metadata.keys()).difference(metadata.keys())
changed = bool(remove)
for k in remove:
del vol.admin_metadata[k]
for k, v in metadata.items():
is_in = k in vol.admin_metadata
if (not is_in and add) or (is_in and update):
vol.admin_metadata[k] = v
changed = True
if changed and not vol_in_memory:
vol._changed_fields.add('admin_metadata')
self.persistence.set_volume(vol)
def volume_admin_metadata_delete(self, context, volume_id, key):
vol_in_memory, vol = self.__volume_get(volume_id, as_ovo=False)
if key in vol.admin_metadata:
del vol.admin_metadata[key]
if not vol_in_memory:
vol._changed_fields.add('admin_metadata')
self.persistence.set_volume(vol)
def vol_type_to_dict(volume_type):
res = serialization.obj_to_primitive(volume_type)
res = res['versioned_object.data']
if res.get('qos_specs'):
res['qos_specs'] = res['qos_specs']['versioned_object.data']
return res
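
As a hedged illustration of the contract above, a skeletal in-memory driver; the dict store and the single get/set pair shown are assumptions, and a real driver implements the full get_*/set_*/delete_* API:

# Skeletal driver against the PersistenceDriverBase contract above; only a
# volume get/set pair is sketched, and the dict store is an assumption.
class DictPersistence(PersistenceDriverBase):
    def __init__(self, **kwargs):
        self._vols = {}
        super(DictPersistence, self).__init__(**kwargs)

    def get_volumes(self, volume_id=None, volume_name=None,
                    backend_name=None):
        vols = self._vols.values()
        if volume_id is not None:
            vols = [v for v in vols if v.id == volume_id]
        return list(vols)

    def set_volume(self, volume):
        self._vols[volume.id] = volume
        super(DictPersistence, self).set_volume(volume)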

View File

@ -1,417 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import alembic.script.revision
import alembic.util.exc
from cinder.db import api as db_api
from cinder.db import migration
from cinder.db.sqlalchemy import api as sqla_api
from cinder.db.sqlalchemy import models
from cinder import exception as cinder_exception
from cinder import objects as cinder_objs
from oslo_config import cfg
from oslo_db import exception
from oslo_db.sqlalchemy import models as oslo_db_models
from oslo_log import log
import sqlalchemy as sa
from cinderlib import objects
from cinderlib.persistence import base as persistence_base
LOG = log.getLogger(__name__)
def db_writer(func):
"""Decorator to start a DB writing transaction.
With the new Oslo DB Transaction Sessions everything needs to use the
sessions of the enginefacade using the function decorator or the context
manager approach: https://docs.openstack.org/oslo.db/ocata/usage.html
This plugin cannot use the decorator form because its functions don't
receive a Context object that the decorator can find and use, so we use
this decorator instead.
Cinder DB API methods already have a decorator, so methods calling them
don't require this decorator, but methods that directly call the DB using
sqlalchemy or using the model_query method do.
Using this decorator at this level also allows us to enclose everything in
a single transaction, and it doesn't have any problems with the existing
Cinder decorators.
"""
def wrapper(*args, **kwargs):
with sqla_api.main_context_manager.writer.using(objects.CONTEXT):
return func(*args, **kwargs)
return wrapper
class KeyValue(models.BASE, oslo_db_models.ModelBase, objects.KeyValue):
__tablename__ = 'cinderlib_persistence_key_value'
key = sa.Column(sa.String(255), primary_key=True)
value = sa.Column(sa.Text)
class DBPersistence(persistence_base.PersistenceDriverBase):
GET_METHODS_PER_DB_MODEL = {
cinder_objs.VolumeType.model: 'volume_type_get',
cinder_objs.QualityOfServiceSpecs.model: 'qos_specs_get',
}
def __init__(self, connection, sqlite_synchronous=True,
soft_deletes=False):
self.soft_deletes = soft_deletes
cfg.CONF.set_override('connection', connection, 'database')
cfg.CONF.set_override('sqlite_synchronous',
sqlite_synchronous,
'database')
# Suppress logging for alembic
alembic_logger = logging.getLogger('alembic.runtime.migration')
alembic_logger.setLevel(logging.WARNING)
self._clear_facade()
self.db_instance = db_api.oslo_db_api.DBAPI.from_config(
conf=cfg.CONF, backend_mapping=db_api._BACKEND_MAPPING,
lazy=True)
# We need to wrap some get methods that get called before the volume is
# actually created.
self.original_vol_type_get = self.db_instance.volume_type_get
self.db_instance.volume_type_get = self.vol_type_get
self.original_qos_specs_get = self.db_instance.qos_specs_get
self.db_instance.qos_specs_get = self.qos_specs_get
self.original_get_by_id = self.db_instance.get_by_id
self.db_instance.get_by_id = self.get_by_id
try:
migration.db_sync()
except alembic.util.exc.CommandError as exc:
# We can be running 2 Cinder versions at the same time on the same
# DB while we upgrade, so we must ignore the fact that the DB is
# now on a newer version.
if not isinstance(
exc.__cause__, alembic.script.revision.ResolutionError,
):
raise
self._create_key_value_table()
# NOTE: At this point, the persistence isn't ready so we need to use
# db_instance instead of sqlalchemy API or DB API.
orm_obj = self.db_instance.volume_type_get_by_name(objects.CONTEXT,
'__DEFAULT__')
cls = cinder_objs.VolumeType
expected_attrs = cls._get_expected_attrs(objects.CONTEXT)
self.DEFAULT_TYPE = cls._from_db_object(
objects.CONTEXT, cls(objects.CONTEXT), orm_obj,
expected_attrs=expected_attrs)
super(DBPersistence, self).__init__()
def vol_type_get(self, context, id, inactive=False,
expected_fields=None):
if id not in objects.Backend._volumes_inflight:
return self.original_vol_type_get(context, id, inactive)
vol = objects.Backend._volumes_inflight[id]._ovo
if not vol.volume_type_id:
return None
return persistence_base.vol_type_to_dict(vol.volume_type)
def qos_specs_get(self, context, qos_specs_id, inactive=False):
if qos_specs_id not in objects.Backend._volumes_inflight:
return self.original_qos_specs_get(context, qos_specs_id, inactive)
vol = objects.Backend._volumes_inflight[qos_specs_id]._ovo
if not vol.volume_type_id:
return None
return persistence_base.vol_type_to_dict(vol.volume_type)['qos_specs']
def get_by_id(self, context, model, id, *args, **kwargs):
if model not in self.GET_METHODS_PER_DB_MODEL:
return self.original_get_by_id(context, model, id, *args, **kwargs)
method = getattr(self, self.GET_METHODS_PER_DB_MODEL[model])
return method(context, id)
def _clear_facade(self):
# This is for Pike
if hasattr(sqla_api, '_FACADE'):
sqla_api._FACADE = None
# This is for Queens or later
elif hasattr(sqla_api, 'main_context_manager'):
sqla_api.main_context_manager.configure(**dict(cfg.CONF.database))
def _create_key_value_table(self):
models.BASE.metadata.create_all(sqla_api.get_engine(),
tables=[KeyValue.__table__])
@property
def db(self):
return self.db_instance
@staticmethod
def _build_filter(**kwargs):
return {key: value for key, value in kwargs.items() if value}
def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
# Use the % wildcard to ignore the host name on the backend_name search
host = '%@' + backend_name if backend_name else None
filters = self._build_filter(id=volume_id, display_name=volume_name,
host=host)
LOG.debug('get_volumes for %s', filters)
ovos = cinder_objs.VolumeList.get_all(objects.CONTEXT, filters=filters)
result = []
for ovo in ovos:
backend = ovo.host.split('@')[-1].split('#')[0]
# Trigger lazy loading of specs
if ovo.volume_type_id:
ovo.volume_type.extra_specs
ovo.volume_type.qos_specs
result.append(objects.Volume(backend, __ovo=ovo))
return result
def get_snapshots(self, snapshot_id=None, snapshot_name=None,
volume_id=None):
filters = self._build_filter(id=snapshot_id, volume_id=volume_id,
display_name=snapshot_name)
LOG.debug('get_snapshots for %s', filters)
ovos = cinder_objs.SnapshotList.get_all(objects.CONTEXT,
filters=filters)
result = [objects.Snapshot(None, __ovo=ovo) for ovo in ovos.objects]
return result
def get_connections(self, connection_id=None, volume_id=None):
filters = self._build_filter(id=connection_id, volume_id=volume_id)
LOG.debug('get_connections for %s', filters)
ovos = cinder_objs.VolumeAttachmentList.get_all(objects.CONTEXT,
filters)
# Leverage lazy loading of the volume and backend in Connection
result = [objects.Connection(None, volume=None, __ovo=ovo)
for ovo in ovos.objects]
return result
def _get_kv(self, session, key=None):
query = session.query(KeyValue)
if key is not None:
query = query.filter_by(key=key)
res = query.all()
# If we want to use the result as an ORM
if session:
return res
return [objects.KeyValue(r.key, r.value) for r in res]
def get_key_values(self, key=None):
with sqla_api.main_context_manager.reader.using(objects.CONTEXT) as s:
return self._get_kv(s, key)
@db_writer
def set_volume(self, volume):
changed = self.get_changed_fields(volume)
if not changed:
changed = self.get_fields(volume)
extra_specs = changed.pop('extra_specs', None)
qos_specs = changed.pop('qos_specs', None)
# Since OVOs are not tracking QoS or Extra specs dictionary changes,
# we only support setting QoS or Extra specs on creation or add them
# later.
vol_type_id = changed.get('volume_type_id')
if vol_type_id == self.DEFAULT_TYPE.id:
if extra_specs or qos_specs:
raise cinder_exception.VolumeTypeUpdateFailed(
id=self.DEFAULT_TYPE.name)
elif vol_type_id:
vol_type_fields = {'id': volume.volume_type_id,
'name': volume.volume_type_id,
'extra_specs': extra_specs,
'is_public': True}
if qos_specs:
res = self.db.qos_specs_create(objects.CONTEXT,
{'name': volume.volume_type_id,
'consumer': 'back-end',
'specs': qos_specs})
# Cinder is automatically generating an ID, replace it
query = sqla_api.model_query(objects.CONTEXT,
models.QualityOfServiceSpecs)
query.filter_by(id=res['id']).update(
{'id': volume.volume_type.qos_specs_id})
self.db.volume_type_create(objects.CONTEXT, vol_type_fields)
else:
if extra_specs is not None:
self.db.volume_type_extra_specs_update_or_create(
objects.CONTEXT, volume.volume_type_id, extra_specs)
self.db.qos_specs_update(objects.CONTEXT,
volume.volume_type.qos_specs_id,
{'name': volume.volume_type_id,
'consumer': 'back-end',
'specs': qos_specs})
else:
volume._ovo.volume_type = self.DEFAULT_TYPE
volume._ovo.volume_type_id = self.DEFAULT_TYPE.id
changed['volume_type_id'] = self.DEFAULT_TYPE.id
# Create the volume
if 'id' in changed:
LOG.debug('set_volume creating %s', changed)
try:
self.db.volume_create(objects.CONTEXT, changed)
changed = None
except exception.DBDuplicateEntry:
del changed['id']
if changed:
LOG.debug('set_volume updating %s', changed)
self.db.volume_update(objects.CONTEXT, volume.id, changed)
super(DBPersistence, self).set_volume(volume)
@db_writer
def set_snapshot(self, snapshot):
changed = self.get_changed_fields(snapshot)
if not changed:
changed = self.get_fields(snapshot)
# Create
if 'id' in changed:
LOG.debug('set_snapshot creating %s', changed)
try:
self.db.snapshot_create(objects.CONTEXT, changed)
changed = None
except exception.DBDuplicateEntry:
del changed['id']
if changed:
LOG.debug('set_snapshot updating %s', changed)
self.db.snapshot_update(objects.CONTEXT, snapshot.id, changed)
super(DBPersistence, self).set_snapshot(snapshot)
@db_writer
def set_connection(self, connection):
changed = self.get_changed_fields(connection)
if not changed:
changed = self.get_fields(connection)
if 'connection_info' in changed:
connection._convert_connection_info_to_db_format(changed)
if 'connector' in changed:
connection._convert_connector_to_db_format(changed)
# Create
if 'id' in changed:
LOG.debug('set_connection creating %s', changed)
try:
sqla_api.volume_attach(objects.CONTEXT, changed)
changed = None
except exception.DBDuplicateEntry:
del changed['id']
if changed:
LOG.debug('set_connection updating %s', changed)
self.db.volume_attachment_update(objects.CONTEXT, connection.id,
changed)
super(DBPersistence, self).set_connection(connection)
@db_writer
def set_key_value(self, key_value):
session = objects.CONTEXT.session
kv = self._get_kv(session, key_value.key)
kv = kv[0] if kv else KeyValue(key=key_value.key)
kv.value = key_value.value
session.add(kv)
@db_writer
def delete_volume(self, volume):
delete_type = (volume.volume_type_id != self.DEFAULT_TYPE.id
and volume.volume_type_id)
if self.soft_deletes:
LOG.debug('soft deleting volume %s', volume.id)
self.db.volume_destroy(objects.CONTEXT, volume.id)
if delete_type:
LOG.debug('soft deleting volume type %s',
volume.volume_type_id)
self.db.volume_type_destroy(objects.CONTEXT, volume.volume_type_id)
if volume.volume_type.qos_specs_id:
self.db.qos_specs_delete(objects.CONTEXT,
volume.volume_type.qos_specs_id)
else:
LOG.debug('hard deleting volume %s', volume.id)
for model in (models.VolumeMetadata, models.VolumeAdminMetadata):
query = sqla_api.model_query(objects.CONTEXT, model)
query.filter_by(volume_id=volume.id).delete()
query = sqla_api.model_query(objects.CONTEXT, models.Volume)
query.filter_by(id=volume.id).delete()
if delete_type:
LOG.debug('hard deleting volume type %s',
volume.volume_type_id)
query = sqla_api.model_query(objects.CONTEXT,
models.VolumeTypeExtraSpecs)
query.filter_by(volume_type_id=volume.volume_type_id).delete()
query = sqla_api.model_query(objects.CONTEXT,
models.VolumeType)
query.filter_by(id=volume.volume_type_id).delete()
query = sqla_api.model_query(objects.CONTEXT,
models.QualityOfServiceSpecs)
qos_id = volume.volume_type.qos_specs_id
if qos_id:
query.filter(sqla_api.or_(
models.QualityOfServiceSpecs.id == qos_id,
models.QualityOfServiceSpecs.specs_id == qos_id
)).delete()
super(DBPersistence, self).delete_volume(volume)
@db_writer
def delete_snapshot(self, snapshot):
if self.soft_deletes:
LOG.debug('soft deleting snapshot %s', snapshot.id)
self.db.snapshot_destroy(objects.CONTEXT, snapshot.id)
else:
LOG.debug('hard deleting snapshot %s', snapshot.id)
query = sqla_api.model_query(objects.CONTEXT, models.Snapshot)
query.filter_by(id=snapshot.id).delete()
super(DBPersistence, self).delete_snapshot(snapshot)
@db_writer
def delete_connection(self, connection):
if self.soft_deletes:
LOG.debug('soft deleting connection %s', connection.id)
self.db.attachment_destroy(objects.CONTEXT, connection.id)
else:
LOG.debug('hard deleting connection %s', connection.id)
query = sqla_api.model_query(objects.CONTEXT,
models.VolumeAttachment)
query.filter_by(id=connection.id).delete()
super(DBPersistence, self).delete_connection(connection)
@db_writer
def delete_key_value(self, key_value):
session = objects.CONTEXT.session
query = session.query(KeyValue)
query.filter_by(key=key_value.key).delete()
class MemoryDBPersistence(DBPersistence):
def __init__(self):
super(MemoryDBPersistence, self).__init__(connection='sqlite://')
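
A hedged configuration sketch for the plugin above; the MySQL URL is an assumption, and cinderlib's setup is assumed to route persistence_config to this driver:

# Illustrative configuration of the DBPersistence plugin; the URL is an
# assumption and cinderlib.setup() is assumed to accept persistence_config.
import cinderlib

cinderlib.setup(persistence_config={
    'storage': 'dbms',
    'connection': 'mysql+pymysql://user:pass@127.0.0.1/cinderlib',
    'soft_deletes': False,
})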

View File

@ -1,113 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderlib.persistence import base as persistence_base
class MemoryPersistence(persistence_base.PersistenceDriverBase):
volumes = {}
snapshots = {}
connections = {}
key_values = {}
def __init__(self):
# Create fake DB for drivers
self.fake_db = persistence_base.DB(self)
super(MemoryPersistence, self).__init__()
@property
def db(self):
return self.fake_db
@staticmethod
def _get_field(res, field):
res = getattr(res, field)
if field == 'host':
res = res.split('@')[1].split('#')[0]
return res
def _filter_by(self, values, field, value):
if not value:
return values
return [res for res in values if self._get_field(res, field) == value]
def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
try:
res = ([self.volumes[volume_id]] if volume_id
else self.volumes.values())
except KeyError:
return []
res = self._filter_by(res, 'display_name', volume_name)
res = self._filter_by(res, 'host', backend_name)
return res
def get_snapshots(self, snapshot_id=None, snapshot_name=None,
volume_id=None):
try:
result = ([self.snapshots[snapshot_id]] if snapshot_id
else self.snapshots.values())
except KeyError:
return []
result = self._filter_by(result, 'volume_id', volume_id)
result = self._filter_by(result, 'display_name', snapshot_name)
return result
def get_connections(self, connection_id=None, volume_id=None):
try:
result = ([self.connections[connection_id]] if connection_id
else self.connections.values())
except KeyError:
return []
result = self._filter_by(result, 'volume_id', volume_id)
return result
def get_key_values(self, key=None):
try:
result = ([self.key_values[key]] if key
else list(self.key_values.values()))
except KeyError:
return []
return result
def set_volume(self, volume):
self.volumes[volume.id] = volume
super(MemoryPersistence, self).set_volume(volume)
def set_snapshot(self, snapshot):
self.snapshots[snapshot.id] = snapshot
super(MemoryPersistence, self).set_snapshot(snapshot)
def set_connection(self, connection):
self.connections[connection.id] = connection
super(MemoryPersistence, self).set_connection(connection)
def set_key_value(self, key_value):
self.key_values[key_value.key] = key_value
def delete_volume(self, volume):
self.volumes.pop(volume.id, None)
super(MemoryPersistence, self).delete_volume(volume)
def delete_snapshot(self, snapshot):
self.snapshots.pop(snapshot.id, None)
super(MemoryPersistence, self).delete_snapshot(snapshot)
def delete_connection(self, connection):
self.connections.pop(connection.id, None)
super(MemoryPersistence, self).delete_connection(connection)
def delete_key_value(self, key_value):
self.key_values.pop(key_value.key, None)

View File

@ -1,204 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Oslo Versioned Objects helper file.
These methods help with the serialization of Cinderlib objects that use the
OVO serialization mechanism: we remove circular references when doing the
JSON serialization of objects (for example, a Volume OVO has a 'snapshots'
field containing Snapshot OVOs that hold a 'volume' back reference), and we
piggyback on the OVO serialization mechanism to add/get additional data we
want.
"""
import functools
import json as json_lib
from cinder.objects import base as cinder_base_ovo
from oslo_versionedobjects import base as base_ovo
from oslo_versionedobjects import fields as ovo_fields
from cinderlib import objects
# Variable used to avoid circular references
BACKEND_CLASS = None
def setup(backend_class):
global BACKEND_CLASS
BACKEND_CLASS = backend_class
# Use custom dehydration methods that prevent maximum recursion errors
# due to circular references:
# ie: snapshot -> volume -> snapshots -> snapshot
base_ovo.VersionedObject.obj_to_primitive = obj_to_primitive
cinder_base_ovo.CinderObject.obj_from_primitive = classmethod(
obj_from_primitive)
fields = base_ovo.obj_fields
fields.Object.to_primitive = staticmethod(field_ovo_to_primitive)
fields.Field.to_primitive = field_to_primitive
fields.List.to_primitive = iterable_to_primitive
fields.Set.to_primitive = iterable_to_primitive
fields.Dict.to_primitive = dict_to_primitive
fields.DateTime.to_primitive = staticmethod(datetime_to_primitive)
wrap_to_primitive(fields.FieldType)
wrap_to_primitive(fields.IPAddress)
def wrap_to_primitive(cls):
method = getattr(cls, 'to_primitive')
@functools.wraps(method)
def to_primitive(obj, attr, value, visited=None):
return method(obj, attr, value)
setattr(cls, 'to_primitive', staticmethod(to_primitive))
def _set_visited(element, visited):
# visited keeps track of elements visited to prevent loops
if visited is None:
visited = set()
# We only track complex objects that can create loops; simple objects, such
# as booleans, are left out so that sharing the same instance across
# multiple fields doesn't prevent them from being serialized.
if isinstance(element,
(ovo_fields.ObjectField, cinder_base_ovo.CinderObject)):
visited.add(id(element))
return visited
def obj_to_primitive(self, target_version=None,
version_manifest=None, visited=None):
# No target_version, version_manifest, or changes support
visited = _set_visited(self, visited)
primitive = {}
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
value = getattr(self, name)
# Skip cycles
if id(value) in visited:
continue
primitive[name] = field.to_primitive(self, name, value,
visited)
obj_name = self.obj_name()
obj = {
self._obj_primitive_key('name'): obj_name,
self._obj_primitive_key('namespace'): self.OBJ_PROJECT_NAMESPACE,
self._obj_primitive_key('version'): self.VERSION,
self._obj_primitive_key('data'): primitive
}
# Piggyback to store our own data
cl_obj = getattr(self, '_cl_obj_', None)
clib_data = cl_obj and cl_obj._to_primitive()
if clib_data:
obj['cinderlib.data'] = clib_data
return obj
def obj_from_primitive(
cls, primitive, context=None,
original_method=cinder_base_ovo.CinderObject.obj_from_primitive):
result = original_method(primitive, context)
result.cinderlib_data = primitive.get('cinderlib.data')
return result
def field_ovo_to_primitive(obj, attr, value, visited=None):
return value.obj_to_primitive(visited=visited)
def field_to_primitive(self, obj, attr, value, visited=None):
if value is None:
return None
return self._type.to_primitive(obj, attr, value, visited)
def iterable_to_primitive(self, obj, attr, value, visited=None):
visited = _set_visited(self, visited)
result = []
for elem in value:
if id(elem) in visited:
continue
_set_visited(elem, visited)
r = self._element_type.to_primitive(obj, attr, elem, visited)
result.append(r)
return result
def dict_to_primitive(self, obj, attr, value, visited=None):
visited = _set_visited(self, visited)
primitive = {}
for key, elem in value.items():
if id(elem) in visited:
continue
_set_visited(elem, visited)
primitive[key] = self._element_type.to_primitive(
obj, '%s["%s"]' % (attr, key), elem, visited)
return primitive
def datetime_to_primitive(obj, attr, value, visited=None):
"""Stringify time in ISO 8601 with subsecond format.
This is the same code as the one used by the OVO DateTime to_primitive
but adding the subsecond resolution with the '.%f' part in strftime call.
This is backward compatible with cinderlib using code that didn't generate
subsecond resolution, because the from_primitive code of the OVO field uses
oslo_utils.timeutils.parse_isotime which in the end uses
iso8601.parse_date, and since the subsecond format is also ISO8601 it is
properly parsed.
"""
st = value.strftime('%Y-%m-%dT%H:%M:%S.%f')
tz = value.tzinfo.tzname(None) if value.tzinfo else 'UTC'
# Need to handle either iso8601 or python UTC format
st += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz)
return st
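# Example (illustrative): a naive datetime(2023, 12, 10, 12, 57, 4, 123456)
# is rendered as '2023-12-10T12:57:04.123456Z' (no tzinfo defaults to UTC).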
def load(json_src, save=False):
"""Load any json serialized cinderlib object."""
if isinstance(json_src, str):
json_src = json_lib.loads(json_src)
if isinstance(json_src, list):
return [getattr(objects, obj['class']).load(obj, save)
for obj in json_src]
return getattr(objects, json_src['class']).load(json_src, save)
def json():
"""Convert to Json everything we have in this system."""
return [backend.json for backend in BACKEND_CLASS.backends.values()]
def jsons():
"""Convert to a Json string everything we have in this system."""
return json_lib.dumps(json(), separators=(',', ':'))
def dump():
"""Convert to Json everything we have in this system."""
return [backend.dump for backend in BACKEND_CLASS.backends.values()]
def dumps():
"""Convert to a Json string everything we have in this system."""
return json_lib.dumps(dump(), separators=(',', ':'))
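# Minimal usage sketch for this module (assumes cinderlib has been set up;
# the backend configuration is hypothetical):
#   import cinderlib
#   cinderlib.setup()
#   cinderlib.Backend(volume_backend_name='lvm',
#                     volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
#                     volume_group='cinder-volumes')
#   data = jsons()         # JSON string with all backends and their volumes
#   restored = load(data)  # recreate the cinderlib objects from the string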

View File

@ -1,259 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import subprocess
import tempfile
import unittest
from oslo_utils import strutils
import yaml
import cinderlib
from cinderlib.cmd import cinder_to_yaml
def set_backend(func, new_name, backend_name):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.backend = cinderlib.Backend.backends[backend_name]
return func(self, *args, **kwargs)
wrapper.__name__ = new_name
wrapper.__wrapped__ = func
return wrapper
def test_all_backends(cls):
"""Decorator to run tests in a class for all available backends."""
config = BaseFunctTestCase.ensure_config_loaded()
    # Copy to avoid "dictionary changed size during iteration" on Python 3
for fname, func in dict(vars(cls)).items():
if fname.startswith('test_'):
for backend in config['backends']:
bname = backend['volume_backend_name']
test_name = '%s_on_%s' % (fname, bname)
setattr(cls, test_name, set_backend(func, test_name, bname))
delattr(cls, fname)
return cls
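# Illustrative expansion (backend names are hypothetical): with backends
# 'lvm' and 'ceph' in the config, a method named test_stats is replaced by
# test_stats_on_lvm and test_stats_on_ceph, each bound to its own backend.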
def get_bool_env(param_string, default=False):
param = os.environ.get(param_string, default)
return strutils.bool_from_string(param, strict=True)
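# Example: CL_FTEST_LOGGING=yes yields True. With strict=True,
# strutils.bool_from_string accepts the usual boolean strings ('1', 't',
# 'true', 'on', 'y', 'yes' and their false counterparts) and raises
# ValueError for anything else.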
class BaseFunctTestCase(unittest.TestCase):
FNULL = open(os.devnull, 'w')
CONFIG_FILE = os.environ.get('CL_FTEST_CFG', '/etc/cinder/cinder.conf')
PRECISION = os.environ.get('CL_FTEST_PRECISION', 0)
LOGGING_ENABLED = get_bool_env('CL_FTEST_LOGGING', False)
DEBUG_ENABLED = get_bool_env('CL_FTEST_DEBUG', False)
ROOT_HELPER = os.environ.get('CL_FTEST_ROOT_HELPER', 'sudo')
MEMORY_PERSISTENCE = get_bool_env('CL_FTEST_MEMORY_PERSISTENCE', True)
DEFAULT_POOL = os.environ.get('CL_FTEST_POOL_NAME', None)
tests_config = None
@classmethod
def ensure_config_loaded(cls):
if not cls.tests_config:
# If it's a .conf type of configuration file convert it to dict
if cls.CONFIG_FILE.endswith('.conf'):
cls.tests_config = cinder_to_yaml.convert(cls.CONFIG_FILE)
else:
with open(cls.CONFIG_FILE, 'r') as f:
cls.tests_config = yaml.safe_load(f)
cls.tests_config.setdefault('logs', cls.LOGGING_ENABLED)
cls.tests_config.setdefault('size_precision', cls.PRECISION)
cls.tests_config.setdefault('debug', cls.DEBUG_ENABLED)
backend = cls.tests_config['backends'][0]
if backend['volume_driver'].endswith('.RBDDriver'):
print('Cinderlib tests use config: %s' % cls.tests_config)
                with open(backend['rbd_ceph_conf'], 'r') as ceph_file:
                    ceph_conf = ceph_file.read()
                print('Contents of ceph.conf are: %s' % ceph_conf)
return cls.tests_config
@classmethod
def setUpClass(cls):
config = cls.ensure_config_loaded()
# Use memory_db persistence instead of memory to ensure migrations work
cinderlib.setup(root_helper=cls.ROOT_HELPER,
disable_logs=not config['logs'],
debug=config['debug'],
persistence_config={'storage': 'memory_db'})
if cls.MEMORY_PERSISTENCE:
# Now replace it with the memory plugin for the tests to ensure the
# Cinder driver is compatible with the persistence plugin
# mechanism, as the DB plugin could hide issues.
cinderlib.Backend.global_initialization = False
cinderlib.setup(root_helper=cls.ROOT_HELPER,
disable_logs=not config['logs'],
debug=config['debug'],
persistence_config={'storage': 'memory'})
# Initialize backends
cls.backends = [cinderlib.Backend(**cfg) for cfg in
config['backends']]
# Lazy load backend's _volumes variable using the volumes property so
# new volumes are added to this list on successful creation.
for backend in cls.backends:
backend.volumes
        # Set the current backend; by default it is the first one
cls.backend = cls.backends[0]
cls.size_precision = config['size_precision']
@classmethod
def tearDownClass(cls):
errors = []
# Do the cleanup of the resources the tests haven't cleaned up already
for backend in cls.backends:
            # For each of the volumes that haven't been deleted, delete any
            # snapshots that are still there and then the volume itself.
# NOTE(geguileo): Don't use volumes and snapshots iterables since
# they are modified when deleting.
# NOTE(geguileo): Cleanup in reverse because RBD driver cannot
# delete a snapshot that has a volume created from it.
for vol in list(backend.volumes)[::-1]:
for snap in list(vol.snapshots):
try:
snap.delete()
except Exception as exc:
errors.append('Error deleting snapshot %s from volume '
'%s: %s' % (snap.id, vol.id, exc))
# Detach if locally attached
if vol.local_attach:
try:
vol.detach()
except Exception as exc:
errors.append('Error detaching %s for volume %s: %s' %
(vol.local_attach.path, vol.id, exc))
# Disconnect any existing connections
for conn in vol.connections:
try:
conn.disconnect()
except Exception as exc:
errors.append('Error disconnecting volume %s: %s' %
(vol.id, exc))
try:
vol.delete()
except Exception as exc:
errors.append('Error deleting volume %s: %s' %
(vol.id, exc))
if errors:
raise Exception('Errors on test cleanup: %s' % '\n\t'.join(errors))
def _root_execute(self, *args, **kwargs):
cmd = [self.ROOT_HELPER]
cmd.extend(args)
cmd.extend("%s=%s" % (k, v) for k, v in kwargs.items())
return subprocess.check_output(cmd, stderr=self.FNULL)
def _create_vol(self, backend=None, **kwargs):
if not backend:
backend = self.backend
vol_size = kwargs.setdefault('size', 1)
name = kwargs.setdefault('name', backend.id)
kwargs.setdefault('pool_name', self.DEFAULT_POOL)
vol = backend.create_volume(**kwargs)
self.assertEqual('available', vol.status)
self.assertEqual(vol_size, vol.size)
self.assertEqual(name, vol.display_name)
self.assertIn(vol, backend.volumes)
return vol
def _create_snap(self, vol, **kwargs):
name = kwargs.setdefault('name', vol.id)
        snap = vol.create_snapshot(**kwargs)
self.assertEqual('available', snap.status)
self.assertEqual(vol.size, snap.volume_size)
self.assertEqual(name, snap.display_name)
self.assertIn(snap, vol.snapshots)
return snap
def _get_vol_size(self, vol, do_detach=True):
if not vol.local_attach:
vol.attach()
try:
while True:
try:
result = self._root_execute('lsblk', '-o', 'SIZE',
'-b', vol.local_attach.path)
size_bytes = result.split()[1]
return float(size_bytes) / 1024.0 / 1024.0 / 1024.0
                # NOTE(geguileo): We can't catch subprocess.CalledProcessError
                # here because somehow the exception raised is an instance of
                # a different subprocess.CalledProcessError class than the
                # one we imported.
except Exception as exc:
# If the volume is not yet available
if getattr(exc, 'returncode', 0) != 32:
raise
finally:
if do_detach:
vol.detach()
def _write_data(self, vol, data=None, do_detach=True):
if not data:
data = b'0123456789' * 100
if not vol.local_attach:
vol.attach()
        # TODO(geguileo): This will not work on Windows; for that we need to
        # pass delete=False and do the manual deletion ourselves.
try:
with tempfile.NamedTemporaryFile() as f:
f.write(data)
f.flush()
self._root_execute('dd', 'if=' + f.name,
of=vol.local_attach.path)
finally:
if do_detach:
vol.detach()
return data
def _read_data(self, vol, length, do_detach=True):
if not vol.local_attach:
vol.attach()
try:
stdout = self._root_execute('dd', 'if=' + vol.local_attach.path,
count=1, ibs=length)
finally:
if do_detach:
vol.detach()
return stdout
def _pools_info(self, stats):
return stats.get('pools', [stats])
def assertSize(self, expected_size, actual_size):
if self.size_precision:
self.assertAlmostEqual(expected_size, actual_size,
self.size_precision)
else:
self.assertEqual(expected_size, actual_size)
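    # Example: with size_precision=2 and an expected size of 1 GiB, a
    # measured size of 0.999 GiB passes, because assertAlmostEqual rounds
    # the 0.001 difference to 2 decimal places, while 0.98 GiB fails.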

View File

@ -1,12 +0,0 @@
# Logs are way too verbose; set this to false to disable them
logs: true
debug: true
# We only define one backend
backends:
- volume_backend_name: ceph
volume_driver: cinder.volume.drivers.rbd.RBDDriver
rbd_user: admin
rbd_pool: rbd
rbd_ceph_conf: /etc/ceph/ceph.conf
rbd_keyring_conf: /etc/ceph/ceph.client.admin.keyring

View File

@ -1,15 +0,0 @@
# For Fedora, CentOS and RHEL we require the targetcli package.
# For Ubuntu we require lio-utils, or the target iscsi_helper must be
# changed.
#
# Logs are way too verbose; set this to false to disable them
logs: true
debug: true
# We only define one backend
backends:
- volume_backend_name: lvm
volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group: cinder-volumes
target_protocol: iscsi
target_helper: lioadm
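# This file is picked up by the functional tests when the CL_FTEST_CFG
# environment variable (read by base_tests.BaseFunctTestCase) points to it.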

View File

@ -1,275 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import ddt
import cinderlib
from cinderlib.tests.functional import base_tests
@ddt.ddt
class BaseFunctTestCase(base_tests.unittest.TestCase):
@ddt.data([], [1], [2])
def test_list_supported_drivers(self, args):
is_v2 = args == [2]
expected_type = dict if is_v2 else str
expected_keys = {'version', 'class_name', 'supported', 'ci_wiki_name',
'driver_options', 'class_fqn', 'desc'}
drivers = cinderlib.Backend.list_supported_drivers(*args)
self.assertNotEqual(0, len(drivers))
for name, driver_info in drivers.items():
self.assertEqual(expected_keys, set(driver_info.keys()))
# Ensure that the RBDDriver has the rbd_keyring_conf option and
# it's not deprecated
if name == 'RBDDriver':
keyring_conf = [conf for conf in driver_info['driver_options']
if conf['dest'] == 'rbd_keyring_conf']
self.assertEqual(1, len(keyring_conf))
expected_value = False if is_v2 else 'False'
self.assertEqual(expected_value,
keyring_conf[0]['deprecated_for_removal'])
for option in driver_info['driver_options']:
self.assertIsInstance(option['type'], expected_type)
if is_v2:
self.assertIn('type_class', option['type'])
else:
for v in option.values():
self.assertIsInstance(v, str)
@base_tests.test_all_backends
class BackendFunctBasic(base_tests.BaseFunctTestCase):
def test_stats(self):
stats = self.backend.stats()
self.assertIn('vendor_name', stats)
self.assertIn('volume_backend_name', stats)
pools_info = self._pools_info(stats)
for pool_info in pools_info:
self.assertIn('free_capacity_gb', pool_info)
self.assertIn('total_capacity_gb', pool_info)
def _volumes_in_pools(self, pools_info):
if not any('total_volumes' in p for p in pools_info):
return None
return sum(p.get('total_volumes', 0) for p in pools_info)
def test_stats_with_creation(self):
initial_stats = self.backend.stats(refresh=True)
initial_pools_info = self._pools_info(initial_stats)
initial_volumes = self._volumes_in_pools(initial_pools_info)
initial_size = sum(p.get('allocated_capacity_gb',
p.get('provisioned_capacity_gb', 0))
for p in initial_pools_info)
size = random.randint(1, 5)
vol = self._create_vol(self.backend, size=size)
# Check that without refresh we get the same data
duplicate_stats = self.backend.stats(refresh=False)
self.assertEqual(initial_stats, duplicate_stats)
new_stats = self.backend.stats(refresh=True)
new_pools_info = self._pools_info(new_stats)
new_volumes = self._volumes_in_pools(new_pools_info)
new_size = sum(p.get('allocated_capacity_gb',
p.get('provisioned_capacity_gb', vol.size))
for p in new_pools_info)
        # We could be sharing the pool with other CI jobs or with parallel
        # executions of this same one, so we cannot check that we have
        # exactly 1 more volume and 1 more GB used; we just check that the
        # values have changed. This could still fail if another job deletes
        # a single volume of the same size at the same time, which is why we
        # randomize the size: to reduce the risk of the volumes having the
        # same size.
# If the backend is reporting the number of volumes, check them
if initial_volumes is not None:
self.assertNotEqual(initial_volumes, new_volumes)
self.assertNotEqual(initial_size, new_size)
def test_create_volume(self):
vol = self._create_vol(self.backend)
vol_size = self._get_vol_size(vol)
self.assertSize(vol.size, vol_size)
# We are not testing delete, so leave the deletion to the tearDown
def test_create_delete_volume(self):
vol = self._create_vol(self.backend)
vol.delete()
self.assertEqual('deleted', vol.status)
self.assertTrue(vol.deleted)
self.assertNotIn(vol, self.backend.volumes)
# Confirm idempotency of the operation by deleting it again
vol._ovo.status = 'error'
vol._ovo.deleted = False
vol.delete()
self.assertEqual('deleted', vol.status)
self.assertTrue(vol.deleted)
def test_create_snapshot(self):
vol = self._create_vol(self.backend)
self._create_snap(vol)
# We are not testing delete, so leave the deletion to the tearDown
def test_create_delete_snapshot(self):
vol = self._create_vol(self.backend)
snap = self._create_snap(vol)
snap.delete()
self.assertEqual('deleted', snap.status)
self.assertTrue(snap.deleted)
self.assertNotIn(snap, vol.snapshots)
# Confirm idempotency of the operation by deleting it again
snap._ovo.status = 'error'
snap._ovo.deleted = False
snap.delete()
self.assertEqual('deleted', snap.status)
self.assertTrue(snap.deleted)
def test_attach_volume(self):
vol = self._create_vol(self.backend)
attach = vol.attach()
path = attach.path
self.assertIs(attach, vol.local_attach)
self.assertIn(attach, vol.connections)
self.assertTrue(os.path.exists(path))
# We are not testing detach, so leave it to the tearDown
def test_attach_detach_volume(self):
vol = self._create_vol(self.backend)
attach = vol.attach()
self.assertIs(attach, vol.local_attach)
self.assertIn(attach, vol.connections)
vol.detach()
self.assertIsNone(vol.local_attach)
self.assertNotIn(attach, vol.connections)
def test_attach_detach_volume_via_attachment(self):
vol = self._create_vol(self.backend)
attach = vol.attach()
self.assertTrue(attach.attached)
path = attach.path
self.assertTrue(os.path.exists(path))
attach.detach()
self.assertFalse(attach.attached)
self.assertIsNone(vol.local_attach)
# We haven't disconnected the volume, just detached it
self.assertIn(attach, vol.connections)
attach.disconnect()
self.assertNotIn(attach, vol.connections)
def test_disk_io(self):
vol = self._create_vol(self.backend)
data = self._write_data(vol)
read_data = self._read_data(vol, len(data))
self.assertEqual(data, read_data)
def test_extend(self):
vol = self._create_vol(self.backend)
original_size = vol.size
result_original_size = self._get_vol_size(vol)
self.assertSize(original_size, result_original_size)
new_size = vol.size + 1
        # Retrieve the volume from the persistence storage to ensure lazy
        # loading works. Prevents a regression of bug #1852629.
vol_from_db = self.backend.persistence.get_volumes(vol.id)[0]
vol_from_db.extend(new_size)
self.assertEqual(new_size, vol.size)
result_new_size = self._get_vol_size(vol)
self.assertSize(new_size, result_new_size)
def test_extend_attached(self):
vol = self._create_vol(self.backend)
original_size = vol.size
# Attach, get size, and leave volume attached
result_original_size = self._get_vol_size(vol, do_detach=False)
self.assertSize(original_size, result_original_size)
new_size = vol.size + 1
# Extending the volume should also extend the local view of the volume
reported_size = vol.extend(new_size)
# The instance size must have been updated
self.assertEqual(new_size, vol.size)
self.assertEqual(new_size, vol._ovo.size)
# Returned size must match the requested one
self.assertEqual(new_size * (1024 ** 3), reported_size)
# Get size of attached volume on the host and detach it
result_new_size = self._get_vol_size(vol)
self.assertSize(new_size, result_new_size)
def test_clone(self):
vol = self._create_vol(self.backend)
original_size = self._get_vol_size(vol, do_detach=False)
data = self._write_data(vol)
new_vol = vol.clone()
self.assertEqual(vol.size, new_vol.size)
self.assertEqual(vol.id, new_vol.source_volid)
cloned_size = self._get_vol_size(new_vol, do_detach=False)
read_data = self._read_data(new_vol, len(data))
self.assertEqual(original_size, cloned_size)
self.assertEqual(data, read_data)
def test_create_volume_from_snapshot(self):
# Create a volume and write some data
vol = self._create_vol(self.backend)
original_size = self._get_vol_size(vol, do_detach=False)
data = self._write_data(vol)
# Take a snapshot
snap = vol.create_snapshot()
self.assertEqual(vol.size, snap.volume_size)
# Change the data in the volume
reversed_data = data[::-1]
self._write_data(vol, data=reversed_data)
# Create a new volume from the snapshot with the original data
new_vol = snap.create_volume()
self.assertEqual(vol.size, new_vol.size)
created_size = self._get_vol_size(new_vol, do_detach=False)
read_data = self._read_data(new_vol, len(data))
self.assertEqual(original_size, created_size)
self.assertEqual(data, read_data)

View File

@ -1,48 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import cinderlib
from cinderlib.tests.unit import utils
cinderlib.setup(persistence_config={'storage': utils.get_mock_persistence()})
class BaseTest(unittest.TestCase):
PERSISTENCE_CFG = None
def setUp(self):
if not self.PERSISTENCE_CFG:
cfg = {'storage': utils.get_mock_persistence()}
cinderlib.Backend.set_persistence(cfg)
self.backend_name = 'fake_backend'
self.backend = utils.FakeBackend(volume_backend_name=self.backend_name)
self.persistence = self.backend.persistence
cinderlib.Backend._volumes_inflight = {}
def tearDown(self):
# Clear all existing backends
cinderlib.Backend.backends = {}
def patch(self, path, *args, **kwargs):
"""Use python mock to mock a path with automatic cleanup."""
patcher = mock.patch(path, *args, **kwargs)
result = patcher.start()
self.addCleanup(patcher.stop)
return result
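    # Example usage inside a test (the patched path is illustrative):
    #   mock_connect = self.patch('cinderlib.objects.Volume.connect')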

View File

@ -1,291 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from cinderlib import exception
from cinderlib import objects
from cinderlib.tests.unit import base
@ddt.ddt
class TestConnection(base.BaseTest):
def setUp(self):
self.original_is_multipathed = objects.Connection._is_multipathed_conn
self.mock_is_mp = self.patch(
'cinderlib.objects.Connection._is_multipathed_conn')
self.mock_default = self.patch(
'os_brick.initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT')
super(TestConnection, self).setUp()
self.vol = objects.Volume(self.backend_name, size=10)
self.kwargs = {'k1': 'v1', 'k2': 'v2',
'connection_info': {'conn': {'data': {'t': 0}}}}
self.conn = objects.Connection(self.backend, volume=self.vol,
**self.kwargs)
self.conn._ovo.connection_info = {
'connector': {'multipath': mock.sentinel.mp_ovo_connector}}
def test_init(self):
self.mock_is_mp.assert_called_once_with(self.kwargs)
self.assertEqual(self.conn.use_multipath, self.mock_is_mp.return_value)
self.assertEqual(self.conn.scan_attempts, self.mock_default)
self.assertEqual(self.conn.attach_mode, 'rw')
self.assertIsNone(self.conn._connector)
self.assertEqual(self.vol, self.conn._volume)
self.assertEqual(self.vol._ovo, self.conn._ovo.volume)
self.assertEqual(self.vol._ovo.id, self.conn._ovo.volume_id)
def test__is_multipathed_conn_kwargs(self):
res = self.original_is_multipathed(dict(
use_multipath=mock.sentinel.mp_kwargs,
connector={'multipath': mock.sentinel.mp_connector},
__ovo=self.conn._ovo))
self.assertEqual(mock.sentinel.mp_kwargs, res)
def test__is_multipathed_conn_connector_kwarg(self):
res = self.original_is_multipathed(dict(
connector={'multipath': mock.sentinel.mp_connector},
__ovo=self.conn._ovo))
self.assertEqual(mock.sentinel.mp_connector, res)
def test__is_multipathed_conn_connector_ovo(self):
res = self.original_is_multipathed(dict(connector={},
__ovo=self.conn._ovo))
self.assertEqual(mock.sentinel.mp_ovo_connector, res)
def test__is_multipathed_conn_connection_info_iscsi_true(self):
res = self.original_is_multipathed(dict(
connection_info={'conn': {'data': {'target_iqns': '',
'target_portals': ''}}}))
self.assertTrue(res)
def test__is_multipathed_conn_connection_info_iscsi_false(self):
res = self.original_is_multipathed(dict(
connection_info={'conn': {'data': {'target_iqns': ''}}}))
self.assertFalse(res)
def test__is_multipathed_conn_connection_info_fc_true(self):
res = self.original_is_multipathed(dict(
connection_info={'conn': {'data': {'target_wwn': []}}}))
self.assertTrue(res)
def test__is_multipathed_conn_connection_info_fc_false(self):
res = self.original_is_multipathed(dict(
connection_info={'conn': {'data': {'target_wwn': ''}}}))
self.assertFalse(res)
def test_init_no_backend(self):
self.assertRaises(TypeError, objects.Connection)
def test_init_preference_attach_mode(self):
kwargs = {'attach_mode': 'ro',
'connection_info': {'conn': {'data': {'access_mode': 'rw'}}}}
conn = objects.Connection(self.backend, **kwargs)
self.assertEqual(conn.conn_info['data']['access_mode'], 'ro')
def test_init_no_volume(self):
self.mock_is_mp.reset_mock()
kwargs = {'attach_mode': 'ro',
'connection_info': {'conn': {'data': {'t': 0}}}}
conn = objects.Connection(self.backend, **kwargs)
self.mock_is_mp.assert_called_once_with(kwargs)
self.assertEqual(conn.use_multipath, self.mock_is_mp.return_value)
self.assertEqual(conn.scan_attempts, self.mock_default)
self.assertEqual(conn.attach_mode, 'ro')
self.assertEqual({'data': {'access_mode': 'ro', 't': 0}},
conn.conn_info)
self.assertIsNone(conn._connector)
def test_connect(self):
init_conn = self.backend.driver.initialize_connection
init_conn.return_value = {'data': {}}
connector = {'my_c': 'v'}
conn = self.conn.connect(self.vol, connector)
init_conn.assert_called_once_with(self.vol, connector)
self.assertIsInstance(conn, objects.Connection)
self.assertEqual('attached', conn.status)
self.assertEqual(init_conn.return_value, conn.connection_info['conn'])
self.assertEqual(connector, conn.connector_info)
self.persistence.set_connection.assert_called_once_with(conn)
@mock.patch('cinderlib.objects.Volume._disconnect')
@mock.patch('cinderlib.objects.Connection._disconnect')
def test_disconnect(self, mock_disc, mock_vol_disc):
self.conn.disconnect(force=mock.sentinel.force)
mock_disc.assert_called_once_with(mock.sentinel.force)
mock_vol_disc.assert_called_once_with(self.conn)
def test__disconnect(self):
conn_info = self.conn.connector_info
self.conn._disconnect(mock.sentinel.force)
self.backend.driver.terminate_connection.assert_called_once_with(
self.vol._ovo, conn_info, force=mock.sentinel.force)
self.assertEqual({}, self.conn.conn_info)
self.assertEqual('detached', self.conn.status)
self.persistence.delete_connection.assert_called_once_with(self.conn)
@mock.patch('cinderlib.objects.Connection.conn_info', {'data': 'mydata'})
@mock.patch('cinderlib.objects.Connection.path')
@mock.patch('cinderlib.objects.Connection.device_attached')
def test_attach(self, mock_attached, mock_path):
with mock.patch('cinderlib.objects.Connection.connector') as mock_conn:
self.conn.attach()
mock_conn.connect_volume.assert_called_once_with('mydata')
mock_attached.assert_called_once_with(
mock_conn.connect_volume.return_value)
mock_conn.check_valid_device.assert_called_once_with(mock_path)
self.assertEqual(self.conn, self.vol.local_attach)
@mock.patch('cinderlib.objects.Connection.conn_info', {'data': 'mydata'})
@mock.patch('cinderlib.objects.Connection.device')
def test_detach(self, mock_device):
self.vol.local_attach = mock.Mock()
with mock.patch('cinderlib.objects.Connection.connector') as mock_conn:
self.conn.detach(mock.sentinel.force, mock.sentinel.ignore)
mock_conn.disconnect_volume.assert_called_once_with(
'mydata',
mock_device,
force=mock.sentinel.force,
ignore_errors=mock.sentinel.ignore)
self.assertIsNone(self.vol.local_attach)
self.assertIsNone(self.conn.device)
self.assertIsNone(self.conn._connector)
self.persistence.set_connection.assert_called_once_with(self.conn)
def test_get_by_id(self):
self.persistence.get_connections.return_value = [mock.sentinel.conn]
res = objects.Connection.get_by_id(mock.sentinel.conn_id)
self.assertEqual(mock.sentinel.conn, res)
self.persistence.get_connections.assert_called_once_with(
connection_id=mock.sentinel.conn_id)
def test_get_by_id_not_found(self):
self.persistence.get_connections.return_value = None
self.assertRaises(exception.ConnectionNotFound,
objects.Connection.get_by_id,
mock.sentinel.conn_id)
self.persistence.get_connections.assert_called_once_with(
connection_id=mock.sentinel.conn_id)
def test_device_attached(self):
self.conn.device_attached(mock.sentinel.device)
self.assertEqual(mock.sentinel.device,
self.conn.connection_info['device'])
self.persistence.set_connection.assert_called_once_with(self.conn)
def test_conn_info_setter_changes_attach_mode(self):
self.assertEqual('rw', self.conn._ovo.attach_mode)
self.conn.conn_info = {'data': {'target_lun': 0, 'access_mode': 'ro'}}
self.assertEqual({'data': {'target_lun': 0, 'access_mode': 'ro'}},
self.conn._ovo.connection_info['conn'])
self.assertEqual('ro', self.conn._ovo.attach_mode)
def test_conn_info_setter_uses_attach_mode(self):
self.assertEqual('rw', self.conn._ovo.attach_mode)
self.conn._ovo.attach_mode = 'ro'
self.conn.conn_info = {'data': {'target_lun': 0}}
self.assertEqual({'data': {'target_lun': 0, 'access_mode': 'ro'}},
self.conn.conn_info)
self.assertEqual('ro', self.conn._ovo.attach_mode)
def test_conn_info_setter_clear(self):
self.conn.conn_info = {'data': {}}
self.conn.conn_info = {}
self.assertIsNone(self.conn._ovo.connection_info)
def test_conn_info_getter(self):
value = {'data': {'access_mode': 'ro'}}
self.conn.conn_info = value
self.assertEqual(value, self.conn.conn_info)
def test_conn_info_getter_none(self):
self.conn.conn_info = None
self.assertEqual({}, self.conn.conn_info)
def test_protocol(self):
self.conn.conn_info = {'driver_volume_type': mock.sentinel.iscsi}
self.assertEqual(mock.sentinel.iscsi, self.conn.protocol)
def test_connector_info_setter(self):
self.conn.connector_info = mock.sentinel.connector
self.assertEqual(mock.sentinel.connector,
self.conn._ovo.connection_info['connector'])
self.assertIn('connection_info', self.conn._ovo._changed_fields)
def test_connector_info_getter(self):
self.conn.connector_info = mock.sentinel.connector
self.assertEqual(mock.sentinel.connector, self.conn.connector_info)
def test_connector_info_getter_empty(self):
self.conn._ovo.connection_info = None
self.assertIsNone(self.conn.connector_info)
def test_device_setter(self):
self.conn.device = mock.sentinel.device
self.assertEqual(mock.sentinel.device,
self.conn._ovo.connection_info['device'])
self.assertIn('connection_info', self.conn._ovo._changed_fields)
def test_device_setter_none(self):
self.conn.device = mock.sentinel.device
self.conn.device = None
self.assertNotIn('device', self.conn._ovo.connection_info)
self.assertIn('connection_info', self.conn._ovo._changed_fields)
def test_device_getter(self):
self.conn.device = mock.sentinel.device
self.assertEqual(mock.sentinel.device, self.conn.device)
def test_path(self):
self.conn.device = {'path': mock.sentinel.path}
self.assertEqual(mock.sentinel.path, self.conn.path)
@mock.patch('cinderlib.objects.Connection.conn_info')
@mock.patch('cinderlib.objects.Connection.protocol')
@mock.patch('cinder.volume.volume_utils.brick_get_connector')
def test_connector_getter(self, mock_connector, mock_proto, mock_info):
res = self.conn.connector
self.assertEqual(mock_connector.return_value, res)
mock_connector.assert_called_once_with(
mock_proto,
use_multipath=self.mock_is_mp.return_value,
device_scan_attempts=self.mock_default,
conn=mock_info,
do_local_attach=True)
# Make sure we cache the value
res = self.conn.connector
self.assertEqual(1, mock_connector.call_count)
@ddt.data(True, False)
def test_attached_true(self, value):
with mock.patch('cinderlib.objects.Connection.device', value):
self.assertEqual(value, self.conn.attached)
@ddt.data(True, False)
def test_connected(self, value):
with mock.patch('cinderlib.objects.Connection.conn_info', value):
self.assertEqual(value, self.conn.connected)
def test_extend(self):
self.conn._ovo.connection_info['conn'] = {'data': mock.sentinel.data}
with mock.patch('cinderlib.objects.Connection.connector') as mock_conn:
res = self.conn.extend()
mock_conn.extend_volume.assert_called_once_with(mock.sentinel.data)
self.assertEqual(mock_conn.extend_volume.return_value, res)

View File

@ -1,152 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cinderlib import exception
from cinderlib import objects
from cinderlib.tests.unit import base
class TestSnapshot(base.BaseTest):
def setUp(self):
super(TestSnapshot, self).setUp()
self.vol = objects.Volume(self.backend_name, size=10,
extra_specs={'e': 'v'},
qos_specs={'q': 'qv'})
self.snap = objects.Snapshot(self.vol,
name='my_snap', description='my_desc')
self.vol._snapshots.append(self.snap)
self.vol._ovo.snapshots.objects.append(self.snap._ovo)
def test_init_from_volume(self):
self.assertIsNotNone(self.snap.id)
self.assertEqual(self.backend, self.snap.backend)
self.assertEqual('my_snap', self.snap.name)
self.assertEqual('my_snap', self.snap.display_name)
self.assertEqual('my_desc', self.snap.description)
self.assertEqual(self.vol.user_id, self.snap.user_id)
self.assertEqual(self.vol.project_id, self.snap.project_id)
self.assertEqual(self.vol.id, self.snap.volume_id)
self.assertEqual(self.vol.size, self.snap.volume_size)
self.assertEqual(self.vol._ovo, self.snap._ovo.volume)
self.assertEqual(self.vol.volume_type_id, self.snap.volume_type_id)
self.assertEqual(self.vol, self.snap.volume)
def test_init_from_ovo(self):
snap2 = objects.Snapshot(None, __ovo=self.snap._ovo)
self.assertEqual(self.snap.backend, snap2.backend)
self.assertEqual(self.snap._ovo, snap2._ovo)
self.assertEqual(self.vol, self.snap.volume)
def test_create(self):
update_vol = {'provider_id': 'provider_id'}
self.backend.driver.create_snapshot.return_value = update_vol
self.snap.create()
self.assertEqual('available', self.snap.status)
self.assertEqual('provider_id', self.snap.provider_id)
self.backend.driver.create_snapshot.assert_called_once_with(
self.snap._ovo)
self.persistence.set_snapshot.assert_called_once_with(self.snap)
def test_create_error(self):
self.backend.driver.create_snapshot.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
self.snap.create()
self.assertEqual(self.snap, assert_context.exception.resource)
self.backend.driver.create_snapshot.assert_called_once_with(
self.snap._ovo)
self.assertEqual('error', self.snap.status)
self.persistence.set_snapshot.assert_called_once_with(self.snap)
def test_delete(self):
with mock.patch.object(
self.vol, '_snapshot_removed',
wraps=self.vol._snapshot_removed) as snap_removed_mock:
self.snap.delete()
snap_removed_mock.assert_called_once_with(self.snap)
self.backend.driver.delete_snapshot.assert_called_once_with(
self.snap._ovo)
self.persistence.delete_snapshot.assert_called_once_with(self.snap)
self.assertEqual([], self.vol.snapshots)
self.assertEqual([], self.vol._ovo.snapshots.objects)
self.assertEqual('deleted', self.snap._ovo.status)
@mock.patch('cinderlib.objects.Volume._snapshot_removed')
def test_delete_error(self, snap_removed_mock):
self.backend.driver.delete_snapshot.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
self.snap.delete()
self.assertEqual(self.snap, assert_context.exception.resource)
self.backend.driver.delete_snapshot.assert_called_once_with(
self.snap._ovo)
snap_removed_mock.assert_not_called()
self.persistence.delete_snapshot.assert_not_called()
self.assertEqual([self.snap], self.vol.snapshots)
self.assertEqual([self.snap._ovo], self.vol._ovo.snapshots.objects)
self.assertEqual('error_deleting', self.snap._ovo.status)
def test_create_volume(self):
create_mock = self.backend.driver.create_volume_from_snapshot
create_mock.return_value = None
vol2 = self.snap.create_volume(name='new_name', description='new_desc')
create_mock.assert_called_once_with(vol2._ovo, self.snap._ovo)
self.assertEqual('available', vol2.status)
self.assertEqual(1, len(self.backend._volumes))
self.assertEqual(vol2, self.backend._volumes[0])
self.persistence.set_volume.assert_called_once_with(vol2)
self.assertEqual(self.vol.id, self.vol.volume_type_id)
self.assertNotEqual(self.vol.id, vol2.id)
self.assertEqual(vol2.id, vol2.volume_type_id)
self.assertEqual(self.vol.volume_type.extra_specs,
vol2.volume_type.extra_specs)
self.assertEqual(self.vol.volume_type.qos_specs.specs,
vol2.volume_type.qos_specs.specs)
def test_create_volume_error(self):
create_mock = self.backend.driver.create_volume_from_snapshot
create_mock.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
self.snap.create_volume()
self.assertEqual(1, len(self.backend._volumes_inflight))
vol2 = list(self.backend._volumes_inflight.values())[0]
self.assertEqual(vol2, assert_context.exception.resource)
create_mock.assert_called_once_with(vol2, self.snap._ovo)
self.assertEqual('error', vol2.status)
self.persistence.set_volume.assert_called_once_with(mock.ANY)
def test_get_by_id(self):
mock_get_snaps = self.persistence.get_snapshots
mock_get_snaps.return_value = [mock.sentinel.snap]
res = objects.Snapshot.get_by_id(mock.sentinel.snap_id)
mock_get_snaps.assert_called_once_with(
snapshot_id=mock.sentinel.snap_id)
self.assertEqual(mock.sentinel.snap, res)
def test_get_by_id_not_found(self):
mock_get_snaps = self.persistence.get_snapshots
mock_get_snaps.return_value = None
self.assertRaises(exception.SnapshotNotFound,
objects.Snapshot.get_by_id, mock.sentinel.snap_id)
mock_get_snaps.assert_called_once_with(
snapshot_id=mock.sentinel.snap_id)
def test_get_by_name(self):
res = objects.Snapshot.get_by_name(mock.sentinel.name)
mock_get_snaps = self.persistence.get_snapshots
mock_get_snaps.assert_called_once_with(
snapshot_name=mock.sentinel.name)
self.assertEqual(mock_get_snaps.return_value, res)

View File

@ -1,549 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cinder import objects as cinder_ovos
from cinderlib import exception
from cinderlib import objects
from cinderlib.tests.unit import base
class TestVolume(base.BaseTest):
def test_init_from_args_backend_name(self):
vol = objects.Volume(self.backend_name,
name='vol_name', description='vol_desc', size=10)
self.assertEqual(self.backend, vol.backend)
self.assertEqual('vol_name', vol.name)
self.assertEqual('vol_name', vol.display_name)
self.assertEqual('vol_desc', vol.description)
self.assertEqual(10, vol.size)
self.assertIsNotNone(vol.id)
def test_init_from_args_backend(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
self.assertEqual(self.backend, vol.backend)
self.assertEqual('vol_name', vol.name)
self.assertEqual('vol_name', vol.display_name)
self.assertEqual('vol_desc', vol.description)
self.assertEqual(10, vol.size)
self.assertIsNotNone(vol.id)
def test_init_from_volume(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
vol2 = objects.Volume(vol, name='new_name', size=11)
self.assertEqual(self.backend, vol2.backend)
self.assertEqual('new_name', vol2.name)
self.assertEqual('new_name', vol2.display_name)
self.assertEqual(vol.description, vol2.description)
self.assertEqual(11, vol2.size)
self.assertIsNotNone(vol2.id)
self.assertNotEqual(vol.id, vol2.id)
def test_init_from_ovo(self):
vol = objects.Volume(self.backend, size=10)
vol2 = objects.Volume(self.backend, __ovo=vol._ovo)
self.assertEqual(vol._ovo, vol2._ovo)
def test_snapshots_lazy_loading(self):
vol = objects.Volume(self.backend, size=10)
vol._snapshots = None
snaps = [objects.Snapshot(vol, name='my_snap')]
# Persistence retrieves Snapshots without the Volume, just volume_id
snaps[0]._ovo.volume = None
mock_get_snaps = self.persistence.get_snapshots
mock_get_snaps.return_value = snaps
result = vol.snapshots
mock_get_snaps.assert_called_once_with(volume_id=vol.id)
self.assertEqual(snaps, result)
self.assertEqual(snaps, vol._snapshots)
self.assertEqual(1, len(vol._ovo.snapshots))
self.assertEqual(vol._ovo.snapshots[0], result[0]._ovo)
# There is no second call when we reference it again
mock_get_snaps.reset_mock()
result = vol.snapshots
self.assertEqual(snaps, result)
        mock_get_snaps.assert_not_called()
def test_connections_lazy_loading(self):
vol = objects.Volume(self.backend, size=10)
vol._connections = None
delattr(vol._ovo, '_obj_volume_attachment')
conns = [objects.Connection(self.backend, connector={'k': 'v'},
volume_id=vol.id, status='attached',
attach_mode='rw',
connection_info={'conn': {}},
name='my_snap')]
mock_get_conns = self.persistence.get_connections
mock_get_conns.return_value = conns
result = vol.connections
mock_get_conns.assert_called_once_with(volume_id=vol.id)
self.assertEqual(conns, result)
self.assertEqual(conns, vol._connections)
self.assertEqual(1, len(vol._ovo.volume_attachment))
self.assertEqual(vol._ovo.volume_attachment[0], result[0]._ovo)
# There is no second call when we reference it again
mock_get_conns.reset_mock()
result = vol.connections
self.assertEqual(conns, result)
        mock_get_conns.assert_not_called()
@mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.'
'get_all_by_volume_id')
def test_connections_lazy_loading_from_ovo(self, get_all_mock):
"""Test we don't reload connections if data is in OVO."""
vol = objects.Volume(self.backend, size=10)
vol._connections = None
delattr(vol._ovo, '_obj_volume_attachment')
conns = [objects.Connection(self.backend, connector={'k': 'v'},
volume_id=vol.id, status='attached',
attach_mode='rw',
connection_info={'conn': {}},
name='my_snap')]
ovo_conns = [conn._ovo for conn in conns]
ovo_attach_list = cinder_ovos.VolumeAttachmentList(objects=ovo_conns)
get_all_mock.return_value = ovo_attach_list
mock_get_conns = self.persistence.get_connections
ovo_result = vol._ovo.volume_attachment
        mock_get_conns.assert_not_called()
self.assertEqual(ovo_attach_list, ovo_result)
# Cinderlib object doesn't have the connections yet
self.assertIsNone(vol._connections)
self.assertEqual(1, len(vol._ovo.volume_attachment))
self.assertEqual(vol._ovo.volume_attachment[0], ovo_result[0])
# There is no second call when we access the cinderlib object, as the
# data is retrieved from the OVO that already has it
result = vol.connections
        mock_get_conns.assert_not_called()
# Confirm we used the OVO
self.assertIs(ovo_conns[0], result[0]._ovo)
def test_get_by_id(self):
mock_get_vols = self.persistence.get_volumes
mock_get_vols.return_value = [mock.sentinel.vol]
res = objects.Volume.get_by_id(mock.sentinel.vol_id)
mock_get_vols.assert_called_once_with(volume_id=mock.sentinel.vol_id)
self.assertEqual(mock.sentinel.vol, res)
def test_get_by_id_not_found(self):
mock_get_vols = self.persistence.get_volumes
mock_get_vols.return_value = None
self.assertRaises(exception.VolumeNotFound,
objects.Volume.get_by_id, mock.sentinel.vol_id)
mock_get_vols.assert_called_once_with(volume_id=mock.sentinel.vol_id)
def test_get_by_name(self):
res = objects.Volume.get_by_name(mock.sentinel.name)
mock_get_vols = self.persistence.get_volumes
mock_get_vols.assert_called_once_with(volume_name=mock.sentinel.name)
self.assertEqual(mock_get_vols.return_value, res)
def test_create(self):
self.backend.driver.create_volume.return_value = None
vol = self.backend.create_volume(10, name='vol_name',
description='des')
self.backend.driver.create_volume.assert_called_once_with(vol._ovo)
self.assertEqual('available', vol.status)
self.persistence.set_volume.assert_called_once_with(vol)
def test_create_error(self):
self.backend.driver.create_volume.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
self.backend.create_volume(10, name='vol_name', description='des')
vol = assert_context.exception.resource
self.assertIsInstance(vol, objects.Volume)
self.assertEqual(10, vol.size)
self.assertEqual('vol_name', vol.name)
self.assertEqual('des', vol.description)
def test_delete(self):
vol = objects.Volume(self.backend_name, size=10)
vol.delete()
self.backend.driver.delete_volume.assert_called_once_with(vol._ovo)
self.persistence.delete_volume.assert_called_once_with(vol)
self.assertEqual('deleted', vol._ovo.status)
def test_delete_error_with_snaps(self):
vol = objects.Volume(self.backend_name, size=10, status='available')
snap = objects.Snapshot(vol)
vol._snapshots.append(snap)
self.assertRaises(exception.InvalidVolume, vol.delete)
self.assertEqual('available', vol._ovo.status)
def test_delete_error(self):
vol = objects.Volume(self.backend_name,
name='vol_name', description='vol_desc', size=10)
self.backend.driver.delete_volume.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
vol.delete()
self.assertEqual(vol, assert_context.exception.resource)
self.backend.driver.delete_volume.assert_called_once_with(vol._ovo)
self.assertEqual('error_deleting', vol._ovo.status)
def test_extend(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
res = vol.extend(11)
        self.assertEqual(11 * (1024 ** 3), res)  # size is in bytes, not GiB
self.backend.driver.extend_volume.assert_called_once_with(vol._ovo, 11)
self.persistence.set_volume.assert_called_once_with(vol)
self.assertEqual('available', vol.status)
self.assertEqual(11, vol.size)
def test_extend_attached(self):
vol = objects.Volume(self.backend_name, status='in-use', size=10)
vol.local_attach = mock.Mock()
res = vol.extend(11)
self.assertEqual(vol.local_attach.extend.return_value, res)
self.backend.driver.extend_volume.assert_called_once_with(vol._ovo, 11)
vol.local_attach.extend.assert_called_once_with()
self.persistence.set_volume.assert_called_once_with(vol)
self.assertEqual('in-use', vol.status)
self.assertEqual(11, vol.size)
def test_extend_error(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
self.backend.driver.extend_volume.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
vol.extend(11)
self.assertEqual(vol, assert_context.exception.resource)
self.backend.driver.extend_volume.assert_called_once_with(vol._ovo, 11)
self.persistence.set_volume.assert_called_once_with(vol)
self.assertEqual('error', vol.status)
self.assertEqual(10, vol.size)
def test_clone(self):
vol = objects.Volume(self.backend_name, status='available', size=10,
extra_specs={'e': 'v'}, qos_specs={'q': 'qv'})
mock_clone = self.backend.driver.create_cloned_volume
mock_clone.return_value = None
self.assertEqual(0, len(self.backend.volumes))
res = vol.clone(size=11)
mock_clone.assert_called_once_with(res._ovo, vol._ovo)
self.persistence.set_volume.assert_called_once_with(res)
self.assertEqual('available', res._ovo.status)
self.assertEqual(11, res.size)
self.assertEqual(vol.id, vol.volume_type_id)
self.assertNotEqual(vol.id, res.id)
self.assertEqual(res.id, res.volume_type_id)
self.assertEqual(vol.volume_type.extra_specs,
res.volume_type.extra_specs)
self.assertEqual(vol.volume_type.qos_specs.specs,
res.volume_type.qos_specs.specs)
self.assertEqual(vol.id, res.source_volid)
self.assertEqual(1, len(self.backend.volumes))
self.assertIsInstance(self.backend.volumes[0], objects.Volume)
def test_clone_error(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_clone = self.backend.driver.create_cloned_volume
mock_clone.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
vol.clone(size=11)
# Cloning volume is still in flight
self.assertEqual(1, len(self.backend._volumes_inflight))
new_vol = list(self.backend._volumes_inflight.values())[0]
self.assertEqual(new_vol, assert_context.exception.resource)
mock_clone.assert_called_once_with(new_vol, vol._ovo)
self.persistence.set_volume.assert_called_once_with(new_vol)
self.assertEqual('error', new_vol._ovo.status)
self.assertEqual(11, new_vol.size)
def test_create_snapshot(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_create = self.backend.driver.create_snapshot
mock_create.return_value = None
snap = vol.create_snapshot()
self.assertEqual([snap], vol.snapshots)
self.assertEqual([snap._ovo], vol._ovo.snapshots.objects)
mock_create.assert_called_once_with(snap._ovo)
self.assertEqual('available', snap.status)
self.assertEqual(10, snap.volume_size)
self.persistence.set_snapshot.assert_called_once_with(snap)
def test_create_snapshot_error(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_create = self.backend.driver.create_snapshot
mock_create.side_effect = exception.NotFound
self.assertRaises(exception.NotFound, vol.create_snapshot)
self.assertEqual(1, len(vol.snapshots))
snap = vol.snapshots[0]
self.persistence.set_snapshot.assert_called_once_with(snap)
self.assertEqual('error', snap.status)
mock_create.assert_called_once_with(snap._ovo)
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach(self, mock_connect, mock_conn_props):
vol = objects.Volume(self.backend_name, status='available', size=10)
res = vol.attach()
mock_conn_props.assert_called_once_with(
self.backend.configuration.use_multipath_for_image_xfer,
self.backend.configuration.enforce_multipath_for_image_xfer)
mock_connect.assert_called_once_with(mock_conn_props.return_value)
mock_connect.return_value.attach.assert_called_once_with()
self.assertEqual(mock_connect.return_value, res)
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach_error_connect(self, mock_connect, mock_conn_props):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_connect.side_effect = exception.NotFound
self.assertRaises(exception.NotFound, vol.attach)
mock_conn_props.assert_called_once_with(
self.backend.configuration.use_multipath_for_image_xfer,
self.backend.configuration.enforce_multipath_for_image_xfer)
mock_connect.assert_called_once_with(mock_conn_props.return_value)
mock_connect.return_value.attach.assert_not_called()
@mock.patch('cinderlib.objects.Volume.disconnect')
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach_error_attach(self, mock_connect, mock_conn_props,
mock_disconnect):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_attach = mock_connect.return_value.attach
mock_attach.side_effect = exception.NotFound
self.assertRaises(exception.NotFound, vol.attach)
mock_conn_props.assert_called_once_with(
self.backend.configuration.use_multipath_for_image_xfer,
self.backend.configuration.enforce_multipath_for_image_xfer)
mock_connect.assert_called_once_with(mock_conn_props.return_value)
mock_disconnect.assert_called_once_with(mock_connect.return_value)
def test_detach_not_local(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
self.assertRaises(exception.NotLocal, vol.detach)
def test_detach(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_conn = mock.Mock()
vol.local_attach = mock_conn
vol.detach(mock.sentinel.force, mock.sentinel.ignore_errors)
mock_conn.detach.assert_called_once_with(mock.sentinel.force,
mock.sentinel.ignore_errors,
mock.ANY)
mock_conn.disconnect.assert_called_once_with(mock.sentinel.force)
def test_detach_error_detach(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_conn = mock.Mock()
mock_conn.detach.side_effect = exception.NotFound
vol.local_attach = mock_conn
self.assertRaises(exception.NotFound,
vol.detach,
False, mock.sentinel.ignore_errors)
mock_conn.detach.assert_called_once_with(False,
mock.sentinel.ignore_errors,
mock.ANY)
mock_conn.disconnect.assert_not_called()
def test_detach_error_disconnect(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_conn = mock.Mock()
mock_conn.disconnect.side_effect = exception.NotFound
vol.local_attach = mock_conn
self.assertRaises(objects.brick_exception.ExceptionChainer,
vol.detach,
mock.sentinel.force, False)
mock_conn.detach.assert_called_once_with(mock.sentinel.force,
False,
mock.ANY)
mock_conn.disconnect.assert_called_once_with(mock.sentinel.force)
@mock.patch('cinderlib.objects.Connection.connect')
def test_connect(self, mock_connect):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_connect.return_value._ovo = objects.cinder_objs.VolumeAttachment()
mock_export = self.backend.driver.create_export
mock_export.return_value = None
res = vol.connect(mock.sentinel.conn_dict)
mock_connect.assert_called_once_with(vol, mock.sentinel.conn_dict)
self.assertEqual([res], vol.connections)
self.assertEqual([res._ovo], vol._ovo.volume_attachment.objects)
self.assertEqual('in-use', vol.status)
self.persistence.set_volume.assert_called_once_with(vol)
@mock.patch('cinderlib.objects.Volume._remove_export')
@mock.patch('cinderlib.objects.Connection.connect')
def test_connect_error(self, mock_connect, mock_remove_export):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_export = self.backend.driver.create_export
mock_export.return_value = None
mock_connect.side_effect = exception.NotFound
self.assertRaises(exception.NotFound,
vol.connect, mock.sentinel.conn_dict)
mock_connect.assert_called_once_with(vol, mock.sentinel.conn_dict)
self.assertEqual('available', vol.status)
self.persistence.set_volume.assert_not_called()
mock_remove_export.assert_called_once_with()
@mock.patch('cinderlib.objects.Volume._disconnect')
def test_disconnect(self, mock_disconnect):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_conn = mock.Mock()
vol.disconnect(mock_conn, mock.sentinel.force)
mock_conn._disconnect.assert_called_once_with(mock.sentinel.force)
mock_disconnect.assert_called_once_with(mock_conn)
@mock.patch('cinderlib.objects.Volume._connection_removed')
@mock.patch('cinderlib.objects.Volume._remove_export')
def test__disconnect(self, mock_remove_export, mock_conn_removed):
vol = objects.Volume(self.backend_name, status='in-use', size=10)
vol._disconnect(mock.sentinel.connection)
mock_remove_export.assert_called_once_with()
mock_conn_removed.assert_called_once_with(mock.sentinel.connection)
self.assertEqual('available', vol.status)
self.persistence.set_volume.assert_called_once_with(vol)
def test__remove_export(self):
vol = objects.Volume(self.backend_name, status='in-use', size=10)
vol._remove_export()
self.backend.driver.remove_export.assert_called_once_with(vol._context,
vol._ovo)
@mock.patch('cinderlib.objects.Volume._remove_export')
def test_cleanup(self, mock_remove_export):
vol = objects.Volume(self.backend_name, status='in-use', size=10)
connections = [mock.Mock(), mock.Mock()]
vol._connections = connections
vol.cleanup()
mock_remove_export.assert_called_once_with()
for c in connections:
c.detach.assert_called_once_with()
def test__snapshot_removed_not_loaded(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
vol._snapshots = None
snap = objects.Snapshot(vol)
# Just check it doesn't break
vol._snapshot_removed(snap)
def test__snapshot_removed_not_present(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
snap = objects.Snapshot(vol)
snap2 = objects.Snapshot(vol)
vol._snapshots = [snap2]
vol._ovo.snapshots.objects = [snap2._ovo]
# Just check it doesn't break or remove any other snaps
vol._snapshot_removed(snap)
self.assertEqual([snap2], vol._snapshots)
self.assertEqual([snap2._ovo], vol._ovo.snapshots.objects)
def test__snapshot_removed(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
snap = objects.Snapshot(vol)
snap2 = objects.Snapshot(vol)
snap_other_instance = objects.Snapshot(vol, id=snap.id,
description='d')
snap_other_instance2 = objects.Snapshot(vol, id=snap.id,
description='e')
vol._snapshots = [snap2, snap_other_instance]
vol._ovo.snapshots.objects = [snap2._ovo, snap_other_instance2._ovo]
# Just check it doesn't break or remove any other snaps
vol._snapshot_removed(snap)
self.assertEqual([snap2], vol._snapshots)
self.assertEqual([snap2._ovo], vol._ovo.snapshots.objects)
def test__connection_removed_not_loaded(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
vol._connections = None
conn = objects.Connection(self.backend, connection_info={'conn': {}})
# Just check it doesn't break
vol._connection_removed(conn)
def test__connection_removed_not_present(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
conn = objects.Connection(self.backend, connection_info={'conn': {}})
conn2 = objects.Connection(self.backend, connection_info={'conn': {}})
vol._connections = [conn2]
vol._ovo.volume_attachment.objects = [conn2._ovo]
        # Just check it doesn't break or remove any other connections
vol._connection_removed(conn)
self.assertEqual([conn2], vol._connections)
self.assertEqual([conn2._ovo], vol._ovo.volume_attachment.objects)
def test__connection_removed(self):
vol = objects.Volume(self.backend, size=10)
conn = objects.Connection(self.backend, connection_info={'conn': {}})
conn2 = objects.Connection(self.backend, connection_info={'conn': {}})
conn_other_instance = objects.Connection(self.backend, id=conn.id,
connection_info={'conn': {}})
conn_other_instance2 = objects.Connection(self.backend, id=conn.id,
connection_info={'conn': {}})
vol._connections = [conn2, conn_other_instance]
vol._ovo.volume_attachment.objects = [conn2._ovo,
conn_other_instance2._ovo]
        # Removal must match on id, not on instance, and leave other
        # connections untouched
vol._connection_removed(conn)
self.assertEqual([conn2], vol._connections)
self.assertEqual([conn2._ovo], vol._ovo.volume_attachment.objects)


@@ -1,561 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
import cinderlib
from cinderlib.persistence import base as persistence_base
from cinderlib.tests.unit.persistence import helper
from cinderlib.tests.unit import utils
class BasePersistenceTest(helper.TestHelper):
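    """Shared tests that every persistence plugin must pass.

    Concrete plugin test classes subclass this, set PERSISTENCE_CFG, and
    implement the test_db and test_set_* methods that raise
    NotImplementedError here.
    """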
def setUp(self):
super(BasePersistenceTest, self).setUp()
def assertListEqualObj(self, expected, actual):
exp = [self._convert_to_dict(e) for e in expected]
act = [self._convert_to_dict(a) for a in actual]
self.assertListEqual(exp, act)
def assertEqualObj(self, expected, actual):
exp = self._convert_to_dict(expected)
act = self._convert_to_dict(actual)
self.assertDictEqual(exp, act)
def test_db(self):
raise NotImplementedError('Test class must implement this method')
def test_set_volume(self):
raise NotImplementedError('Test class must implement this method')
def test_get_volumes_all(self):
vols = self.create_n_volumes(2)
res = self.persistence.get_volumes()
self.assertListEqualObj(vols, self.sorted(res))
def test_get_volumes_by_id(self):
vols = self.create_n_volumes(2)
res = self.persistence.get_volumes(volume_id=vols[1].id)
        # Use res instead of res[0] in case res is an empty list
self.assertListEqualObj([vols[1]], res)
def test_get_volumes_by_id_not_found(self):
self.create_n_volumes(2)
res = self.persistence.get_volumes(volume_id='fake-uuid')
self.assertListEqualObj([], res)
def test_get_volumes_by_name_single(self):
vols = self.create_n_volumes(2)
res = self.persistence.get_volumes(volume_name=vols[1].name)
self.assertListEqualObj([vols[1]], res)
def test_get_volumes_by_name_multiple(self):
volume_name = 'disk'
vols = self.create_volumes([{'size': 1, 'name': volume_name},
{'size': 2, 'name': volume_name}])
res = self.persistence.get_volumes(volume_name=volume_name)
self.assertListEqualObj(vols, self.sorted(res))
def test_get_volumes_by_name_not_found(self):
self.create_n_volumes(2)
res = self.persistence.get_volumes(volume_name='disk3')
self.assertListEqualObj([], res)
def test_get_volumes_by_backend(self):
vols = self.create_n_volumes(2)
backend2 = utils.FakeBackend(volume_backend_name='fake2')
vol = self.create_volumes([{'backend_or_vol': backend2, 'size': 3}])
res = self.persistence.get_volumes(backend_name=self.backend.id)
self.assertListEqualObj(vols, self.sorted(res))
res = self.persistence.get_volumes(backend_name=backend2.id)
self.assertListEqualObj(vol, res)
def test_get_volumes_by_backend_not_found(self):
self.create_n_volumes(2)
res = self.persistence.get_volumes(backend_name='fake2')
self.assertListEqualObj([], res)
def test_get_volumes_by_multiple(self):
volume_name = 'disk'
vols = self.create_volumes([{'size': 1, 'name': volume_name},
{'size': 2, 'name': volume_name}])
res = self.persistence.get_volumes(backend_name=self.backend.id,
volume_name=volume_name,
volume_id=vols[0].id)
self.assertListEqualObj([vols[0]], res)
def test_get_volumes_by_multiple_not_found(self):
vols = self.create_n_volumes(2)
res = self.persistence.get_volumes(backend_name=self.backend.id,
volume_name=vols[1].name,
volume_id=vols[0].id)
self.assertListEqualObj([], res)
def _check_volume_type(self, extra_specs, qos_specs, vol):
self.assertEqual(vol.id, vol.volume_type.id)
self.assertEqual(vol.id, vol.volume_type.name)
self.assertTrue(vol.volume_type.is_public)
self.assertEqual(extra_specs, vol.volume_type.extra_specs)
if qos_specs:
self.assertEqual(vol.id, vol.volume_type.qos_specs_id)
self.assertEqual(vol.id, vol.volume_type.qos_specs.id)
self.assertEqual(vol.id, vol.volume_type.qos_specs.name)
self.assertEqual('back-end', vol.volume_type.qos_specs.consumer)
self.assertEqual(qos_specs, vol.volume_type.qos_specs.specs)
else:
self.assertIsNone(vol.volume_type.qos_specs_id)
def test_get_volumes_extra_specs(self):
extra_specs = [{'k1': 'v1', 'k2': 'v2'},
{'kk1': 'vv1', 'kk2': 'vv2', 'kk3': 'vv3'}]
vols = self.create_volumes(
[{'size': 1, 'extra_specs': extra_specs[0]},
{'size': 2, 'extra_specs': extra_specs[1]}],
sort=False)
# Check the volume type and the extra specs on created volumes
for i in range(len(vols)):
self._check_volume_type(extra_specs[i], None, vols[i])
# Check that we get what we stored
res = self.persistence.get_volumes(backend_name=self.backend.id)
vols = self.sorted(vols)
self.assertListEqualObj(vols, self.sorted(res))
for i in range(len(vols)):
self._check_volume_type(vols[i].volume_type.extra_specs, {},
vols[i])
def test_get_volumes_qos_specs(self):
qos_specs = [{'q1': 'r1', 'q2': 'r2'},
{'qq1': 'rr1', 'qq2': 'rr2', 'qq3': 'rr3'}]
vols = self.create_volumes(
[{'size': 1, 'qos_specs': qos_specs[0]},
{'size': 2, 'qos_specs': qos_specs[1]}],
sort=False)
        # Check the volume type and the qos specs on created volumes
for i in range(len(vols)):
self._check_volume_type({}, qos_specs[i], vols[i])
# Check that we get what we stored
res = self.persistence.get_volumes(backend_name=self.backend.id)
vols = self.sorted(vols)
res = self.sorted(res)
self.assertListEqualObj(vols, res)
for i in range(len(vols)):
self._check_volume_type({}, vols[i].volume_type.qos_specs.specs,
vols[i])
def test_get_volumes_extra_and_qos_specs(self):
qos_specs = [{'q1': 'r1', 'q2': 'r2'},
{'qq1': 'rr1', 'qq2': 'rr2', 'qq3': 'rr3'}]
extra_specs = [{'k1': 'v1', 'k2': 'v2'},
{'kk1': 'vv1', 'kk2': 'vv2', 'kk3': 'vv3'}]
vols = self.create_volumes(
[{'size': 1, 'qos_specs': qos_specs[0],
'extra_specs': extra_specs[0]},
{'size': 2, 'qos_specs': qos_specs[1],
'extra_specs': extra_specs[1]}],
sort=False)
        # Check the volume type, extra specs, and qos specs on created volumes
for i in range(len(vols)):
self._check_volume_type(extra_specs[i], qos_specs[i], vols[i])
# Check that we get what we stored
res = self.persistence.get_volumes(backend_name=self.backend.id)
vols = self.sorted(vols)
self.assertListEqualObj(vols, self.sorted(res))
for i in range(len(vols)):
self._check_volume_type(vols[i].volume_type.extra_specs,
vols[i].volume_type.qos_specs.specs,
vols[i])
def test_delete_volume(self):
vols = self.create_n_volumes(2)
self.persistence.delete_volume(vols[0])
res = self.persistence.get_volumes()
self.assertListEqualObj([vols[1]], res)
def test_delete_volume_not_found(self):
vols = self.create_n_volumes(2)
fake_vol = cinderlib.Volume(backend_or_vol=self.backend)
self.persistence.delete_volume(fake_vol)
res = self.persistence.get_volumes()
self.assertListEqualObj(vols, self.sorted(res))
def test_set_snapshot(self):
raise NotImplementedError('Test class must implement this method')
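    # NOTE: this method and the get_connections_all/get_key_values_all
    # methods below lack the test_ prefix, so the default test runner does
    # not pick them up.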
def get_snapshots_all(self):
snaps = self.create_snapshots()
res = self.persistence.get_snapshots()
self.assertListEqualObj(snaps, self.sorted(res))
def test_get_snapshots_by_id(self):
snaps = self.create_snapshots()
res = self.persistence.get_snapshots(snapshot_id=snaps[1].id)
self.assertListEqualObj([snaps[1]], res)
def test_get_snapshots_by_id_not_found(self):
self.create_snapshots()
res = self.persistence.get_snapshots(snapshot_id='fake-uuid')
self.assertListEqualObj([], res)
def test_get_snapshots_by_name_single(self):
snaps = self.create_snapshots()
res = self.persistence.get_snapshots(snapshot_name=snaps[1].name)
self.assertListEqualObj([snaps[1]], res)
def test_get_snapshots_by_name_multiple(self):
snap_name = 'snap'
vol = self.create_volumes([{'size': 1}])[0]
snaps = [cinderlib.Snapshot(vol, name=snap_name) for i in range(2)]
[self.persistence.set_snapshot(snap) for snap in snaps]
res = self.persistence.get_snapshots(snapshot_name=snap_name)
self.assertListEqualObj(self.sorted(snaps), self.sorted(res))
def test_get_snapshots_by_name_not_found(self):
self.create_snapshots()
res = self.persistence.get_snapshots(snapshot_name='snap3')
self.assertListEqualObj([], res)
def test_get_snapshots_by_volume(self):
snaps = self.create_snapshots()
vol = snaps[0].volume
expected_snaps = [snaps[0], cinderlib.Snapshot(vol)]
self.persistence.set_snapshot(expected_snaps[1])
res = self.persistence.get_snapshots(volume_id=vol.id)
self.assertListEqualObj(self.sorted(expected_snaps), self.sorted(res))
def test_get_snapshots_by_volume_not_found(self):
self.create_snapshots()
res = self.persistence.get_snapshots(volume_id='fake_uuid')
self.assertListEqualObj([], res)
def test_get_snapshots_by_multiple(self):
snap_name = 'snap'
vol = self.create_volumes([{'size': 1}])[0]
snaps = [cinderlib.Snapshot(vol, name=snap_name) for i in range(2)]
[self.persistence.set_snapshot(snap) for snap in snaps]
res = self.persistence.get_snapshots(volume_id=vol.id,
snapshot_name=snap_name,
snapshot_id=snaps[0].id)
self.assertListEqualObj([snaps[0]], self.sorted(res))
def test_get_snapshots_by_multiple_not_found(self):
snaps = self.create_snapshots()
res = self.persistence.get_snapshots(snapshot_name=snaps[1].name,
volume_id=snaps[0].volume.id)
self.assertListEqualObj([], res)
def test_delete_snapshot(self):
snaps = self.create_snapshots()
self.persistence.delete_snapshot(snaps[0])
res = self.persistence.get_snapshots()
self.assertListEqualObj([snaps[1]], res)
def test_delete_snapshot_not_found(self):
snaps = self.create_snapshots()
fake_snap = cinderlib.Snapshot(snaps[0].volume)
self.persistence.delete_snapshot(fake_snap)
res = self.persistence.get_snapshots()
self.assertListEqualObj(snaps, self.sorted(res))
def test_set_connection(self):
raise NotImplementedError('Test class must implement this method')
def get_connections_all(self):
conns = self.create_connections()
res = self.persistence.get_connections()
self.assertListEqual(conns, self.sorted(res))
def test_get_connections_by_id(self):
conns = self.create_connections()
res = self.persistence.get_connections(connection_id=conns[1].id)
self.assertListEqualObj([conns[1]], res)
def test_get_connections_by_id_not_found(self):
self.create_connections()
res = self.persistence.get_connections(connection_id='fake-uuid')
self.assertListEqualObj([], res)
def test_get_connections_by_volume(self):
conns = self.create_connections()
vol = conns[0].volume
expected_conns = [conns[0], cinderlib.Connection(
self.backend, volume=vol, connection_info={'conn': {'data': {}}})]
self.persistence.set_connection(expected_conns[1])
res = self.persistence.get_connections(volume_id=vol.id)
self.assertListEqualObj(self.sorted(expected_conns), self.sorted(res))
def test_get_connections_by_volume_not_found(self):
self.create_connections()
res = self.persistence.get_connections(volume_id='fake_uuid')
self.assertListEqualObj([], res)
def test_get_connections_by_multiple(self):
vol = self.create_volumes([{'size': 1}])[0]
conns = [cinderlib.Connection(self.backend, volume=vol,
connection_info={'conn': {'data': {}}})
for i in range(2)]
[self.persistence.set_connection(conn) for conn in conns]
res = self.persistence.get_connections(volume_id=vol.id,
connection_id=conns[0].id)
self.assertListEqualObj([conns[0]], self.sorted(res))
def test_get_connections_by_multiple_not_found(self):
conns = self.create_connections()
res = self.persistence.get_connections(volume_id=conns[0].volume.id,
connection_id=conns[1].id)
self.assertListEqualObj([], res)
def test_delete_connection(self):
conns = self.create_connections()
self.persistence.delete_connection(conns[1])
res = self.persistence.get_connections()
self.assertListEqualObj([conns[0]], res)
def test_delete_connection_not_found(self):
conns = self.create_connections()
fake_conn = cinderlib.Connection(
self.backend,
volume=conns[0].volume,
connection_info={'conn': {'data': {}}})
self.persistence.delete_connection(fake_conn)
res = self.persistence.get_connections()
self.assertListEqualObj(conns, self.sorted(res))
def test_set_key_values(self):
raise NotImplementedError('Test class must implement this method')
    def assertKVsEqual(self, expected, actual):
        if len(expected) == len(actual):
            for (key, value), act in zip(expected, actual):
                self.assertEqual(key, act.key)
                self.assertEqual(value, act.value)
            return
        self.fail('%s is not equal to %s' % (expected, actual))
def get_key_values_all(self):
kvs = self.create_key_values()
res = self.persistence.get_key_values()
self.assertListEqual(kvs, self.sorted(res, 'key'))
def test_get_key_values_by_key(self):
kvs = self.create_key_values()
res = self.persistence.get_key_values(key=kvs[1].key)
self.assertListEqual([kvs[1]], res)
def test_get_key_values_by_key_not_found(self):
self.create_key_values()
res = self.persistence.get_key_values(key='fake-uuid')
self.assertListEqual([], res)
def test_delete_key_value(self):
kvs = self.create_key_values()
self.persistence.delete_key_value(kvs[1])
res = self.persistence.get_key_values()
self.assertListEqual([kvs[0]], res)
def test_delete_key_not_found(self):
kvs = self.create_key_values()
fake_key = cinderlib.KeyValue('fake-key')
self.persistence.delete_key_value(fake_key)
res = self.persistence.get_key_values()
self.assertListEqual(kvs, self.sorted(res, 'key'))
@mock.patch('cinderlib.persistence.base.DB.volume_type_get')
def test__volume_type_get_by_name(self, get_mock):
# Only test when using our fake DB class. We cannot use
        # unittest.skipUnless because persistence is configured in setUpClass,
# which is called after the decorator.
if not isinstance(cinderlib.objects.Backend.persistence.db,
persistence_base.DB):
return
        # Volume type id and name are the same, so the by-name lookup must
        # behave like the by-id one
res = self.persistence.db._volume_type_get_by_name(self.context,
mock.sentinel.name)
self.assertEqual(get_mock.return_value, res)
get_mock.assert_called_once_with(self.context, mock.sentinel.name)
def test_volume_type_get_by_id(self):
extra_specs = [{'k1': 'v1', 'k2': 'v2'},
{'kk1': 'vv1', 'kk2': 'vv2', 'kk3': 'vv3'}]
vols = self.create_volumes(
[{'size': 1, 'extra_specs': extra_specs[0]},
{'size': 2, 'extra_specs': extra_specs[1]}],
sort=False)
res = self.persistence.db.volume_type_get(self.context, vols[0].id)
self.assertEqual(vols[0].id, res['id'])
self.assertEqual(vols[0].id, res['name'])
self.assertEqual(extra_specs[0], res['extra_specs'])
def test_volume_get_all_by_host(self):
# Only test when using our fake DB class. We cannot use
        # unittest.skipUnless because persistence is configured in setUpClass,
# which is called after the decorator.
if not isinstance(cinderlib.objects.Backend.persistence.db,
persistence_base.DB):
return
persistence_db = self.persistence.db
host = '%s@%s' % (cfg.CONF.host, self.backend.id)
vols = [v._ovo for v in self.create_n_volumes(2)]
backend2 = utils.FakeBackend(volume_backend_name='fake2')
vol = self.create_volumes([{'backend_or_vol': backend2, 'size': 3}])
# We should be able to get it using the host@backend
res = persistence_db.volume_get_all_by_host(self.context, host)
self.assertListEqualObj(vols, self.sorted(res))
# Confirm it also works when we pass a host that includes the pool
res = persistence_db.volume_get_all_by_host(self.context, vols[0].host)
self.assertListEqualObj(vols, self.sorted(res))
# Check we also get the other backend's volume
host = '%s@%s' % (cfg.CONF.host, backend2.id)
res = persistence_db.volume_get_all_by_host(self.context, host)
self.assertListEqualObj(vol[0]._ovo, res[0])
def test__volume_admin_metadata_get(self):
# Only test when using our fake DB class. We cannot use
        # unittest.skipUnless because persistence is configured in setUpClass,
# which is called after the decorator.
if not isinstance(cinderlib.objects.Backend.persistence.db,
persistence_base.DB):
return
admin_metadata = {'k': 'v'}
vols = self.create_volumes([{'size': 1,
'admin_metadata': admin_metadata}])
result = self.persistence.db._volume_admin_metadata_get(self.context,
vols[0].id)
self.assertDictEqual(admin_metadata, result)
def test__volume_admin_metadata_update(self):
# Only test when using our fake DB class. We cannot use
        # unittest.skipUnless because persistence is configured in setUpClass,
# which is called after the decorator.
if not isinstance(cinderlib.objects.Backend.persistence.db,
persistence_base.DB):
return
create_admin_metadata = {'k': 'v', 'k2': 'v2'}
admin_metadata = {'k2': 'v2.1', 'k3': 'v3'}
vols = self.create_volumes([{'size': 1,
'admin_metadata': create_admin_metadata}])
self.persistence.db._volume_admin_metadata_update(self.context,
vols[0].id,
admin_metadata,
delete=True,
add=True,
update=True)
result = self.persistence.db._volume_admin_metadata_get(self.context,
vols[0].id)
self.assertDictEqual({'k2': 'v2.1', 'k3': 'v3'}, result)
def test__volume_admin_metadata_update_do_nothing(self):
# Only test when using our fake DB class. We cannot use
        # unittest.skipUnless because persistence is configured in setUpClass,
# which is called after the decorator.
if not isinstance(cinderlib.objects.Backend.persistence.db,
persistence_base.DB):
return
create_admin_metadata = {'k': 'v', 'k2': 'v2'}
admin_metadata = {'k2': 'v2.1', 'k3': 'v3'}
vols = self.create_volumes([{'size': 1,
'admin_metadata': create_admin_metadata}])
# Setting delete, add, and update to False means we don't do anything
self.persistence.db._volume_admin_metadata_update(self.context,
vols[0].id,
admin_metadata,
delete=False,
add=False,
update=False)
result = self.persistence.db._volume_admin_metadata_get(self.context,
vols[0].id)
self.assertDictEqual(create_admin_metadata, result)
def test_volume_admin_metadata_delete(self):
# Only test when using our fake DB class. We cannot use
        # unittest.skipUnless because persistence is configured in setUpClass,
# which is called after the decorator.
if not isinstance(cinderlib.objects.Backend.persistence.db,
persistence_base.DB):
return
admin_metadata = {'k': 'v', 'k2': 'v2'}
vols = self.create_volumes([{'size': 1,
'admin_metadata': admin_metadata}])
self.persistence.db.volume_admin_metadata_delete(self.context,
vols[0].id,
'k2')
result = self.persistence.db._volume_admin_metadata_get(self.context,
vols[0].id)
self.assertDictEqual({'k': 'v'}, result)
@mock.patch('cinderlib.objects.Volume.get_by_id')
@mock.patch('cinderlib.objects.Volume.snapshots',
new_callable=mock.PropertyMock)
@mock.patch('cinderlib.objects.Volume.connections',
new_callable=mock.PropertyMock)
def test_volume_refresh(self, get_conns_mock, get_snaps_mock, get_mock):
vol = self.create_n_volumes(1)[0]
vol_id = vol.id
        # This simulates a situation where the persistence does lazy loading
vol._snapshots = vol._connections = None
get_mock.return_value = cinderlib.Volume(vol)
vol.refresh()
get_mock.assert_called_once_with(vol_id)
get_conns_mock.assert_not_called()
get_snaps_mock.assert_not_called()
self.assertIsNone(vol.local_attach)
@mock.patch('cinderlib.objects.Volume.get_by_id')
@mock.patch('cinderlib.objects.Volume.snapshots',
new_callable=mock.PropertyMock)
@mock.patch('cinderlib.objects.Volume.connections',
new_callable=mock.PropertyMock)
def test_volume_refresh_with_conn_and_snaps(self, get_conns_mock,
get_snaps_mock, get_mock):
vol = self.create_n_volumes(1)[0]
vol_id = vol.id
vol.local_attach = mock.sentinel.local_attach
get_mock.return_value = cinderlib.Volume(vol)
vol.refresh()
get_mock.assert_called_once_with(vol_id)
get_conns_mock.assert_called_once_with()
get_snaps_mock.assert_called_once_with()
self.assertIs(mock.sentinel.local_attach, vol.local_attach)


@@ -1,125 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.cmd import volume as volume_cmd
from cinder.db.sqlalchemy import api
from cinder.db.sqlalchemy import models
from cinder import objects
from cinder.objects import base as cinder_base_ovo
from oslo_versionedobjects import fields
import cinderlib
from cinderlib.tests.unit import base
class TestHelper(base.BaseTest):
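    """Common setup and resource-creation helpers for persistence tests.

    The OVO save and get_by_id methods are saved on setUpClass and
    restored on tearDownClass because some persistence plugins replace
    them.
    """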
@classmethod
def setUpClass(cls):
# Save OVO methods that some persistence plugins mess up
cls.ovo_methods = {}
for ovo_name in cinder_base_ovo.CinderObjectRegistry.obj_classes():
ovo_cls = getattr(objects, ovo_name)
cls.ovo_methods[ovo_name] = {
'save': getattr(ovo_cls, 'save', None),
'get_by_id': getattr(ovo_cls, 'get_by_id', None),
}
cls.original_impl = volume_cmd.session.IMPL
cinderlib.Backend.global_initialization = False
cinderlib.setup(persistence_config=cls.PERSISTENCE_CFG)
@classmethod
def tearDownClass(cls):
volume_cmd.session.IMPL = cls.original_impl
cinderlib.Backend.global_initialization = False
# Cannot just replace the context manager itself because it is already
# decorating cinder DB methods and those would continue accessing the
        # old database, so we replace the existing CM's internal transaction
        # factory, effectively "resetting" the context manager.
cm = api.main_context_manager
if cm.is_started:
cm._root_factory = api.enginefacade._TransactionFactory()
for ovo_name, methods in cls.ovo_methods.items():
ovo_cls = getattr(objects, ovo_name)
for method_name, method in methods.items():
if method:
setattr(ovo_cls, method_name, method)
def setUp(self):
super(TestHelper, self).setUp()
self.context = cinderlib.objects.CONTEXT
def sorted(self, resources, key='id'):
return sorted(resources, key=lambda x: getattr(x, key))
def create_n_volumes(self, n):
return self.create_volumes([{'size': i, 'name': 'disk%s' % i}
for i in range(1, n + 1)])
def create_volumes(self, data, sort=True):
vols = []
for d in data:
d.setdefault('backend_or_vol', self.backend)
vol = cinderlib.Volume(**d)
vols.append(vol)
self.persistence.set_volume(vol)
if sort:
return self.sorted(vols)
return vols
def create_snapshots(self):
vols = self.create_n_volumes(2)
snaps = []
for i, vol in enumerate(vols):
            snap = cinderlib.Snapshot(vol, name='snaps%s' % (i + 1))
snaps.append(snap)
self.persistence.set_snapshot(snap)
return self.sorted(snaps)
def create_connections(self):
vols = self.create_n_volumes(2)
conns = []
for i, vol in enumerate(vols):
conn = cinderlib.Connection(self.backend, volume=vol,
connection_info={'conn': {'data': {}}})
conns.append(conn)
self.persistence.set_connection(conn)
return self.sorted(conns)
def create_key_values(self):
kvs = []
for i in range(2):
kv = cinderlib.KeyValue(key='key%i' % i, value='value%i' % i)
kvs.append(kv)
self.persistence.set_key_value(kv)
return kvs
def _convert_to_dict(self, obj):
if isinstance(obj, models.BASE):
return dict(obj)
if not isinstance(obj, cinderlib.objects.Object):
return obj
res = dict(obj._ovo)
for key, value in obj._ovo.fields.items():
if isinstance(value, fields.ObjectField):
res.pop(key, None)
res.pop('glance_metadata', None)
res.pop('metadata', None)
return res


@@ -1,43 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cinderlib
from cinderlib.tests.unit.persistence import helper
class TestBasePersistence(helper.TestHelper):
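    """Exercise base persistence behavior through the memory plugin."""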
PERSISTENCE_CFG = {'storage': 'memory'}
def tearDown(self):
self.persistence.volumes.clear()
self.persistence.snapshots.clear()
self.persistence.connections.clear()
self.persistence.key_values.clear()
super(TestBasePersistence, self).tearDown()
def test_get_changed_fields_volume(self):
vol = cinderlib.Volume(self.backend, size=1, extra_specs={'k': 'v'})
self.persistence.set_volume(vol)
vol._ovo.display_name = "abcde"
result = self.persistence.get_changed_fields(vol)
self.assertEqual(result, {'display_name': vol._ovo.display_name})
def test_get_changed_fields_snapshot(self):
vol = cinderlib.Volume(self.backend, size=1, extra_specs={'k': 'v'})
snap = cinderlib.Snapshot(vol)
self.persistence.set_snapshot(snap)
snap._ovo.display_name = "abcde"
result = self.persistence.get_changed_fields(snap)
self.assertEqual(result, {'display_name': snap._ovo.display_name})


@@ -1,173 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from unittest import mock
import alembic.script.revision
import alembic.util.exc
from cinder.db.sqlalchemy import api as sqla_api
from cinder.db.sqlalchemy import models as sqla_models
from cinder import objects as cinder_ovos
from oslo_db import api as oslo_db_api
import cinderlib
from cinderlib.persistence import dbms
from cinderlib.tests.unit.persistence import base
class TestDBPersistence(base.BasePersistenceTest):
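    """Run the shared persistence tests against a file-backed SQLite DB."""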
CONNECTION = 'sqlite:///' + tempfile.NamedTemporaryFile().name
PERSISTENCE_CFG = {'storage': 'db',
'connection': CONNECTION}
def tearDown(self):
super(TestDBPersistence, self).tearDown()
with sqla_api.main_context_manager.writer.using(self.context):
sqla_api.model_query(self.context, sqla_models.Snapshot).delete()
sqla_api.model_query(self.context,
sqla_models.VolumeAttachment).delete()
sqla_api.model_query(self.context, sqla_models.Volume).delete()
self.context.session.query(dbms.KeyValue).delete()
def test_db(self):
self.assertIsInstance(self.persistence.db,
oslo_db_api.DBAPI)
def test_set_volume(self):
res = sqla_api.volume_get_all(self.context)
self.assertListEqual([], res)
vol = cinderlib.Volume(self.backend, size=1, name='disk')
expected = {'availability_zone': vol.availability_zone,
'size': vol.size, 'name': vol.name}
self.persistence.set_volume(vol)
db_vol = sqla_api.volume_get(self.context, vol.id)
actual = {'availability_zone': db_vol.availability_zone,
'size': db_vol.size, 'name': db_vol.display_name}
self.assertDictEqual(expected, actual)
def test_set_snapshot(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
# This will assign a volume type, which is necessary for the snapshot
vol.save()
snap = cinderlib.Snapshot(vol, name='disk')
self.assertEqual(0, len(sqla_api.snapshot_get_all(self.context)))
self.persistence.set_snapshot(snap)
db_entries = sqla_api.snapshot_get_all(self.context)
self.assertEqual(1, len(db_entries))
ovo_snap = cinder_ovos.Snapshot(self.context)
ovo_snap._from_db_object(ovo_snap._context, ovo_snap, db_entries[0])
cl_snap = cinderlib.Snapshot(vol, __ovo=ovo_snap)
self.assertEqualObj(snap, cl_snap)
def test_set_connection(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
conn = cinderlib.Connection(self.backend, volume=vol, connector={},
connection_info={'conn': {'data': {}}})
self.assertEqual(0,
len(sqla_api.volume_attachment_get_all(self.context)))
self.persistence.set_connection(conn)
db_entries = sqla_api.volume_attachment_get_all(self.context)
self.assertEqual(1, len(db_entries))
ovo_conn = cinder_ovos.VolumeAttachment(self.context)
ovo_conn._from_db_object(ovo_conn._context, ovo_conn, db_entries[0])
cl_conn = cinderlib.Connection(vol.backend, volume=vol, __ovo=ovo_conn)
self.assertEqualObj(conn, cl_conn)
def test_set_key_values(self):
with sqla_api.main_context_manager.reader.using(self.context):
res = self.context.session.query(dbms.KeyValue).all()
self.assertListEqual([], res)
expected = [dbms.KeyValue(key='key', value='value')]
self.persistence.set_key_value(expected[0])
with sqla_api.main_context_manager.reader.using(self.context):
actual = self.context.session.query(dbms.KeyValue).all()
self.assertListEqualObj(expected, actual)
def test_create_volume_with_default_volume_type(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
self.persistence.set_volume(vol)
self.assertEqual(self.persistence.DEFAULT_TYPE.id, vol.volume_type_id)
self.assertIs(self.persistence.DEFAULT_TYPE, vol.volume_type)
res = sqla_api.volume_type_get(self.context, vol.volume_type_id)
self.assertIsNotNone(res)
self.assertEqual('__DEFAULT__', res['name'])
def test_default_volume_type(self):
self.assertIsInstance(self.persistence.DEFAULT_TYPE,
cinder_ovos.VolumeType)
self.assertEqual('__DEFAULT__', self.persistence.DEFAULT_TYPE.name)
def test_delete_volume_with_metadata(self):
vols = self.create_volumes([{'size': i, 'name': 'disk%s' % i,
'metadata': {'k': 'v', 'k2': 'v2'},
'admin_metadata': {'k': '1'}}
for i in range(1, 3)])
self.persistence.delete_volume(vols[0])
res = self.persistence.get_volumes()
self.assertListEqualObj([vols[1]], res)
for model in (dbms.models.VolumeMetadata,
dbms.models.VolumeAdminMetadata):
with sqla_api.main_context_manager.reader.using(self.context):
query = dbms.sqla_api.model_query(self.context, model)
res = query.filter_by(volume_id=vols[0].id).all()
self.assertEqual([], res)
class TestDBPersistenceNewerSchema(base.helper.TestHelper):
"""Test DBMS plugin can start when the DB has a newer schema."""
CONNECTION = 'sqlite:///' + tempfile.NamedTemporaryFile().name
PERSISTENCE_CFG = {'storage': 'db',
'connection': CONNECTION}
@classmethod
def setUpClass(cls):
pass
def _raise_exc(self):
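        # Run the real db_sync first, then simulate alembic failing to
        # resolve a revision, as happens when the DB schema is newer than
        # the code.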
inner_exc = alembic.script.revision.ResolutionError('foo', 'rev')
outer_exc = alembic.util.exc.CommandError('bar')
self.original_db_sync()
raise outer_exc from inner_exc
def test_newer_db_schema(self):
self.original_db_sync = dbms.migration.db_sync
with mock.patch.object(dbms.migration, 'db_sync',
side_effect=self._raise_exc) as db_sync_mock:
super(TestDBPersistenceNewerSchema, self).setUpClass()
db_sync_mock.assert_called_once()
self.assertIsInstance(cinderlib.Backend.persistence,
dbms.DBPersistence)
class TestMemoryDBPersistence(TestDBPersistence):
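    """Re-run the DBMS persistence tests using the memory_db plugin."""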
PERSISTENCE_CFG = {'storage': 'memory_db'}


@@ -1,92 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cinder import objects as ovos
import cinderlib
from cinderlib import objects
from cinderlib.tests.unit.persistence import base
class TestMemoryPersistence(base.BasePersistenceTest):
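    """Run the shared persistence tests against the memory plugin."""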
PERSISTENCE_CFG = {'storage': 'memory'}
def tearDown(self):
# Since this plugin uses class attributes we have to clear them
self.persistence.volumes.clear()
self.persistence.snapshots.clear()
self.persistence.connections.clear()
self.persistence.key_values.clear()
super(TestMemoryPersistence, self).tearDown()
def test_db(self):
self.assertIsInstance(self.persistence.db,
cinderlib.persistence.base.DB)
self.assertEqual(self.persistence.db._DB__connections_get,
ovos.VolumeAttachmentList.get_all_by_volume_id)
def test___connections_get(self):
"""Check we can get volume_attachment from OVO."""
vol = objects.Volume(self.backend, size=10)
vol._connections = None
delattr(vol._ovo, '_obj_volume_attachment')
conns = [objects.Connection(self.backend, connector={'k': 'v'},
volume_id=vol.id, status='attached',
attach_mode='rw',
connection_info={'conn': {}})]
with mock.patch.object(self.persistence, 'get_connections') \
as get_conns_mock:
get_conns_mock.return_value = conns
res = vol._ovo.volume_attachment
self.assertIsInstance(res, ovos.VolumeAttachmentList)
self.assertEqual(1, len(res))
self.assertEqual(conns[0]._ovo, res.objects[0])
get_conns_mock.assert_called_once_with(volume_id=vol.id)
def test_set_volume(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
self.assertDictEqual({}, self.persistence.volumes)
self.persistence.set_volume(vol)
self.assertDictEqual({vol.id: vol}, self.persistence.volumes)
def test_set_snapshot(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
snap = cinderlib.Snapshot(vol, name='disk')
self.assertDictEqual({}, self.persistence.snapshots)
self.persistence.set_snapshot(snap)
self.assertDictEqual({snap.id: snap}, self.persistence.snapshots)
def test_set_connection(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
conn = cinderlib.Connection(self.backend, volume=vol, connector={},
connection_info={'conn': {'data': {}}})
self.assertDictEqual({}, self.persistence.connections)
self.persistence.set_connection(conn)
self.assertDictEqual({conn.id: conn}, self.persistence.connections)
def test_set_key_values(self):
self.assertDictEqual({}, self.persistence.key_values)
expected = [cinderlib.KeyValue('key', 'value')]
self.persistence.set_key_value(expected[0])
self.assertIn('key', self.persistence.key_values)
self.assertEqual(expected, list(self.persistence.key_values.values()))


@@ -1,716 +0,0 @@
# Copyright (c) 2017, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import configparser
import os
from unittest import mock
from cinder import utils
import ddt
from oslo_config import cfg
from oslo_privsep import priv_context
import cinderlib
from cinderlib import objects
from cinderlib.tests.unit import base
@ddt.ddt
class TestCinderlib(base.BaseTest):
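    """Tests for cinderlib's Backend class: setup, config, and helpers."""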
@ddt.data([], [1], [2])
def test_list_supported_drivers(self, args):
is_v2 = args == [2]
expected_type = dict if is_v2 else str
expected_keys = {'version', 'class_name', 'supported', 'ci_wiki_name',
'driver_options', 'class_fqn', 'desc'}
drivers = cinderlib.Backend.list_supported_drivers(*args)
self.assertNotEqual(0, len(drivers))
for name, driver_info in drivers.items():
self.assertEqual(expected_keys, set(driver_info.keys()))
# Ensure that the RBDDriver has the rbd_keyring_conf option and
# it's not deprecated
if name == 'RBDDriver':
keyring_conf = [conf for conf in driver_info['driver_options']
if conf['dest'] == 'rbd_keyring_conf']
self.assertEqual(1, len(keyring_conf))
expected_value = False if is_v2 else 'False'
self.assertEqual(expected_value,
keyring_conf[0]['deprecated_for_removal'])
for option in driver_info['driver_options']:
self.assertIsInstance(option['type'], expected_type)
if is_v2:
self.assertIn('type_class', option['type'])
else:
for v in option.values():
self.assertIsInstance(v, str)
def test_lib_assignations(self):
self.assertEqual(cinderlib.setup, cinderlib.Backend.global_setup)
self.assertEqual(cinderlib.Backend, cinderlib.objects.Backend)
self.assertEqual(cinderlib.Backend,
cinderlib.objects.Object.backend_class)
@mock.patch('cinderlib.Backend._apply_backend_workarounds')
@mock.patch('oslo_utils.importutils.import_object')
@mock.patch('cinderlib.Backend._get_backend_config')
@mock.patch('cinderlib.Backend.global_setup')
def test_init(self, mock_global_setup, mock_config, mock_import,
mock_workarounds):
cfg.CONF.set_override('host', 'host')
driver_cfg = {'k': 'v', 'k2': 'v2', 'volume_backend_name': 'Test'}
cinderlib.Backend.global_initialization = False
driver = mock_import.return_value
driver.capabilities = {'pools': [{'pool_name': 'default'}]}
backend = objects.Backend(**driver_cfg)
mock_global_setup.assert_called_once_with()
self.assertIn('Test', objects.Backend.backends)
self.assertEqual(backend, objects.Backend.backends['Test'])
mock_config.assert_called_once_with(driver_cfg)
conf = mock_config.return_value
mock_import.assert_called_once_with(conf.volume_driver,
configuration=conf,
db=self.persistence.db,
host='host@Test',
cluster_name=None,
active_backend_id=None)
self.assertEqual(backend.driver, driver)
driver.do_setup.assert_called_once_with(objects.CONTEXT)
driver.check_for_setup_error.assert_called_once_with()
driver.init_capabilities.assert_called_once_with()
driver.set_throttle.assert_called_once_with()
driver.set_initialized.assert_called_once_with()
self.assertEqual(driver_cfg, backend._driver_cfg)
self.assertIsNone(backend._volumes)
driver.get_volume_stats.assert_not_called()
self.assertEqual(('default',), backend.pool_names)
mock_workarounds.assert_called_once_with(mock_config.return_value)
@mock.patch('cinderlib.Backend._apply_backend_workarounds')
@mock.patch('oslo_utils.importutils.import_object')
@mock.patch('cinderlib.Backend._get_backend_config')
@mock.patch('cinderlib.Backend.global_setup')
def test_init_setup(self, mock_global_setup, mock_config, mock_import,
mock_workarounds):
"""Test initialization with the new 'setup' driver method."""
cfg.CONF.set_override('host', 'host')
driver_cfg = {'k': 'v', 'k2': 'v2', 'volume_backend_name': 'Test'}
cinderlib.Backend.global_initialization = False
driver = mock_import.return_value
driver.do_setup.side_effect = AttributeError
driver.capabilities = {'pools': [{'pool_name': 'default'}]}
backend = objects.Backend(**driver_cfg)
mock_global_setup.assert_called_once_with()
self.assertIn('Test', objects.Backend.backends)
self.assertEqual(backend, objects.Backend.backends['Test'])
mock_config.assert_called_once_with(driver_cfg)
conf = mock_config.return_value
mock_import.assert_called_once_with(conf.volume_driver,
configuration=conf,
db=self.persistence.db,
host='host@Test',
cluster_name=None,
active_backend_id=None)
self.assertEqual(backend.driver, driver)
driver.do_setup.assert_called_once_with(objects.CONTEXT)
driver.check_for_setup_error.assert_not_called()
driver.setup.assert_called_once_with(objects.CONTEXT)
driver.init_capabilities.assert_called_once_with()
driver.set_throttle.assert_called_once_with()
driver.set_initialized.assert_called_once_with()
self.assertEqual(driver_cfg, backend._driver_cfg)
self.assertIsNone(backend._volumes)
driver.get_volume_stats.assert_not_called()
self.assertEqual(('default',), backend.pool_names)
mock_workarounds.assert_called_once_with(mock_config.return_value)
@mock.patch.object(objects.Backend, 'global_initialization', True)
@mock.patch.object(objects.Backend, '_apply_backend_workarounds')
@mock.patch('oslo_utils.importutils.import_object')
@mock.patch.object(objects.Backend, '_get_backend_config')
def test_init_call_twice(self, mock_config, mock_import, mock_workarounds):
cinderlib.Backend.global_initialization = False
driver_cfg = {'k': 'v', 'k2': 'v2', 'volume_backend_name': 'Test'}
driver = mock_import.return_value
driver.capabilities = {'pools': [{'pool_name': 'default'}]}
backend = objects.Backend(**driver_cfg)
self.assertEqual(1, mock_config.call_count)
self.assertEqual(1, mock_import.call_count)
self.assertEqual(1, mock_workarounds.call_count)
        # When initializing a Backend with the same configuration the Backend
# class must behave as a singleton and we won't initialize it again
backend_second = objects.Backend(**driver_cfg)
self.assertIs(backend, backend_second)
self.assertEqual(1, mock_config.call_count)
self.assertEqual(1, mock_import.call_count)
self.assertEqual(1, mock_workarounds.call_count)
@mock.patch.object(objects.Backend, 'global_initialization', True)
@mock.patch.object(objects.Backend, '_apply_backend_workarounds')
@mock.patch('oslo_utils.importutils.import_object')
@mock.patch.object(objects.Backend, '_get_backend_config')
def test_init_call_twice_different_config(self, mock_config, mock_import,
mock_workarounds):
cinderlib.Backend.global_initialization = False
driver_cfg = {'k': 'v', 'k2': 'v2', 'volume_backend_name': 'Test'}
driver = mock_import.return_value
driver.capabilities = {'pools': [{'pool_name': 'default'}]}
objects.Backend(**driver_cfg)
self.assertEqual(1, mock_config.call_count)
self.assertEqual(1, mock_import.call_count)
self.assertEqual(1, mock_workarounds.call_count)
# It should fail if we reuse the backend name but change the config
self.assertRaises(ValueError, objects.Backend, k3='v3', **driver_cfg)
self.assertEqual(1, mock_config.call_count)
self.assertEqual(1, mock_import.call_count)
self.assertEqual(1, mock_workarounds.call_count)
@mock.patch('cinderlib.Backend._validate_and_set_options')
@mock.patch.object(cfg, 'CONF')
def test__set_cinder_config(self, conf_mock, validate_mock):
objects.Backend._set_cinder_config('host', 'locks_path',
mock.sentinel.cfg)
self.assertEqual(2, conf_mock.set_default.call_count)
conf_mock.set_default.assert_has_calls(
[mock.call('state_path', os.getcwd()),
mock.call('lock_path', '$state_path', 'oslo_concurrency')])
self.assertEqual(cinderlib.__version__, cfg.CONF.version)
self.assertEqual('locks_path', cfg.CONF.oslo_concurrency.lock_path)
self.assertEqual('file://locks_path',
cfg.CONF.coordination.backend_url)
self.assertEqual('host', cfg.CONF.host)
validate_mock.assert_called_once_with(mock.sentinel.cfg)
self.assertIsNone(cfg._CachedArgumentParser().parse_args())
@mock.patch('cinderlib.Backend._set_priv_helper')
@mock.patch('cinderlib.Backend._set_cinder_config')
@mock.patch('urllib3.disable_warnings')
@mock.patch('cinder.coordination.COORDINATOR')
@mock.patch('cinderlib.Backend._set_logging')
@mock.patch('cinderlib.cinderlib.serialization')
@mock.patch('cinderlib.Backend.set_persistence')
def test_global_setup(self, mock_set_pers, mock_serial, mock_log,
mock_coord, mock_disable_warn, mock_set_config,
mock_priv_helper):
cls = objects.Backend
cls.global_initialization = False
cinder_cfg = {'k': 'v', 'k2': 'v2'}
# Save the current class configuration
saved_cfg = vars(cls).copy()
try:
cls.global_setup(mock.sentinel.locks_path,
mock.sentinel.root_helper,
mock.sentinel.ssl_warnings,
mock.sentinel.disable_logs,
mock.sentinel.non_uuid_ids,
mock.sentinel.backend_info,
mock.sentinel.project_id,
mock.sentinel.user_id,
mock.sentinel.pers_cfg,
mock.sentinel.fail_missing_backend,
mock.sentinel.host,
**cinder_cfg)
mock_set_config.assert_called_once_with(mock.sentinel.host,
mock.sentinel.locks_path,
cinder_cfg)
self.assertEqual(mock.sentinel.fail_missing_backend,
cls.fail_on_missing_backend)
self.assertEqual(mock.sentinel.project_id, cls.project_id)
self.assertEqual(mock.sentinel.user_id, cls.user_id)
self.assertEqual(mock.sentinel.non_uuid_ids, cls.non_uuid_ids)
mock_set_pers.assert_called_once_with(mock.sentinel.pers_cfg)
mock_serial.setup.assert_called_once_with(cls)
mock_log.assert_called_once_with(mock.sentinel.disable_logs)
mock_coord.start.assert_called_once_with()
mock_priv_helper.assert_called_once_with(mock.sentinel.root_helper)
self.assertEqual(2, mock_disable_warn.call_count)
self.assertTrue(cls.global_initialization)
self.assertEqual(mock.sentinel.backend_info,
cls.output_all_backend_info)
finally:
# Restore the class configuration
for k, v in saved_cfg.items():
if not k.startswith('__'):
setattr(cls, k, v)
@mock.patch('cinderlib.cinderlib.LOG.warning')
def test__validate_and_set_options(self, warning_mock):
self.addCleanup(cfg.CONF.clear_override, 'osapi_volume_extension')
self.addCleanup(cfg.CONF.clear_override, 'debug')
# Validate default group config with Boolean and MultiStrOpt
self.backend._validate_and_set_options(
{'debug': True,
'osapi_volume_extension': ['a', 'b', 'c'],
})
        # Global value overrides are left in place
self.assertIs(True, cfg.CONF.debug)
self.assertEqual(['a', 'b', 'c'], cfg.CONF.osapi_volume_extension)
cinder_cfg = {
'volume_driver': 'cinder.volume.drivers.lvm.LVMVolumeDriver',
'volume_group': 'lvm-volumes',
'target_secondary_ip_addresses': ['w.x.y.z', 'a.b.c.d'],
'target_port': 12345,
}
expected_cfg = cinder_cfg.copy()
# Test driver options with String, ListOpt, PortOpt
self.backend._validate_and_set_options(cinder_cfg)
        # Non-global value overrides have been cleaned up
self.assertEqual('cinder-volumes',
cfg.CONF.backend_defaults.volume_group)
self.assertEqual(
[], cfg.CONF.backend_defaults.target_secondary_ip_addresses)
self.assertEqual(3260, cfg.CONF.backend_defaults.target_port)
self.assertEqual(expected_cfg, cinder_cfg)
warning_mock.assert_not_called()
@mock.patch('cinderlib.cinderlib.LOG.warning')
def test__validate_and_set_options_rbd(self, warning_mock):
original_override = cfg.CONF.set_override
original_getattr = cfg.ConfigOpts.GroupAttr.__getattr__
def my_override(option, value, *args):
original_override(option, value, *args)
            # Simulate that the rbd_keyring_conf config option does not exist
if option == 'rbd_keyring_conf':
raise cfg.NoSuchOptError('rbd_keyring_conf')
def my_getattr(self, name):
res = original_getattr(self, name)
            # Simulate that the rbd_keyring_conf config option does not exist
if name == 'rbd_keyring_conf':
raise AttributeError()
return res
self.patch('oslo_config.cfg.ConfigOpts.GroupAttr.__getattr__',
my_getattr)
self.patch('oslo_config.cfg.CONF.set_override',
side_effect=my_override)
cinder_cfg = {'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
'rbd_keyring_conf': '/etc/ceph/ceph.client.adm.keyring',
'rbd_user': 'adm',
'rbd_pool': 'volumes'}
expected_cfg = cinder_cfg.copy()
        # Test driver options (all String type for the RBD driver)
self.backend._validate_and_set_options(cinder_cfg)
self.assertEqual(expected_cfg, cinder_cfg)
        # Non-global value overrides have been cleaned up
self.assertEqual(None, cfg.CONF.backend_defaults.rbd_user)
self.assertEqual('rbd', cfg.CONF.backend_defaults.rbd_pool)
warning_mock.assert_not_called()
@ddt.data(
('debug', 'sure', None),
('target_port', 'abc', 'cinder.volume.drivers.lvm.LVMVolumeDriver'))
@ddt.unpack
def test__validate_and_set_options_failures(self, option, value,
driver):
self.assertRaises(
ValueError,
self.backend._validate_and_set_options,
{'volume_driver': driver,
option: value})
@mock.patch('cinderlib.cinderlib.LOG.warning')
def test__validate_and_set_options_unknown(self, warning_mock):
self.backend._validate_and_set_options(
{'volume_driver': 'cinder.volume.drivers.lvm.LVMVolumeDriver',
'vmware_cluster_name': 'name'})
self.assertEqual(1, warning_mock.call_count)
def test_validate_and_set_options_templates(self):
self.addCleanup(cfg.CONF.clear_override, 'my_ip')
cfg.CONF.set_override('my_ip', '127.0.0.1')
config_options = dict(
volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
volume_backend_name='lvm_iscsi',
volume_group='my-${backend_defaults.volume_backend_name}-vg',
target_ip_address='$my_ip',
)
expected = dict(
volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
volume_backend_name='lvm_iscsi',
volume_group='my-lvm_iscsi-vg',
target_ip_address='127.0.0.1',
)
self.backend._validate_and_set_options(config_options)
self.assertDictEqual(expected, config_options)
        # Non-global value overrides have been cleaned up
self.assertEqual('cinder-volumes',
cfg.CONF.backend_defaults.volume_group)
@mock.patch('cinderlib.cinderlib.Backend._validate_and_set_options')
def test__get_backend_config(self, mock_validate):
def my_validate(*args):
# Simulate the cache clear happening in _validate_and_set_options
cfg.CONF.clear_override('my_ip')
mock_validate.side_effect = my_validate
config_options = dict(
volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
volume_backend_name='lvm_iscsi',
volume_group='volumes',
)
res = self.backend._get_backend_config(config_options)
mock_validate.assert_called_once_with(config_options)
self.assertEqual('lvm_iscsi', res.config_group)
for opt in config_options.keys():
self.assertEqual(config_options[opt], getattr(res, opt))
def test_pool_names(self):
pool_names = [mock.sentinel._pool_names]
self.backend._pool_names = pool_names
self.assertEqual(pool_names, self.backend.pool_names)
def test_volumes(self):
self.backend._volumes = None
res = self.backend.volumes
self.assertEqual(self.persistence.get_volumes.return_value, res)
self.assertEqual(self.persistence.get_volumes.return_value,
self.backend._volumes)
self.persistence.get_volumes.assert_called_once_with(
backend_name=self.backend.id)
def test_id(self):
self.assertEqual(self.backend._driver_cfg['volume_backend_name'],
self.backend.id)
def test_volumes_filtered(self):
res = self.backend.volumes_filtered(mock.sentinel.vol_id,
mock.sentinel.vol_name)
self.assertEqual(self.persistence.get_volumes.return_value, res)
self.assertEqual([], self.backend._volumes)
self.persistence.get_volumes.assert_called_once_with(
backend_name=self.backend.id,
volume_id=mock.sentinel.vol_id,
volume_name=mock.sentinel.vol_name)
def test_stats(self):
expect = {'pools': [mock.sentinel.data]}
with mock.patch.object(self.backend.driver, 'get_volume_stats',
return_value=expect) as mock_stat:
res = self.backend.stats(mock.sentinel.refresh)
self.assertEqual(expect, res)
mock_stat.assert_called_once_with(refresh=mock.sentinel.refresh)
def test_stats_single(self):
stat_value = {'driver_version': 'v1', 'key': 'value'}
expect = {'driver_version': 'v1', 'key': 'value',
'pools': [{'key': 'value', 'pool_name': self.backend_name}]}
with mock.patch.object(self.backend.driver, 'get_volume_stats',
return_value=stat_value) as mock_stat:
res = self.backend.stats(mock.sentinel.refresh)
self.assertEqual(expect, res)
mock_stat.assert_called_once_with(refresh=mock.sentinel.refresh)
@mock.patch('cinderlib.objects.Volume')
def test_create_volume(self, mock_vol):
kwargs = {'k': 'v', 'k2': 'v2'}
res = self.backend.create_volume(mock.sentinel.size,
mock.sentinel.name,
mock.sentinel.desc,
mock.sentinel.boot,
**kwargs)
self.assertEqual(mock_vol.return_value, res)
mock_vol.assert_called_once_with(self.backend, size=mock.sentinel.size,
name=mock.sentinel.name,
description=mock.sentinel.desc,
bootable=mock.sentinel.boot,
**kwargs)
mock_vol.return_value.create.assert_called_once_with()
def test__volume_removed_no_list(self):
vol = cinderlib.objects.Volume(self.backend, size=10)
self.backend._volume_removed(vol)
def test__volume_removed(self):
vol = cinderlib.objects.Volume(self.backend, size=10)
vol2 = cinderlib.objects.Volume(self.backend, id=vol.id, size=10)
self.backend._volumes.append(vol)
self.backend._volume_removed(vol2)
self.assertEqual([], self.backend.volumes)
def test__volume_created(self):
vol = cinderlib.objects.Volume(self.backend, size=10)
self.backend._volume_created(vol)
self.assertEqual([vol], self.backend.volumes)
def test__volume_created_is_none(self):
vol = cinderlib.objects.Volume(self.backend, size=10)
self.backend._volume_created(vol)
self.assertEqual([vol], self.backend.volumes)
def test_validate_connector(self):
self.backend.validate_connector(mock.sentinel.connector)
self.backend.driver.validate_connector.assert_called_once_with(
mock.sentinel.connector)
@mock.patch('cinderlib.objects.setup')
@mock.patch('cinderlib.persistence.setup')
def test_set_persistence(self, mock_pers_setup, mock_obj_setup):
cinderlib.Backend.global_initialization = True
cinderlib.Backend.set_persistence(mock.sentinel.pers_cfg)
mock_pers_setup.assert_called_once_with(mock.sentinel.pers_cfg)
self.assertEqual(mock_pers_setup.return_value,
cinderlib.Backend.persistence)
mock_obj_setup.assert_called_once_with(mock_pers_setup.return_value,
cinderlib.Backend,
self.backend.project_id,
self.backend.user_id,
self.backend.non_uuid_ids)
self.assertEqual(mock_pers_setup.return_value.db,
self.backend.driver.db)
def test_config(self):
self.backend.output_all_backend_info = False
res = self.backend.config
self.assertEqual({'volume_backend_name': self.backend.id}, res)
def test_config_full(self):
self.backend.output_all_backend_info = True
with mock.patch.object(self.backend, '_driver_cfg') as mock_driver:
res = self.backend.config
self.assertEqual(mock_driver, res)
def test_refresh(self):
self.backend.refresh()
self.persistence.get_volumes.assert_called_once_with(
backend_name=self.backend.id)
def test_refresh_no_call(self):
self.backend._volumes = None
self.backend.refresh()
self.persistence.get_volumes.assert_not_called()
@staticmethod
def odict(*args):
res = collections.OrderedDict()
for i in range(0, len(args), 2):
res[args[i]] = args[i + 1]
return res
@mock.patch('cinderlib.cinderlib.cfg.CONF')
def test__apply_backend_workarounds(self, mock_conf):
cfg = mock.Mock(volume_driver='cinder.volume.drivers.netapp...')
self.backend._apply_backend_workarounds(cfg)
self.assertEqual(cfg.volume_backend_name,
mock_conf.list_all_sections())
@mock.patch('cinderlib.cinderlib.cfg.CONF')
def test__apply_backend_workarounds_do_nothing(self, mock_conf):
cfg = mock.Mock(volume_driver='cinder.volume.drivers.lvm...')
self.backend._apply_backend_workarounds(cfg)
self.assertEqual(mock_conf.list_all_sections.return_value,
mock_conf.list_all_sections())
def _check_privsep_root_helper_opt(self, is_changed):
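        # Check whether privsep's helper_command default was rewritten to
        # run the in-venv privsep helper via the test's 'mysudo' root helper.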
for opt in priv_context.OPTS:
if opt.name == 'helper_command':
break
helper_path = os.path.join(os.path.dirname(cinderlib.__file__),
'bin/venv-privsep-helper')
self.assertIs(is_changed,
f'mysudo {helper_path}' == opt.default)
@mock.patch.dict(os.environ, {}, clear=True)
@mock.patch('os.path.exists')
@mock.patch('configparser.ConfigParser')
@mock.patch('oslo_privsep.priv_context.init')
def test__set_priv_helper_no_venv_sudo(self, mock_ctxt_init, mock_parser,
mock_exists):
original_helper_func = utils.get_root_helper
original_rootwrap_config = cfg.CONF.rootwrap_config
rootwrap_config = '/etc/cinder/rootwrap.conf'
        # Not using set_override because it doesn't work as it should here
cfg.CONF.rootwrap_config = rootwrap_config
try:
self.backend._set_priv_helper('sudo')
mock_exists.assert_not_called()
mock_parser.assert_not_called()
mock_ctxt_init.assert_not_called()
self.assertIs(original_helper_func, utils.get_root_helper)
self.assertIs(rootwrap_config, cfg.CONF.rootwrap_config)
self._check_privsep_root_helper_opt(is_changed=False)
finally:
cfg.CONF.rootwrap_config = original_rootwrap_config
@mock.patch('configparser.ConfigParser.read', mock.Mock())
@mock.patch('configparser.ConfigParser.write', mock.Mock())
@mock.patch('cinderlib.cinderlib.utils.__file__',
'/.venv/lib/python3.7/site-packages/cinder')
@mock.patch('cinderlib.cinderlib.os.environ', {'VIRTUAL_ENV': '/.venv'})
@mock.patch('cinderlib.cinderlib.open')
@mock.patch('os.path.exists', return_value=False)
@mock.patch('oslo_privsep.priv_context.init')
def test__set_priv_helper_venv_no_sudo(self, mock_ctxt_init, mock_exists,
mock_open):
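        # Custom sudo command inside a virtualenv: the rootwrap config must
        # be rewritten for the venv and the root helper command wrapped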
file_contents = {'DEFAULT': {'filters_path': '/etc/cinder/rootwrap.d',
'exec_dirs': '/dir1,/dir2'}}
parser = configparser.ConfigParser()
venv_wrap_cfg = '/.venv/etc/cinder/rootwrap.conf'
original_helper_func = utils.get_root_helper
original_rootwrap_config = cfg.CONF.rootwrap_config
        # Not using set_override because it does not behave as expected here
default_wrap_cfg = '/etc/cinder/rootwrap.conf'
cfg.CONF.rootwrap_config = default_wrap_cfg
try:
with mock.patch('cinder.utils.get_root_helper',
return_value='sudo wrapper') as mock_helper, \
mock.patch.dict(parser, file_contents, clear=True), \
mock.patch('configparser.ConfigParser') as mock_parser:
mock_parser.return_value = parser
self.backend._set_priv_helper('mysudo')
mock_exists.assert_called_once_with(default_wrap_cfg)
mock_parser.assert_called_once_with()
parser.read.assert_called_once_with(venv_wrap_cfg)
self.assertEqual('/.venv/etc/cinder/rootwrap.d',
parser['DEFAULT']['filters_path'])
self.assertEqual('/.venv/bin,/dir1,/dir2',
parser['DEFAULT']['exec_dirs'])
mock_open.assert_called_once_with(venv_wrap_cfg, 'w')
parser.write.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
self.assertEqual('mysudo wrapper', utils.get_root_helper())
mock_helper.assert_called_once_with()
mock_ctxt_init.assert_called_once_with(root_helper=['mysudo'])
self.assertIs(original_helper_func, utils.get_root_helper)
self.assertEqual(venv_wrap_cfg, cfg.CONF.rootwrap_config)
self._check_privsep_root_helper_opt(is_changed=True)
finally:
cfg.CONF.rootwrap_config = original_rootwrap_config
utils.get_root_helper = original_helper_func

    @mock.patch('configparser.ConfigParser.read', mock.Mock())
@mock.patch('configparser.ConfigParser.write', mock.Mock())
@mock.patch('cinderlib.cinderlib.utils.__file__', '/opt/stack/cinder')
@mock.patch('cinderlib.cinderlib.os.environ', {'VIRTUAL_ENV': '/.venv'})
@mock.patch('shutil.copytree')
    @mock.patch('glob.glob')
@mock.patch('cinderlib.cinderlib.open')
@mock.patch('os.path.exists', return_value=False)
@mock.patch('oslo_privsep.priv_context.init')
def test__set_priv_helper_venv_editable_no_sudo(self, mock_ctxt_init,
mock_exists, mock_open,
mock_glob, mock_copy):
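        # Editable (pip install -e) virtualenv: cinder is located through
        # its egg-link and the rootwrap files are copied from the source
        # tree into the venv before being adjusted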
link_file = '/.venv/lib/python3.7/site-packages/cinder.egg-link'
cinder_source_path = '/opt/stack/cinder'
link_file_contents = cinder_source_path + '\n.'
mock_glob.return_value = [link_file]
open_fd = mock_open.return_value.__enter__.return_value
open_fd.read.return_value = link_file_contents
file_contents = {'DEFAULT': {'filters_path': '/etc/cinder/rootwrap.d',
'exec_dirs': '/dir1,/dir2'}}
parser = configparser.ConfigParser()
venv_wrap_cfg = '/.venv/etc/cinder/rootwrap.conf'
original_helper_func = utils.get_root_helper
original_rootwrap_config = cfg.CONF.rootwrap_config
        # Not using set_override because it does not behave as expected here
default_wrap_cfg = '/etc/cinder/rootwrap.conf'
cfg.CONF.rootwrap_config = default_wrap_cfg
try:
with mock.patch('cinder.utils.get_root_helper',
return_value='sudo wrapper') as mock_helper, \
mock.patch.dict(parser, file_contents, clear=True), \
mock.patch('configparser.ConfigParser') as mock_parser:
mock_parser.return_value = parser
self.backend._set_priv_helper('mysudo')
mock_glob.assert_called_once_with(
'/.venv/lib/python*/site-packages/cinder.egg-link')
self.assertEqual(2, mock_exists.call_count)
mock_exists.assert_has_calls([mock.call(default_wrap_cfg),
mock.call(venv_wrap_cfg)])
self.assertEqual(2, mock_open.call_count)
mock_open.assert_any_call(link_file, 'r')
mock_copy.assert_called_once_with(
cinder_source_path + '/etc/cinder', '/.venv/etc/cinder')
mock_parser.assert_called_once_with()
parser.read.assert_called_once_with(venv_wrap_cfg)
self.assertEqual('/.venv/etc/cinder/rootwrap.d',
parser['DEFAULT']['filters_path'])
self.assertEqual('/.venv/bin,/dir1,/dir2',
parser['DEFAULT']['exec_dirs'])
mock_open.assert_any_call(venv_wrap_cfg, 'w')
parser.write.assert_called_once_with(open_fd)
self.assertEqual('mysudo wrapper', utils.get_root_helper())
mock_helper.assert_called_once_with()
mock_ctxt_init.assert_called_once_with(root_helper=['mysudo'])
self.assertIs(original_helper_func, utils.get_root_helper)
self.assertEqual(venv_wrap_cfg, cfg.CONF.rootwrap_config)
self._check_privsep_root_helper_opt(is_changed=True)
finally:
cfg.CONF.rootwrap_config = original_rootwrap_config
utils.get_root_helper = original_helper_func


@ -1,85 +0,0 @@
# Copyright (c) 2021, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from cinderlib import objects
from cinderlib.tests.unit import base


class TestSerialization(base.BaseTest):
def test_vol_to_and_from(self):
vol = objects.Volume(self.backend, size=10)
snap = objects.Snapshot(vol, name='disk')
        # Clear the cache so the snapshot is lazy-loaded from persistence
vol._snapshots = None
with mock.patch.object(vol.persistence, 'get_snapshots',
return_value=[snap]):
vol.snapshots
self.assertEqual(1, len(vol.snapshots))
json_data = vol.json
        # Confirm the vol.json property is equivalent to the non-simplified
        # version
self.assertEqual(json_data, vol.to_json(simplified=False))
vol2 = objects.Volume.load(json_data)
# Check snapshots are recovered as well
self.assertEqual(1, len(vol2.snapshots))
self.assertEqual(vol.json, vol2.json)

    def test_snap_to_and_from(self):
vol = objects.Volume(self.backend, size=10)
snap = objects.Snapshot(vol, name='disk')
json_data = snap.json
        # Confirm the snap.json property is equivalent to the non-simplified
        # version
self.assertEqual(json_data, snap.to_json(simplified=False))
snap2 = objects.Snapshot.load(json_data)
self.assertEqual(snap.json, snap2.json)

    def test_conn_to_and_from(self):
vol = objects.Volume(self.backend, size=1, name='disk')
conn = objects.Connection(self.backend, volume=vol, connector={},
connection_info={'conn': {'data': {}}})
json_data = conn.json
        # Confirm the conn.json property is equivalent to the non-simplified
        # version
self.assertEqual(json_data, conn.to_json(simplified=False))
conn2 = objects.Connection.load(json_data)
self.assertEqual(conn.json, conn2.json)

    def test_datetime_subsecond(self):
"""Test microsecond serialization of DateTime fields."""
microsecond = 123456
vol = objects.Volume(self.backend, size=1, name='disk')
vol._ovo.created_at = vol.created_at.replace(microsecond=microsecond)
created_at = vol.created_at
json_data = vol.json
vol2 = objects.Volume.load(json_data)
self.assertEqual(created_at, vol2.created_at)
self.assertEqual(microsecond, vol2.created_at.microsecond)

    def test_datetime_non_subsecond(self):
"""Test rehydration of DateTime field without microsecond."""
vol = objects.Volume(self.backend, size=1, name='disk')
vol._ovo.created_at = vol.created_at.replace(microsecond=123456)
with mock.patch.object(vol._ovo.fields['created_at'], 'to_primitive',
return_value='2021-06-28T17:14:59Z'):
json_data = vol.json
vol2 = objects.Volume.load(json_data)
self.assertEqual(0, vol2.created_at.microsecond)


@ -1,34 +0,0 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import cinderlib
from cinderlib.persistence import base


def get_mock_persistence():
    return mock.MagicMock(spec=base.PersistenceDriverBase)


class FakeBackend(cinderlib.Backend):
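    # Minimal Backend stand-in that skips real driver initialization and
    # registers itself with mocked internals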
def __init__(self, *args, **kwargs):
driver_name = kwargs.get('volume_backend_name', 'fake')
cinderlib.Backend.backends[driver_name] = self
self._driver_cfg = {'volume_backend_name': driver_name}
self.driver = mock.Mock()
self.driver.persistence = cinderlib.Backend.persistence
self._pool_names = (driver_name,)
self._volumes = []


@ -1,31 +0,0 @@
# Copyright (c) 2019, Red Hat, Inc.
# All Rights Reserved.
#