From 81b1dbebdfadace7036ee82765c0e636685c948a Mon Sep 17 00:00:00 2001 From: "Walter A. Boring IV" Date: Thu, 22 Jan 2015 19:09:30 +0000 Subject: [PATCH] Created the Brick library from Cinder This is the external Brick library that came from Cinder's codebase. It's intended to be used as a standalone library and subproject of Cinder. --- .coveragerc | 7 + .gitignore | 53 ++ .gitreview | 4 + .mailmap | 3 + .testr.conf | 7 + CONTRIBUTING.rst | 16 + HACKING.rst | 4 + LICENSE | 176 ++++ MANIFEST.in | 6 + README.rst | 15 + babel.cfg | 2 + brick/README.txt | 9 + brick/__init__.py | 0 brick/exception.py | 120 +++ brick/executor.py | 34 + brick/i18n.py | 38 + brick/initiator/__init__.py | 0 brick/initiator/connector.py | 1050 +++++++++++++++++++++++ brick/initiator/host_driver.py | 30 + brick/initiator/linuxfc.py | 140 +++ brick/initiator/linuxscsi.py | 193 +++++ brick/openstack/__init__.py | 0 brick/openstack/common/__init__.py | 0 brick/openstack/common/_i18n.py | 45 + brick/openstack/common/local.py | 45 + brick/openstack/common/log.py | 718 ++++++++++++++++ brick/openstack/common/loopingcall.py | 147 ++++ brick/remotefs/__init__.py | 0 brick/remotefs/remotefs.py | 174 ++++ brick/tests/__init__.py | 0 brick/tests/base.py | 159 ++++ brick/tests/initiator/__init__.py | 0 brick/tests/initiator/test_connector.py | 645 ++++++++++++++ brick/tests/initiator/test_linuxfc.py | 176 ++++ brick/tests/initiator/test_linuxscsi.py | 226 +++++ brick/tests/test_brick.py | 28 + brick/tests/test_exception.py | 59 ++ doc/source/conf.py | 75 ++ doc/source/contributing.rst | 4 + doc/source/index.rst | 50 ++ doc/source/installation.rst | 12 + doc/source/readme.rst | 1 + doc/source/usage.rst | 7 + openstack-common.conf | 10 + requirements.txt | 12 + setup.cfg | 56 ++ setup.py | 22 + test-requirements.txt | 14 + tox.ini | 62 ++ 49 files changed, 4654 insertions(+) create mode 100644 .coveragerc create mode 100644 .gitignore create mode 100644 .gitreview create mode 100644 .mailmap create mode 
100644 .testr.conf create mode 100644 CONTRIBUTING.rst create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100644 brick/README.txt create mode 100644 brick/__init__.py create mode 100644 brick/exception.py create mode 100644 brick/executor.py create mode 100644 brick/i18n.py create mode 100644 brick/initiator/__init__.py create mode 100644 brick/initiator/connector.py create mode 100644 brick/initiator/host_driver.py create mode 100644 brick/initiator/linuxfc.py create mode 100644 brick/initiator/linuxscsi.py create mode 100644 brick/openstack/__init__.py create mode 100644 brick/openstack/common/__init__.py create mode 100644 brick/openstack/common/_i18n.py create mode 100644 brick/openstack/common/local.py create mode 100644 brick/openstack/common/log.py create mode 100644 brick/openstack/common/loopingcall.py create mode 100644 brick/remotefs/__init__.py create mode 100644 brick/remotefs/remotefs.py create mode 100644 brick/tests/__init__.py create mode 100644 brick/tests/base.py create mode 100644 brick/tests/initiator/__init__.py create mode 100644 brick/tests/initiator/test_connector.py create mode 100644 brick/tests/initiator/test_linuxfc.py create mode 100644 brick/tests/initiator/test_linuxscsi.py create mode 100644 brick/tests/test_brick.py create mode 100644 brick/tests/test_exception.py create mode 100755 doc/source/conf.py create mode 100644 doc/source/contributing.rst create mode 100644 doc/source/index.rst create mode 100644 doc/source/installation.rst create mode 100644 doc/source/readme.rst create mode 100644 doc/source/usage.rst create mode 100644 openstack-common.conf create mode 100644 requirements.txt create mode 100644 setup.cfg create mode 100755 setup.py create mode 100644 test-requirements.txt create mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..d10d4ed96 --- /dev/null +++ 
b/.coveragerc @@ -0,0 +1,7 @@ +[run] +branch = True +source = brick +omit = brick/tests/*,brick/openstack/* + +[report] +ignore-errors = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..8a3c70429 --- /dev/null +++ b/.gitignore @@ -0,0 +1,53 @@ +*.py[cod] + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox +nosetests.xml +.testrepository +.venv + +# Translations +*.mo + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject + +# Complexity +output/*.html +output/*/index.html + +# Sphinx +doc/build + +# pbr generates these +AUTHORS +ChangeLog + +# Editors +*~ +.*.swp +.*sw? diff --git a/.gitreview b/.gitreview new file mode 100644 index 000000000..7154b2b61 --- /dev/null +++ b/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/brick.git diff --git a/.mailmap b/.mailmap new file mode 100644 index 000000000..516ae6fe0 --- /dev/null +++ b/.mailmap @@ -0,0 +1,3 @@ +# Format is: +# +# diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 000000000..6d83b3c4e --- /dev/null +++ b/.testr.conf @@ -0,0 +1,7 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ + ${PYTHON:-python} -m subunit.run discover -t ./ . 
$LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 000000000..b7cc8ec88 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,16 @@ +If you would like to contribute to the development of OpenStack, +you must follow the steps in this page: + + http://docs.openstack.org/infra/manual/developers.html + +Once those steps have been completed, changes to OpenStack +should be submitted for review via the Gerrit tool, following +the workflow documented at: + + http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Pull requests submitted through GitHub will be ignored. + +Bugs should be filed on Launchpad, not GitHub: + + https://bugs.launchpad.net/brick diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 000000000..d53997786 --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,4 @@ +brick Style Commandments +=============================================== + +Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..68c771a09 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..c978a52da --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +include AUTHORS +include ChangeLog +exclude .gitignore +exclude .gitreview + +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 000000000..1fae2498e --- /dev/null +++ b/README.rst @@ -0,0 +1,15 @@ +=============================== +brick +=============================== + +OpenStack Cinder brick library for managing local volume attaches + +* Free software: Apache license +* Documentation: http://docs.openstack.org/developer/brick +* Source: http://git.openstack.org/cgit/openstack/brick +* Bugs: http://bugs.launchpad.net/cinder + +Features +-------- + +* TODO diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 000000000..15cd6cb76 --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/brick/README.txt b/brick/README.txt new file mode 100644 index 000000000..bc26cd11b --- /dev/null +++ b/brick/README.txt @@ -0,0 +1,9 @@ +Brick is a new library that currently is maintained in Cinder for +the Havana release. It will eventually be moved external to Cinder, +possibly oslo, or pypi. Any defects found in Brick, should be submitted +against Cinder and fixed there, then pulled into other projects that +are using brick. + +* Brick is used outside of Cinder and therefore + cannot have any dependencies on Cinder and/or + it's database. diff --git a/brick/__init__.py b/brick/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/brick/exception.py b/brick/exception.py new file mode 100644 index 000000000..7619dc3e5 --- /dev/null +++ b/brick/exception.py @@ -0,0 +1,120 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Exceptions for the Brick library.""" + +from brick.i18n import _ +from brick.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class BrickException(Exception): + """Base Brick Exception + + To correctly use this class, inherit from it and define + a 'msg_fmt' property. That msg_fmt will get printf'd + with the keyword arguments provided to the constructor. + """ + message = _("An unknown exception occurred.") + code = 500 + headers = {} + safe = False + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.message % kwargs + + except Exception: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + msg = (_("Exception in string format operation. msg='%s'") + % self.message) + LOG.exception(msg) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + + # at least get the core message out if something happened + message = self.message + + # Put the message in 'msg' so that we can access it. 
If we have it in + # message it will be overshadowed by the class' message attribute + self.msg = message + super(BrickException, self).__init__(message) + + def __unicode__(self): + return unicode(self.msg) + + +class NotFound(BrickException): + message = _("Resource could not be found.") + code = 404 + safe = True + + +class Invalid(BrickException): + message = _("Unacceptable parameters.") + code = 400 + + +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. +class InvalidParameterValue(Invalid): + message = _("%(err)s") + + +class NoFibreChannelHostsFound(BrickException): + message = _("We are unable to locate any Fibre Channel devices.") + + +class NoFibreChannelVolumeDeviceFound(BrickException): + message = _("Unable to find a Fibre Channel volume device.") + + +class VolumeDeviceNotFound(BrickException): + message = _("Volume device not found at %(device)s.") + + +class VolumeGroupNotFound(BrickException): + message = _('Unable to find Volume Group: %(vg_name)s') + + +class VolumeGroupCreationFailed(BrickException): + message = _('Failed to create Volume Group: %(vg_name)s') + + +class ISCSITargetCreateFailed(BrickException): + message = _("Failed to create iscsi target for volume %(volume_id)s.") + + +class ISCSITargetRemoveFailed(BrickException): + message = _("Failed to remove iscsi target for volume %(volume_id)s.") + + +class ISCSITargetAttachFailed(BrickException): + message = _("Failed to attach iSCSI target for volume %(volume_id)s.") + + +class ProtocolNotSupported(BrickException): + message = _("Connect to volume via protocol %(protocol)s not supported.") diff --git a/brick/executor.py b/brick/executor.py new file mode 100644 index 000000000..fe82b8826 --- /dev/null +++ b/brick/executor.py @@ -0,0 +1,34 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic exec utility that allows us to set the + execute and root_helper attributes for putils. + Some projects need their own execute wrapper + and root_helper settings, so this provides that hook. +""" + +from oslo_concurrency import processutils as putils + + +class Executor(object): + def __init__(self, root_helper, execute=putils.execute, + *args, **kwargs): + self.set_execute(execute) + self.set_root_helper(root_helper) + + def set_execute(self, execute): + self._execute = execute + + def set_root_helper(self, helper): + self._root_helper = helper diff --git a/brick/i18n.py b/brick/i18n.py new file mode 100644 index 000000000..c97378335 --- /dev/null +++ b/brick/i18n.py @@ -0,0 +1,38 @@ +# Copyright 2014 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""oslo.i18n integration module. + +See http://docs.openstack.org/developer/oslo.i18n/usage.html . 
+ +""" + +from oslo import i18n + +DOMAIN = 'brick' + +_translators = i18n.TranslatorFactory(domain=DOMAIN) + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical diff --git a/brick/initiator/__init__.py b/brick/initiator/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/brick/initiator/connector.py b/brick/initiator/connector.py new file mode 100644 index 000000000..975a00735 --- /dev/null +++ b/brick/initiator/connector.py @@ -0,0 +1,1050 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import socket +import time + +from oslo_concurrency import lockutils +from oslo_concurrency import processutils as putils + +from brick import exception +from brick import executor +from brick.initiator import host_driver +from brick.initiator import linuxfc +from brick.initiator import linuxscsi +from brick.remotefs import remotefs +from brick.i18n import _, _LE, _LW +from brick.openstack.common import log as logging +from brick.openstack.common import loopingcall + +LOG = logging.getLogger(__name__) + +synchronized = lockutils.synchronized_with_prefix('brick-') +DEVICE_SCAN_ATTEMPTS_DEFAULT = 3 + + +def get_connector_properties(root_helper, my_ip): + """Get the connection properties for all protocols.""" + + iscsi = ISCSIConnector(root_helper=root_helper) + fc = linuxfc.LinuxFibreChannel(root_helper=root_helper) + + props = {} + props['ip'] = my_ip + props['host'] = socket.gethostname() + initiator = iscsi.get_initiator() + if initiator: + props['initiator'] = initiator + wwpns = fc.get_fc_wwpns() + if wwpns: + props['wwpns'] = wwpns + wwnns = fc.get_fc_wwnns() + if wwnns: + props['wwnns'] = wwnns + + return props + + +class InitiatorConnector(executor.Executor): + def __init__(self, root_helper, driver=None, + execute=putils.execute, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + super(InitiatorConnector, self).__init__(root_helper, execute=execute, + *args, **kwargs) + if not driver: + driver = host_driver.HostDriver() + self.set_driver(driver) + self.device_scan_attempts = device_scan_attempts + + def set_driver(self, driver): + """The driver is used to find used LUNs.""" + + self.driver = driver + + @staticmethod + def factory(protocol, root_helper, driver=None, + execute=putils.execute, use_multipath=False, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + """Build a Connector object based upon protocol.""" + LOG.debug("Factory for %s" % protocol) + protocol = protocol.upper() + if protocol 
== "ISCSI": + return ISCSIConnector(root_helper=root_helper, + driver=driver, + execute=execute, + use_multipath=use_multipath, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "ISER": + return ISERConnector(root_helper=root_helper, + driver=driver, + execute=execute, + use_multipath=use_multipath, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "FIBRE_CHANNEL": + return FibreChannelConnector(root_helper=root_helper, + driver=driver, + execute=execute, + use_multipath=use_multipath, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + elif protocol == "AOE": + return AoEConnector(root_helper=root_helper, + driver=driver, + execute=execute, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "NFS" or protocol == "GLUSTERFS": + return RemoteFsConnector(mount_type=protocol.lower(), + root_helper=root_helper, + driver=driver, + execute=execute, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "LOCAL": + return LocalConnector(root_helper=root_helper, + driver=driver, + execute=execute, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "HUAWEISDSHYPERVISOR": + return HuaweiStorHyperConnector(root_helper=root_helper, + driver=driver, + execute=execute, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + else: + msg = (_("Invalid InitiatorConnector protocol " + "specified %(protocol)s") % + dict(protocol=protocol)) + raise ValueError(msg) + + def check_valid_device(self, path, run_as_root=True): + cmd = ('dd', 'if=%(path)s' % {"path": path}, + 'of=/dev/null', 'count=1') + out, info = None, None + try: + out, info = self._execute(*cmd, run_as_root=run_as_root, + root_helper=self._root_helper) + except putils.ProcessExecutionError as e: + LOG.error(_LE("Failed to access the device on the path " + "%(path)s: %(error)s %(info)s.") % + {"path": path, "error": e.stderr, + "info": 
info}) + return False + # If the info is none, the path does not exist. + if info is None: + return False + return True + + def connect_volume(self, connection_properties): + """Connect to a volume. + + The connection_properties describes the information needed by + the specific protocol to use to make the connection. + """ + raise NotImplementedError() + + def disconnect_volume(self, connection_properties, device_info): + """Disconnect a volume from the local host. + + The connection_properties are the same as from connect_volume. + The device_info is returned from connect_volume. + """ + raise NotImplementedError() + + +class ISCSIConnector(InitiatorConnector): + """Connector class to attach/detach iSCSI volumes.""" + + def __init__(self, root_helper, driver=None, + execute=putils.execute, use_multipath=False, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute) + super(ISCSIConnector, self).__init__(root_helper, driver=driver, + execute=execute, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + self.use_multipath = use_multipath + + def set_execute(self, execute): + super(ISCSIConnector, self).set_execute(execute) + self._linuxscsi.set_execute(execute) + + @synchronized('connect_volume') + def connect_volume(self, connection_properties): + """Attach the volume to instance_name. 
+ + connection_properties for iSCSI must include: + target_portal - ip and optional port + target_iqn - iSCSI Qualified Name + target_lun - LUN id of the volume + """ + + device_info = {'type': 'block'} + + if self.use_multipath: + #multipath installed, discovering other targets if available + target_portal = connection_properties['target_portal'] + out = self._run_iscsiadm_bare(['-m', + 'discovery', + '-t', + 'sendtargets', + '-p', + target_portal], + check_exit_code=[0, 255])[0] \ + or "" + + for ip, iqn in self._get_target_portals_from_iscsiadm_output(out): + props = connection_properties.copy() + props['target_portal'] = ip + props['target_iqn'] = iqn + self._connect_to_iscsi_portal(props) + + self._rescan_iscsi() + else: + self._connect_to_iscsi_portal(connection_properties) + + host_device = self._get_device_path(connection_properties) + + # The /dev/disk/by-path/... node is not always present immediately + # TODO(justinsb): This retry-with-delay is a pattern, move to utils? + tries = 0 + while not os.path.exists(host_device): + if tries >= self.device_scan_attempts: + raise exception.VolumeDeviceNotFound(device=host_device) + + LOG.warn(_LW("ISCSI volume not yet found at: %(host_device)s. " + "Will rescan & retry. 
Try number: %(tries)s"), + {'host_device': host_device, + 'tries': tries}) + + # The rescan isn't documented as being necessary(?), but it helps + self._run_iscsiadm(connection_properties, ("--rescan",)) + + tries = tries + 1 + if not os.path.exists(host_device): + time.sleep(tries ** 2) + + if tries != 0: + LOG.debug("Found iSCSI node %(host_device)s " + "(after %(tries)s rescans)", + {'host_device': host_device, 'tries': tries}) + + if self.use_multipath: + #we use the multipath device instead of the single path device + self._rescan_multipath() + multipath_device = self._get_multipath_device_name(host_device) + if multipath_device is not None: + host_device = multipath_device + + device_info['path'] = host_device + return device_info + + @synchronized('connect_volume') + def disconnect_volume(self, connection_properties, device_info): + """Detach the volume from instance_name. + + connection_properties for iSCSI must include: + target_portal - IP and optional port + target_iqn - iSCSI Qualified Name + target_lun - LUN id of the volume + """ + # Moved _rescan_iscsi and _rescan_multipath + # from _disconnect_volume_multipath_iscsi to here. + # Otherwise, if we do rescan after _linuxscsi.remove_multipath_device + # but before logging out, the removed devices under /dev/disk/by-path + # will reappear after rescan. 
+ self._rescan_iscsi() + host_device = self._get_device_path(connection_properties) + multipath_device = None + if self.use_multipath: + self._rescan_multipath() + multipath_device = self._get_multipath_device_name(host_device) + if multipath_device: + device_realpath = os.path.realpath(host_device) + self._linuxscsi.remove_multipath_device(device_realpath) + return self._disconnect_volume_multipath_iscsi( + connection_properties, multipath_device) + + # remove the device from the scsi subsystem + # this eliminates any stale entries until logout + dev_name = self._linuxscsi.get_name_from_path(host_device) + if dev_name: + self._linuxscsi.remove_scsi_device(dev_name) + + # NOTE(vish): Only disconnect from the target if no luns from the + # target are in use. + device_prefix = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-" % + {'portal': connection_properties['target_portal'], + 'iqn': connection_properties['target_iqn']}) + devices = self.driver.get_all_block_devices() + devices = [dev for dev in devices if dev.startswith(device_prefix) + and os.path.exists(dev)] + + if not devices: + self._disconnect_from_iscsi_portal(connection_properties) + + def _get_device_path(self, connection_properties): + path = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-%(lun)s" % + {'portal': connection_properties['target_portal'], + 'iqn': connection_properties['target_iqn'], + 'lun': connection_properties.get('target_lun', 0)}) + return path + + def get_initiator(self): + """Secure helper to read file as root.""" + file_path = '/etc/iscsi/initiatorname.iscsi' + try: + lines, _err = self._execute('cat', file_path, run_as_root=True, + root_helper=self._root_helper) + + for l in lines.split('\n'): + if l.startswith('InitiatorName='): + return l[l.index('=') + 1:].strip() + except putils.ProcessExecutionError: + msg = (_("Could not find the iSCSI Initiator File %s") + % file_path) + LOG.warn(msg) + return None + + def _run_iscsiadm(self, connection_properties, iscsi_command, 
**kwargs):
+        check_exit_code = kwargs.pop('check_exit_code', 0)
+        (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
+                                   connection_properties['target_iqn'],
+                                   '-p',
+                                   connection_properties['target_portal'],
+                                   *iscsi_command, run_as_root=True,
+                                   root_helper=self._root_helper,
+                                   check_exit_code=check_exit_code)
+        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
+                  (iscsi_command, out, err))
+        return (out, err)
+
+    def _iscsiadm_update(self, connection_properties, property_key,
+                         property_value, **kwargs):
+        iscsi_command = ('--op', 'update', '-n', property_key,
+                         '-v', property_value)
+        return self._run_iscsiadm(connection_properties, iscsi_command,
+                                  **kwargs)
+
+    def _get_target_portals_from_iscsiadm_output(self, output):
+        # return both portals and iqns
+        return [line.split() for line in output.splitlines()]
+
+    def _disconnect_volume_multipath_iscsi(self, connection_properties,
+                                           multipath_name):
+        """This removes a multipath device and its LUNs."""
+        LOG.debug("Disconnect multipath device %s" % multipath_name)
+        block_devices = self.driver.get_all_block_devices()
+        devices = []
+        for dev in block_devices:
+            if os.path.exists(dev):
+                if "/mapper/" in dev:
+                    devices.append(dev)
+                else:
+                    mpdev = self._get_multipath_device_name(dev)
+                    if mpdev:
+                        devices.append(mpdev)
+
+        # Do a discovery to find all targets.
+        # Targets for multiple paths for the same multipath device
+        # may not be the same.
+ out = self._run_iscsiadm_bare(['-m', + 'discovery', + '-t', + 'sendtargets', + '-p', + connection_properties['target_portal']], + check_exit_code=[0, 255])[0] \ + or "" + + ips_iqns = self._get_target_portals_from_iscsiadm_output(out) + + if not devices: + # disconnect if no other multipath devices + self._disconnect_mpath(connection_properties, ips_iqns) + return + + # Get a target for all other multipath devices + other_iqns = [self._get_multipath_iqn(device) + for device in devices] + # Get all the targets for the current multipath device + current_iqns = [iqn for ip, iqn in ips_iqns] + + in_use = False + for current in current_iqns: + if current in other_iqns: + in_use = True + break + + # If no other multipath device attached has the same iqn + # as the current device + if not in_use: + # disconnect if no other multipath devices with same iqn + self._disconnect_mpath(connection_properties, ips_iqns) + return + + # else do not disconnect iscsi portals, + # as they are used for other luns + return + + def _connect_to_iscsi_portal(self, connection_properties): + # NOTE(vish): If we are on the same host as nova volume, the + # discovery makes the target so we don't need to + # run --op new. Therefore, we check to see if the + # target exists, and if we get 255 (Not Found), then + # we run --op new. This will also happen if another + # volume is using the same target. 
+ try: + self._run_iscsiadm(connection_properties, ()) + except putils.ProcessExecutionError as exc: + # iscsiadm returns 21 for "No records found" after version 2.0-871 + if exc.exit_code in [21, 255]: + self._run_iscsiadm(connection_properties, ('--op', 'new')) + else: + raise + + if connection_properties.get('auth_method'): + self._iscsiadm_update(connection_properties, + "node.session.auth.authmethod", + connection_properties['auth_method']) + self._iscsiadm_update(connection_properties, + "node.session.auth.username", + connection_properties['auth_username']) + self._iscsiadm_update(connection_properties, + "node.session.auth.password", + connection_properties['auth_password']) + + #duplicate logins crash iscsiadm after load, + #so we scan active sessions to see if the node is logged in. + out = self._run_iscsiadm_bare(["-m", "session"], + run_as_root=True, + check_exit_code=[0, 1, 21])[0] or "" + + portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]} + for p in out.splitlines() if p.startswith("tcp:")] + + stripped_portal = connection_properties['target_portal'].split(",")[0] + if len(portals) == 0 or len([s for s in portals + if stripped_portal == + s['portal'].split(",")[0] + and + s['iqn'] == + connection_properties['target_iqn']] + ) == 0: + try: + self._run_iscsiadm(connection_properties, + ("--login",), + check_exit_code=[0, 255]) + except putils.ProcessExecutionError as err: + #as this might be one of many paths, + #only set successful logins to startup automatically + if err.exit_code in [15]: + self._iscsiadm_update(connection_properties, + "node.startup", + "automatic") + return + + self._iscsiadm_update(connection_properties, + "node.startup", + "automatic") + + def _disconnect_from_iscsi_portal(self, connection_properties): + self._iscsiadm_update(connection_properties, "node.startup", "manual", + check_exit_code=[0, 21, 255]) + self._run_iscsiadm(connection_properties, ("--logout",), + check_exit_code=[0, 21, 255]) + 
self._run_iscsiadm(connection_properties, ('--op', 'delete'), + check_exit_code=[0, 21, 255]) + + def _get_multipath_device_name(self, single_path_device): + device = os.path.realpath(single_path_device) + out = self._run_multipath(['-ll', + device], + check_exit_code=[0, 1])[0] + mpath_line = [line for line in out.splitlines() + if "scsi_id" not in line] # ignore udev errors + if len(mpath_line) > 0 and len(mpath_line[0]) > 0: + return "/dev/mapper/%s" % mpath_line[0].split(" ")[0] + + return None + + def _get_iscsi_devices(self): + try: + devices = list(os.walk('/dev/disk/by-path'))[0][-1] + except IndexError: + return [] + return [entry for entry in devices if entry.startswith("ip-")] + + def _disconnect_mpath(self, connection_properties, ips_iqns): + for ip, iqn in ips_iqns: + props = connection_properties.copy() + props['target_portal'] = ip + props['target_iqn'] = iqn + self._disconnect_from_iscsi_portal(props) + + self._rescan_multipath() + + def _get_multipath_iqn(self, multipath_device): + entries = self._get_iscsi_devices() + for entry in entries: + entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry) + entry_multipath = self._get_multipath_device_name(entry_real_path) + if entry_multipath == multipath_device: + return entry.split("iscsi-")[1].split("-lun")[0] + return None + + def _run_iscsiadm_bare(self, iscsi_command, **kwargs): + check_exit_code = kwargs.pop('check_exit_code', 0) + (out, err) = self._execute('iscsiadm', + *iscsi_command, + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=check_exit_code) + LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % + (iscsi_command, out, err)) + return (out, err) + + def _run_multipath(self, multipath_command, **kwargs): + check_exit_code = kwargs.pop('check_exit_code', 0) + (out, err) = self._execute('multipath', + *multipath_command, + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=check_exit_code) + LOG.debug("multipath %s: stdout=%s stderr=%s" % + 
(multipath_command, out, err)) + return (out, err) + + def _rescan_iscsi(self): + self._run_iscsiadm_bare(('-m', 'node', '--rescan'), + check_exit_code=[0, 1, 21, 255]) + self._run_iscsiadm_bare(('-m', 'session', '--rescan'), + check_exit_code=[0, 1, 21, 255]) + + def _rescan_multipath(self): + self._run_multipath('-r', check_exit_code=[0, 1, 21]) + + +class ISERConnector(ISCSIConnector): + + def _get_device_path(self, iser_properties): + return ("/dev/disk/by-path/ip-%s-iser-%s-lun-%s" % + (iser_properties['target_portal'], + iser_properties['target_iqn'], + iser_properties.get('target_lun', 0))) + + +class FibreChannelConnector(InitiatorConnector): + """Connector class to attach/detach Fibre Channel volumes.""" + + def __init__(self, root_helper, driver=None, + execute=putils.execute, use_multipath=False, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute) + self._linuxfc = linuxfc.LinuxFibreChannel(root_helper, execute) + super(FibreChannelConnector, self).__init__(root_helper, driver=driver, + execute=execute, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + self.use_multipath = use_multipath + + def set_execute(self, execute): + super(FibreChannelConnector, self).set_execute(execute) + self._linuxscsi.set_execute(execute) + self._linuxfc.set_execute(execute) + + @synchronized('connect_volume') + def connect_volume(self, connection_properties): + """Attach the volume to instance_name. 
+
+        connection_properties for Fibre Channel must include:
+        target_wwn - World Wide Name of the target port
+                     (may also be a list of WWNs)
+        target_lun - LUN id of the volume
+        """
+        LOG.debug("execute = %s" % self._execute)
+        device_info = {'type': 'block'}
+
+        ports = connection_properties['target_wwn']
+        wwns = []
+        # we support a list of wwns or a single wwn
+        if isinstance(ports, list):
+            for wwn in ports:
+                wwns.append(str(wwn))
+        elif isinstance(ports, basestring):
+            wwns.append(str(ports))
+
+        # We need to look for wwns on every hba
+        # because we don't know ahead of time
+        # where they will show up.
+        hbas = self._linuxfc.get_fc_hbas_info()
+        host_devices = []
+        for hba in hbas:
+            pci_num = self._get_pci_num(hba)
+            if pci_num is not None:
+                for wwn in wwns:
+                    target_wwn = "0x%s" % wwn.lower()
+                    host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
+                                   (pci_num,
+                                    target_wwn,
+                                    connection_properties.get(
+                                        'target_lun', 0)))
+                    host_devices.append(host_device)
+
+        if len(host_devices) == 0:
+            # this is empty because we don't have any FC HBAs
+            msg = _("We are unable to locate any Fibre Channel devices")
+            LOG.warn(msg)
+            raise exception.NoFibreChannelHostsFound()
+
+        # The /dev/disk/by-path/... node is not always present immediately
+        # We only need to find the first device. Once we see the first device
+        # multipath will have any others.
+        def _wait_for_device_discovery(host_devices):
+            tries = self.tries
+            for device in host_devices:
+                LOG.debug("Looking for Fibre Channel dev %(device)s",
+                          {'device': device})
+                if os.path.exists(device):
+                    self.host_device = device
+                    # get the /dev/sdX device. This is used
+                    # to find the multipath device.
+                    self.device_name = os.path.realpath(device)
+                    raise loopingcall.LoopingCallDone()
+
+            if self.tries >= self.device_scan_attempts:
+                msg = _("Fibre Channel volume device not found.")
+                LOG.error(msg)
+                raise exception.NoFibreChannelVolumeDeviceFound()
+
+            LOG.warn(_LW("Fibre volume not yet found. "
+                         "Will rescan & retry. 
Try number: %(tries)s"),
+                     {'tries': tries})
+
+            self._linuxfc.rescan_hosts(hbas)
+            self.tries = self.tries + 1
+
+        self.host_device = None
+        self.device_name = None
+        self.tries = 0
+        timer = loopingcall.FixedIntervalLoopingCall(
+            _wait_for_device_discovery, host_devices)
+        timer.start(interval=2).wait()
+
+        tries = self.tries
+        if self.host_device is not None and self.device_name is not None:
+            LOG.debug("Found Fibre Channel volume %(name)s "
+                      "(after %(tries)s rescans)",
+                      {'name': self.device_name, 'tries': tries})
+
+        # see if the new drive is part of a multipath
+        # device. If so, we'll use the multipath device.
+        if self.use_multipath:
+            mdev_info = self._linuxscsi.find_multipath_device(self.device_name)
+            if mdev_info is not None:
+                LOG.debug("Multipath device discovered %(device)s"
+                          % {'device': mdev_info['device']})
+                device_path = mdev_info['device']
+                devices = mdev_info['devices']
+                device_info['multipath_id'] = mdev_info['id']
+            else:
+                # we didn't find a multipath device.
+                # so we assume the kernel only sees 1 device
+                device_path = self.host_device
+                dev_info = self._linuxscsi.get_device_info(self.device_name)
+                devices = [dev_info]
+        else:
+            device_path = self.host_device
+            dev_info = self._linuxscsi.get_device_info(self.device_name)
+            devices = [dev_info]
+
+        device_info['path'] = device_path
+        device_info['devices'] = devices
+        return device_info
+
+    @synchronized('connect_volume')
+    def disconnect_volume(self, connection_properties, device_info):
+        """Detach the volume from instance_name.
+
+        connection_properties for Fibre Channel must include:
+        target_wwn - World Wide Name of the target port
+        target_lun - LUN id of the volume
+        """
+        devices = device_info['devices']
+
+        # If this is a multipath device, we need to search again
+        # and make sure we remove all the devices. Some of them
+        # might not have shown up at attach time. 
+ if self.use_multipath and 'multipath_id' in device_info: + multipath_id = device_info['multipath_id'] + mdev_info = self._linuxscsi.find_multipath_device(multipath_id) + devices = mdev_info['devices'] + LOG.debug("devices to remove = %s" % devices) + self._linuxscsi.flush_multipath_device(multipath_id) + + # There may have been more than 1 device mounted + # by the kernel for this volume. We have to remove + # all of them + for device in devices: + self._linuxscsi.remove_scsi_device(device["device"]) + + def _get_pci_num(self, hba): + # NOTE(walter-boring) + # device path is in format of + # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2 + # sometimes an extra entry exists before the host2 value + # we always want the value prior to the host2 value + pci_num = None + if hba is not None: + if "device_path" in hba: + index = 0 + device_path = hba['device_path'].split('/') + for value in device_path: + if value.startswith('host'): + break + index = index + 1 + + if index > 0: + pci_num = device_path[index - 1] + + return pci_num + + +class AoEConnector(InitiatorConnector): + """Connector class to attach/detach AoE volumes.""" + def __init__(self, root_helper, driver=None, + execute=putils.execute, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + super(AoEConnector, self).__init__(root_helper, driver=driver, + execute=execute, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + + def _get_aoe_info(self, connection_properties): + shelf = connection_properties['target_shelf'] + lun = connection_properties['target_lun'] + aoe_device = 'e%(shelf)s.%(lun)s' % {'shelf': shelf, + 'lun': lun} + aoe_path = '/dev/etherd/%s' % (aoe_device) + return aoe_device, aoe_path + + @lockutils.synchronized('aoe_control', 'aoe-') + def connect_volume(self, connection_properties): + """Discover and attach the volume. 
+ + connection_properties for AoE must include: + target_shelf - shelf id of volume + target_lun - lun id of volume + """ + aoe_device, aoe_path = self._get_aoe_info(connection_properties) + + device_info = { + 'type': 'block', + 'device': aoe_device, + 'path': aoe_path, + } + + if os.path.exists(aoe_path): + self._aoe_revalidate(aoe_device) + else: + self._aoe_discover() + + waiting_status = {'tries': 0} + + #NOTE(jbr_): Device path is not always present immediately + def _wait_for_discovery(aoe_path): + if os.path.exists(aoe_path): + raise loopingcall.LoopingCallDone + + if waiting_status['tries'] >= self.device_scan_attempts: + raise exception.VolumeDeviceNotFound(device=aoe_path) + + LOG.warn(_LW("AoE volume not yet found at: %(path)s. " + "Try number: %(tries)s"), + {'path': aoe_device, + 'tries': waiting_status['tries']}) + + self._aoe_discover() + waiting_status['tries'] += 1 + + timer = loopingcall.FixedIntervalLoopingCall(_wait_for_discovery, + aoe_path) + timer.start(interval=2).wait() + + if waiting_status['tries']: + LOG.debug("Found AoE device %(path)s " + "(after %(tries)s rediscover)", + {'path': aoe_path, + 'tries': waiting_status['tries']}) + + return device_info + + @lockutils.synchronized('aoe_control', 'aoe-') + def disconnect_volume(self, connection_properties, device_info): + """Detach and flush the volume. 
+ + connection_properties for AoE must include: + target_shelf - shelf id of volume + target_lun - lun id of volume + """ + aoe_device, aoe_path = self._get_aoe_info(connection_properties) + + if os.path.exists(aoe_path): + self._aoe_flush(aoe_device) + + def _aoe_discover(self): + (out, err) = self._execute('aoe-discover', + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=0) + + LOG.debug('aoe-discover: stdout=%(out)s stderr%(err)s' % + {'out': out, 'err': err}) + + def _aoe_revalidate(self, aoe_device): + (out, err) = self._execute('aoe-revalidate', + aoe_device, + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=0) + + LOG.debug('aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s' % + {'dev': aoe_device, 'out': out, 'err': err}) + + def _aoe_flush(self, aoe_device): + (out, err) = self._execute('aoe-flush', + aoe_device, + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=0) + LOG.debug('aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s' % + {'dev': aoe_device, 'out': out, 'err': err}) + + +class RemoteFsConnector(InitiatorConnector): + """Connector class to attach/detach NFS and GlusterFS volumes.""" + + def __init__(self, mount_type, root_helper, driver=None, + execute=putils.execute, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + kwargs = kwargs or {} + conn = kwargs.get('conn') + if conn: + mount_point_base = conn.get('mount_point_base') + if mount_type.lower() == 'nfs': + kwargs['nfs_mount_point_base'] =\ + kwargs.get('nfs_mount_point_base') or\ + mount_point_base + elif mount_type.lower() == 'glusterfs': + kwargs['glusterfs_mount_point_base'] =\ + kwargs.get('glusterfs_mount_point_base') or\ + mount_point_base + else: + LOG.warn(_LW("Connection details not present." 
+                     " RemoteFsClient may not initialize properly."))
+        self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper,
+                                                       execute=execute,
+                                                       *args, **kwargs)
+        super(RemoteFsConnector, self).__init__(root_helper, driver=driver,
+                                                execute=execute,
+                                                device_scan_attempts=
+                                                device_scan_attempts,
+                                                *args, **kwargs)
+
+    def set_execute(self, execute):
+        super(RemoteFsConnector, self).set_execute(execute)
+        self._remotefsclient.set_execute(execute)
+
+    def connect_volume(self, connection_properties):
+        """Ensure that the filesystem containing the volume is mounted.
+
+        connection_properties must include:
+        export - remote filesystem device (e.g. '172.18.194.100:/var/nfs')
+        name - file name within the filesystem
+
+        connection_properties may optionally include:
+        options - options to pass to mount
+        """
+
+        mnt_flags = []
+        if connection_properties.get('options'):
+            mnt_flags = connection_properties['options'].split()
+
+        nfs_share = connection_properties['export']
+        self._remotefsclient.mount(nfs_share, mnt_flags)
+        mount_point = self._remotefsclient.get_mount_point(nfs_share)
+
+        path = mount_point + '/' + connection_properties['name']
+
+        return {'path': path}
+
+    def disconnect_volume(self, connection_properties, device_info):
+        """No need to do anything to disconnect a volume in a filesystem."""
+
+
+class LocalConnector(InitiatorConnector):
+    """Connector class to attach/detach File System backed volumes."""
+
+    def __init__(self, root_helper, driver=None, execute=putils.execute,
+                 *args, **kwargs):
+        super(LocalConnector, self).__init__(root_helper, driver=driver,
+                                             execute=execute, *args, **kwargs)
+
+    def connect_volume(self, connection_properties):
+        """Connect to a volume. 
+
+        connection_properties must include:
+        device_path - path to the volume to be connected
+        """
+        if 'device_path' not in connection_properties:
+            msg = (_("Invalid connection_properties specified "
+                     "no device_path attribute"))
+            raise ValueError(msg)
+
+        device_info = {'type': 'local',
+                       'path': connection_properties['device_path']}
+        return device_info
+
+    def disconnect_volume(self, connection_properties, device_info):
+        """Disconnect a volume from the local host."""
+        pass
+
+
+class HuaweiStorHyperConnector(InitiatorConnector):
+    """Connector class to attach/detach SDSHypervisor volumes."""
+    attached_success_code = 0
+    has_been_attached_code = 50151401
+    attach_mnid_done_code = 50151405
+    vbs_unnormal_code = 50151209
+    not_mount_node_code = 50155007
+    iscliexist = True
+
+    def __init__(self, root_helper, driver=None, execute=putils.execute,
+                 *args, **kwargs):
+        self.cli_path = os.getenv('HUAWEISDSHYPERVISORCLI_PATH')
+        if not self.cli_path:
+            self.cli_path = '/usr/local/bin/sds/sds_cli'
+            LOG.debug("CLI path is not configured, using default %s."
+                      % self.cli_path)
+        if not os.path.isfile(self.cli_path):
+            self.iscliexist = False
+            LOG.error(_LE('SDS CLI file not found, '
+                          'HuaweiStorHyperConnector init failed.'))
+        super(HuaweiStorHyperConnector, self).__init__(root_helper,
+                                                       driver=driver,
+                                                       execute=execute,
+                                                       *args, **kwargs)
+
+    @synchronized('connect_volume')
+    def connect_volume(self, connection_properties):
+        """Connect to a volume."""
+        LOG.debug("Connect_volume connection properties: %s." 
+ % connection_properties) + out = self._attach_volume(connection_properties['volume_id']) + if not out or int(out['ret_code']) not in (self.attached_success_code, + self.has_been_attached_code, + self.attach_mnid_done_code): + msg = (_("Attach volume failed, " + "error code is %s") % out['ret_code']) + raise exception.BrickException(msg=msg) + out = self._query_attached_volume( + connection_properties['volume_id']) + if not out or int(out['ret_code']) != 0: + msg = _("query attached volume failed or volume not attached.") + raise exception.BrickException(msg=msg) + + device_info = {'type': 'block', + 'path': out['dev_addr']} + return device_info + + @synchronized('connect_volume') + def disconnect_volume(self, connection_properties, device_info): + """Disconnect a volume from the local host.""" + LOG.debug("Disconnect_volume: %s." % connection_properties) + out = self._detach_volume(connection_properties['volume_id']) + if not out or int(out['ret_code']) not in (self.attached_success_code, + self.vbs_unnormal_code, + self.not_mount_node_code): + msg = (_("Disconnect_volume failed, " + "error code is %s") % out['ret_code']) + raise exception.BrickException(msg=msg) + + def is_volume_connected(self, volume_name): + """Check if volume already connected to host""" + LOG.debug('Check if volume %s already connected to a host.' 
+ % volume_name) + out = self._query_attached_volume(volume_name) + if out: + return int(out['ret_code']) == 0 + return False + + def _attach_volume(self, volume_name): + return self._cli_cmd('attach', volume_name) + + def _detach_volume(self, volume_name): + return self._cli_cmd('detach', volume_name) + + def _query_attached_volume(self, volume_name): + return self._cli_cmd('querydev', volume_name) + + def _cli_cmd(self, method, volume_name): + LOG.debug("Enter into _cli_cmd.") + if not self.iscliexist: + msg = _("SDS command line doesn't exist, " + "can't execute SDS command.") + raise exception.BrickException(msg=msg) + if not method or volume_name is None: + return + cmd = [self.cli_path, '-c', method, '-v', volume_name] + out, clilog = self._execute(*cmd, run_as_root=False, + root_helper=self._root_helper) + analyse_result = self._analyze_output(out) + LOG.debug('%(method)s volume returns %(analyse_result)s.' + % {'method': method, 'analyse_result': analyse_result}) + if clilog: + LOG.error(_LE("SDS CLI output some log: %s.") + % clilog) + return analyse_result + + def _analyze_output(self, out): + LOG.debug("Enter into _analyze_output.") + if out: + analyse_result = {} + out_temp = out.split('\n') + for line in out_temp: + LOG.debug("Line is %s." % line) + if line.find('=') != -1: + key, val = line.split('=', 1) + LOG.debug(key + " = " + val) + if key in ['ret_code', 'ret_desc', 'dev_addr']: + analyse_result[key] = val + return analyse_result + else: + return None diff --git a/brick/initiator/host_driver.py b/brick/initiator/host_driver.py new file mode 100644 index 000000000..5de57ccda --- /dev/null +++ b/brick/initiator/host_driver.py @@ -0,0 +1,30 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + + +class HostDriver(object): + + def get_all_block_devices(self): + """Get the list of all block devices seen in /dev/disk/by-path/.""" + files = [] + dir = "/dev/disk/by-path/" + if os.path.isdir(dir): + files = os.listdir(dir) + devices = [] + for file in files: + devices.append(dir + file) + return devices diff --git a/brick/initiator/linuxfc.py b/brick/initiator/linuxfc.py new file mode 100644 index 000000000..9a45fbc51 --- /dev/null +++ b/brick/initiator/linuxfc.py @@ -0,0 +1,140 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Generic linux Fibre Channel utilities.""" + +import errno + +from oslo_concurrency import processutils as putils + +from brick.i18n import _LW +from brick.initiator import linuxscsi +from brick.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class LinuxFibreChannel(linuxscsi.LinuxSCSI): + def __init__(self, root_helper, execute=putils.execute, + *args, **kwargs): + super(LinuxFibreChannel, self).__init__(root_helper, execute, + *args, **kwargs) + + def rescan_hosts(self, hbas): + for hba in hbas: + self.echo_scsi_command("/sys/class/scsi_host/%s/scan" + % hba['host_device'], "- - -") + + def get_fc_hbas(self): + """Get the Fibre Channel HBA information.""" + out = None + try: + out, _err = self._execute('systool', '-c', 'fc_host', '-v', + run_as_root=True, + root_helper=self._root_helper) + except putils.ProcessExecutionError as exc: + # This handles the case where rootwrap is used + # and systool is not installed + # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND: + if exc.exit_code == 96: + LOG.warn(_LW("systool is not installed")) + return [] + except OSError as exc: + # This handles the case where rootwrap is NOT used + # and systool is not installed + if exc.errno == errno.ENOENT: + LOG.warn(_LW("systool is not installed")) + return [] + + # No FC HBAs were found + if out is None: + return [] + + lines = out.split('\n') + # ignore the first 2 lines + lines = lines[2:] + hbas = [] + hba = {} + lastline = None + for line in lines: + line = line.strip() + # 2 newlines denotes a new hba port + if line == '' and lastline == '': + if len(hba) > 0: + hbas.append(hba) + hba = {} + else: + val = line.split('=') + if len(val) == 2: + key = val[0].strip().replace(" ", "") + value = val[1].strip() + hba[key] = value.replace('"', '') + lastline = line + + return hbas + + def get_fc_hbas_info(self): + """Get Fibre Channel WWNs and device paths from the system, if any.""" + + # Note(walter-boring) modern Linux kernels contain the FC HBA's in 
/sys + # and are obtainable via the systool app + hbas = self.get_fc_hbas() + if not hbas: + return [] + + hbas_info = [] + for hba in hbas: + wwpn = hba['port_name'].replace('0x', '') + wwnn = hba['node_name'].replace('0x', '') + device_path = hba['ClassDevicepath'] + device = hba['ClassDevice'] + hbas_info.append({'port_name': wwpn, + 'node_name': wwnn, + 'host_device': device, + 'device_path': device_path}) + return hbas_info + + def get_fc_wwpns(self): + """Get Fibre Channel WWPNs from the system, if any.""" + + # Note(walter-boring) modern Linux kernels contain the FC HBA's in /sys + # and are obtainable via the systool app + hbas = self.get_fc_hbas() + + wwpns = [] + if hbas: + for hba in hbas: + if hba['port_state'] == 'Online': + wwpn = hba['port_name'].replace('0x', '') + wwpns.append(wwpn) + + return wwpns + + def get_fc_wwnns(self): + """Get Fibre Channel WWNNs from the system, if any.""" + + # Note(walter-boring) modern Linux kernels contain the FC HBA's in /sys + # and are obtainable via the systool app + hbas = self.get_fc_hbas() + if not hbas: + return [] + + wwnns = [] + if hbas: + for hba in hbas: + if hba['port_state'] == 'Online': + wwnn = hba['node_name'].replace('0x', '') + wwnns.append(wwnn) + + return wwnns diff --git a/brick/initiator/linuxscsi.py b/brick/initiator/linuxscsi.py new file mode 100644 index 000000000..0006d11aa --- /dev/null +++ b/brick/initiator/linuxscsi.py @@ -0,0 +1,193 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
"""Generic linux scsi subsystem and Multipath utilities.

   Note, this is not iSCSI.
"""
import os
import re

from oslo_concurrency import processutils as putils

from brick import executor
from brick.i18n import _, _LW
from brick.openstack.common import log as logging

LOG = logging.getLogger(__name__)

# Raw string: "\w" and "\d" are regex escapes, not string escapes.
MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$")


class LinuxSCSI(executor.Executor):
    """Helpers that drive the Linux SCSI subsystem and multipath tools."""

    def __init__(self, root_helper, execute=putils.execute,
                 *args, **kwargs):
        super(LinuxSCSI, self).__init__(root_helper, execute,
                                        *args, **kwargs)

    def echo_scsi_command(self, path, content):
        """Used to echo strings to scsi subsystem."""
        args = ["-a", path]
        kwargs = dict(process_input=content,
                      run_as_root=True,
                      root_helper=self._root_helper)
        self._execute('tee', *args, **kwargs)

    def get_name_from_path(self, path):
        """Translates /dev/disk/by-path/ entry to /dev/sdX.

        :returns: the resolved /dev/... name, or None if the path does not
                  resolve under /dev/.
        """
        name = os.path.realpath(path)
        if name.startswith("/dev/"):
            return name
        return None

    def remove_scsi_device(self, device):
        """Removes a scsi device based upon /dev/sdX name."""
        path = "/sys/block/%s/device/delete" % device.replace("/dev/", "")
        if os.path.exists(path):
            # flush any outstanding IO first
            self.flush_device_io(device)

            # Lazy logging args: no string interpolation unless DEBUG is on.
            LOG.debug("Remove SCSI device(%(dev)s) with %(path)s",
                      {'dev': device, 'path': path})
            self.echo_scsi_command(path, "1")

    def get_device_info(self, device):
        """Ask sg_scan for the host/channel/id/lun address of *device*.

        :returns: dict with 'device', 'host', 'channel', 'id', 'lun' keys;
                  address fields stay None if sg_scan produced no output.
        """
        (out, _err) = self._execute('sg_scan', device, run_as_root=True,
                                    root_helper=self._root_helper)
        dev_info = {'device': device, 'host': None,
                    'channel': None, 'id': None, 'lun': None}
        if out:
            line = out.strip()
            line = line.replace(device + ": ", "")
            info = line.split(" ")

            for item in info:
                if '=' in item:
                    pair = item.split('=')
                    dev_info[pair[0]] = pair[1]
                elif 'scsi' in item:
                    dev_info['host'] = item.replace('scsi', '')

        return dev_info

    def remove_multipath_device(self, multipath_name):
        """This removes LUNs associated with a multipath device
        and the multipath device itself.
        """
        LOG.debug("remove multipath device %s", multipath_name)
        mpath_dev = self.find_multipath_device(multipath_name)
        if mpath_dev:
            devices = mpath_dev['devices']
            LOG.debug("multipath LUNs to remove %s", devices)
            for device in devices:
                self.remove_scsi_device(device['device'])
            self.flush_multipath_device(mpath_dev['id'])

    def flush_device_io(self, device):
        """This is used to flush any remaining IO in the buffers."""
        try:
            LOG.debug("Flushing IO for device %s", device)
            self._execute('blockdev', '--flushbufs', device, run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            msg = _("Failed to flush IO buffers prior to removing"
                    " device: (%(code)s)") % {'code': exc.exit_code}
            LOG.warn(msg)

    def flush_multipath_device(self, device):
        """Flush a single multipath device map (multipath -f)."""
        try:
            LOG.debug("Flush multipath device %s", device)
            self._execute('multipath', '-f', device, run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warn(_LW("multipath call failed exit (%(code)s)"),
                     {'code': exc.exit_code})

    def flush_multipath_devices(self):
        """Flush all unused multipath device maps (multipath -F)."""
        try:
            self._execute('multipath', '-F', run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warn(_LW("multipath call failed exit (%(code)s)"),
                     {'code': exc.exit_code})

    def find_multipath_device(self, device):
        """Find a multipath device associated with a LUN device name.

        device can be either a /dev/sdX entry or a multipath id.

        :returns: dict with 'device' (the /dev/dm-X path), 'id' and
                  'devices' (the member LUNs), or None when no multipath
                  device is found.
        """
        mdev = None
        devices = []
        out = None
        try:
            (out, _err) = self._execute('multipath', '-l', device,
                                        run_as_root=True,
                                        root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warn(_LW("multipath call failed exit (%(code)s)"),
                     {'code': exc.exit_code})
            return None

        if out:
            lines = out.strip()
            lines = lines.split("\n")
            lines = [line for line in lines
                     if not MULTIPATH_ERROR_REGEX.match(line)]
            if lines:
                line = lines[0]
                info = line.split(" ")
                # device line output is different depending
                # on /etc/multipath.conf settings.
                if info[1][:2] == "dm":
                    mdev = "/dev/%s" % info[1]
                    mdev_id = info[0]
                elif info[2][:2] == "dm":
                    mdev = "/dev/%s" % info[2]
                    mdev_id = info[1].replace('(', '')
                    mdev_id = mdev_id.replace(')', '')

                if mdev is None:
                    LOG.warn(_LW("Couldn't find multipath device %(line)s"),
                             {'line': line})
                    return None

                LOG.debug("Found multipath device = %(mdev)s",
                          {'mdev': mdev})
                device_lines = lines[3:]
                for dev_line in device_lines:
                    if 'policy' in dev_line:
                        continue

                    dev_line = dev_line.lstrip(' |-`')
                    dev_info = dev_line.split()
                    address = dev_info[0].split(":")

                    dev = {'device': '/dev/%s' % dev_info[1],
                           'host': address[0], 'channel': address[1],
                           'id': address[2], 'lun': address[3]
                           }

                    devices.append(dev)

        if mdev is not None:
            info = {"device": mdev,
                    "id": mdev_id,
                    "devices": devices}
            return info
        return None
use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""oslo.i18n integration module. + +See http://docs.openstack.org/developer/oslo.i18n/usage.html + +""" + +try: + import oslo.i18n + + # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the + # application name when this module is synced into the separate + # repository. It is OK to have more than one translation function + # using the same domain, since there will still only be one message + # catalog. + _translators = oslo.i18n.TranslatorFactory(domain='brick') + + # The primary translation function using the well-known name "_" + _ = _translators.primary + + # Translators for log levels. + # + # The abbreviated names are meant to reflect the usual use of a short + # name like '_'. The "L" is for "log" and the other letter comes from + # the level. + _LI = _translators.log_info + _LW = _translators.log_warning + _LE = _translators.log_error + _LC = _translators.log_critical +except ImportError: + # NOTE(dims): Support for cases where a project wants to use + # code from oslo-incubator, but is not ready to be internationalized + # (like tempest) + _ = _LI = _LW = _LE = _LC = lambda x: x diff --git a/brick/openstack/common/local.py b/brick/openstack/common/local.py new file mode 100644 index 000000000..0819d5b97 --- /dev/null +++ b/brick/openstack/common/local.py @@ -0,0 +1,45 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
"""Local storage of variables using weak references"""

import threading
import weakref


class WeakLocal(threading.local):
    """Thread-local storage whose values are held only by weak reference."""

    def __getattribute__(self, attr):
        ref = super(WeakLocal, self).__getattribute__(attr)
        if ref:
            # NOTE(mikal): what is actually stored is a weak reference, not
            # the value itself; dereference it here to hand back the real
            # object (or None once the referent has been collected).
            ref = ref()
        return ref

    def __setattr__(self, attr, value):
        return super(WeakLocal, self).__setattr__(attr, weakref.ref(value))


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of
# scope when it falls out of scope in the code that uses the thread local
# storage. A "strong" store will hold a reference to the object so that it
# never falls out of scope.
weak_store = WeakLocal()
strong_store = threading.local()
"""OpenStack logging handler.

This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.

It also allows setting of formatting information through conf.

"""

import copy
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback

from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import importutils
import six
from six import moves

from brick.openstack.common._i18n import _
from brick.openstack.common import local

# PEP 8: module-level code goes after the import block, not inside it.
_PY26 = sys.version_info[0:2] == (2, 6)

_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"


common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of a logging configuration file. This file '
                    'is appended to any existing logging configuration '
                    'files. For details about logging configuration files, '
                    'see the Python logging module documentation.'),
    cfg.StrOpt('log-format',
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s .'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths.'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and will change in J to honor RFC5424.'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                # syslog format deprecation in J
                default=False,
                help='(Optional) Enables or disables syslog rfc5424 format '
                     'for logging. If enabled, prefixes the MSG part of the '
                     'syslog message with APP-NAME (RFC5424). The '
                     'format without the APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='Syslog facility to receive log lines.')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error.')
]

DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
                      'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
                      'oslo.messaging=INFO', 'iso8601=WARN',
                      'requests.packages.urllib3.connectionpool=WARN',
                      'urllib3.connectionpool=WARN', 'websocket=WARN',
                      "keystonemiddleware=WARN", "routes.middleware=WARN",
                      "stevedore=WARN"]

log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='Format string to use for log messages with context.'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='Format string to use for log messages without context.'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='Data to append to log format when level is DEBUG.'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='Prefix each line of exception output with this format.'),
    cfg.ListOpt('default_log_levels',
                default=DEFAULT_LOG_LEVELS,
                help='List of logger=LEVEL pairs.'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='Enables or disables publication of error events.'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Enables or disables fatal status of deprecations.'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance that is passed with the log '
                    'message.'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance UUID that is passed with the '
                    'log message.'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)


def list_opts():
    """Entry point for oslo.config-generator."""
    return [(None, copy.deepcopy(common_cli_opts)),
            (None, copy.deepcopy(logging_cli_opts)),
            (None, copy.deepcopy(generic_log_opts)),
            (None, copy.deepcopy(log_opts)),
            ]


# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')


try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            self.lock = None


def _dictify_context(context):
    """Return *context* as a plain dict (using to_dict() when available)."""
    if context is None:
        return None
    if not isinstance(context, dict) and getattr(context, 'to_dict', None):
        context = context.to_dict()
    return context


def _get_binary_name():
    # Walk to the outermost stack frame: that file is the script that
    # started this process.
    return os.path.basename(inspect.stack()[-1][1])


def _get_log_file_path(binary=None):
    """Combine the log_file/log_dir options into a log path, or None."""
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile and not logdir:
        return logfile

    if logfile and logdir:
        return os.path.join(logdir, logfile)

    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)

    return None


class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter base that adds the synthesized AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        self.log(logging.AUDIT, msg, *args, **kwargs)

    def isEnabledFor(self, level):
        if _PY26:
            # This method was added in python 2.7 (and it does the exact
            # same logic, so we need to do the exact same logic so that
            # python 2.6 has this capability as well).
            return self.logger.isEnabledFor(level)
        else:
            return super(BaseLoggerAdapter, self).isEnabledFor(level)


class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creating the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
            if six.PY3:
                # In Python 3, the code fails because the 'manager' attribute
                # cannot be found when using a LoggerAdapter as the
                # underlying logger. Work around this issue.
                self._logger.manager = self._logger.logger.manager
        return self._logger


class ContextAdapter(BaseLoggerAdapter):
    """Adapter that injects request-context data into every log record."""

    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # dict literal instead of dict() call (idiom).
        self._deprecated_messages_sent = {}

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.

        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, [])

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(msg, six.text_type):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        extra['extra'] = extra.copy()
        return msg, kwargs


class JSONFormatter(logging.Formatter):
    """Formatter that serializes the whole LogRecord as a JSON object."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)


def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL."""
    def logging_excepthook(exc_type, value, tb):
        extra = {'exc_info': (exc_type, value, tb)}
        getLogger(product_name).critical(
            "".join(traceback.format_exception_only(exc_type, value)),
            **extra)
    return logging_excepthook


class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)


def _load_log_config(log_config_append):
    """Append *log_config_append* to the active logging configuration.

    :raises: LogConfigError when the file cannot be parsed.
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except (moves.configparser.Error, KeyError) as exc:
        raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
    """Setup logging."""
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)


def set_defaults(logging_context_format_string=None,
                 default_log_levels=None):
    """Override the library defaults for the logging options."""
    # Just in case the caller is not setting the
    # default_log_level. This is insurance because
    # we introduced the default_log_level parameter
    # later in a backwards in-compatible change
    if default_log_levels is not None:
        cfg.set_defaults(
            log_opts,
            default_log_levels=default_log_levels)
    if logging_context_format_string is not None:
        cfg.set_defaults(
            log_opts,
            logging_context_format_string=logging_context_format_string)


def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility value.

    :raises: TypeError if the configured facility name is unknown.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        # BUG FIX: dict.keys() is a view on Python 3 and has no extend();
        # materialize it into a list before appending the LOG_* constants.
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility


class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prefixes each message with APP-NAME (RFC5424)."""

    def __init__(self, *args, **kwargs):
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        #  is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

    def format(self, record):
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        #  is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        msg = logging.handlers.SysLogHandler.format(self, record)
        msg = self.binary_name + ' ' + msg
        return msg
def _setup_logging_from_conf(project, version):
    """Wire up handlers, formatters and levels from the CONF options."""
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "oslo.messaging.notify.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)

    if CONF.use_syslog:
        try:
            facility = _find_facility_from_conf()
            # TODO(bogdando) use the format provided by RFCSysLogHandler
            #                after existing syslog format deprecation in J
            if CONF.use_syslog_rfc_format:
                syslog = RFCSysLogHandler(address='/dev/log',
                                          facility=facility)
            else:
                syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                        facility=facility)
            log_root.addHandler(syslog)
        except socket.error:
            log_root.error('Unable to add syslog handler. Verify that syslog '
                           'is running.')


# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return a cached ContextAdapter wrapping the named stdlib logger."""
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]


def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        self.logger.log(self.level, msg.rstrip())


class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level
    is debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context

    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string

        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')

        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(jecarey): coerce record.msg to unicode before it reaches the
        # stdlib logging machinery to avoid string encoding trouble.
        if not isinstance(record.msg, six.text_type):
            record.msg = six.text_type(record.msg)

        # store project info
        record.project = self.project
        record.version = self.version

        # store request info
        context = getattr(local.store, 'context', None)
        if context:
            for key, value in _dictify_context(context).items():
                setattr(record, key, value)

        # NOTE(sdague): default the fancier formatting params to an empty
        # string so we don't throw an exception if they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        if record.__dict__.get('request_id'):
            fmt = CONF.logging_context_format_string
        else:
            fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            fmt += " " + CONF.logging_debug_format_suffix

        if sys.version_info < (3, 2):
            self._fmt = fmt
        else:
            self._style = logging.PercentStyle(fmt)
            self._fmt = self._style._fmt
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        buf = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, buf)
        lines = buf.getvalue().split('\n')
        buf.close()

        if '%(asctime)' in CONF.logging_exception_prefix:
            record.asctime = self.formatTime(record, self.datefmt)

        prefix = CONF.logging_exception_prefix % record.__dict__
        return '\n'.join('%s%s' % (prefix, line) for line in lines)


class ColorHandler(logging.StreamHandler):
    """Stream handler that colors records by level via %(color)s."""

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)


class DeprecatedConfig(Exception):
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        super(Exception, self).__init__(self.message % dict(msg=msg))
import sys
import time

from eventlet import event
from eventlet import greenthread

from brick.openstack.common._i18n import _LE, _LW
from brick.openstack.common import log as logging

LOG = logging.getLogger(__name__)


# NOTE(zyluo): wrapped in our own callable so unit tests can mock
#              time.time() in the standard logging module without
#              colliding with our use of it here.
def _ts():
    return time.time()


class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCallBase.

    The poll-function passed to LoopingCallBase can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCallBase.wait()

    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCallBase.wait() should return."""
        self.retvalue = retvalue


class LoopingCallBase(object):
    """State shared by the fixed- and dynamic-interval looping calls."""

    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False
        self.done = None

    def stop(self):
        self._running = False

    def wait(self):
        return self.done.wait()


class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        self._running = True
        done = event.Event()

        def _runner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    started = _ts()
                    self.f(*self.args, **self.kw)
                    finished = _ts()
                    if not self._running:
                        break
                    # Positive delay means the callback overran the interval.
                    delay = finished - started - interval
                    if delay > 0:
                        LOG.warn(_LW('task %(func_name)r run outlasted '
                                     'interval by %(delay).2f sec'),
                                 {'func_name': self.f, 'delay': delay})
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done
        greenthread.spawn_n(_runner)
        return self.done


class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        self._running = True
        done = event.Event()

        def _runner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
                              'for %(idle).02f seconds',
                              {'func_name': self.f, 'idle': idle})
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done
        greenthread.spawn(_runner)
        return self.done
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Remote filesystem client utilities."""

import hashlib
import os
import re

from oslo_concurrency import processutils as putils
import six

from brick import exception
from brick.i18n import _, _LI
from brick.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class RemoteFsClient(object):
    """Mount helper for NFS, CIFS (smbfs) and GlusterFS shares.

    Each share is mounted under ``<mount_point_base>/<md5(share)>`` so a
    given share string always maps to the same local directory.

    :param mount_type: one of 'nfs', 'cifs' or 'glusterfs'.
    :param root_helper: rootwrap helper string passed to the executor.
    :param execute: callable used to run commands (defaults to
        ``processutils.execute``).
    :raises InvalidParameterValue: if the mount-point-base kwarg for the
        chosen mount type is missing.
    :raises ProtocolNotSupported: for any other mount type.
    """

    def __init__(self, mount_type, root_helper,
                 execute=putils.execute, *args, **kwargs):

        self._mount_type = mount_type
        if mount_type == "nfs":
            self._mount_base = kwargs.get('nfs_mount_point_base', None)
            if not self._mount_base:
                raise exception.InvalidParameterValue(
                    err=_('nfs_mount_point_base required'))
            self._mount_options = kwargs.get('nfs_mount_options', None)
            # Precompute the nfs/pnfs option sets used by _mount_nfs().
            self._check_nfs_options()
        elif mount_type == "cifs":
            self._mount_base = kwargs.get('smbfs_mount_point_base', None)
            if not self._mount_base:
                raise exception.InvalidParameterValue(
                    err=_('smbfs_mount_point_base required'))
            self._mount_options = kwargs.get('smbfs_mount_options', None)
        elif mount_type == "glusterfs":
            self._mount_base = kwargs.get('glusterfs_mount_point_base', None)
            if not self._mount_base:
                raise exception.InvalidParameterValue(
                    err=_('glusterfs_mount_point_base required'))
            self._mount_options = None
        else:
            raise exception.ProtocolNotSupported(protocol=mount_type)
        self.root_helper = root_helper
        self.set_execute(execute)

    def set_execute(self, execute):
        """Replace the command executor (used by tests to inject fakes)."""
        self._execute = execute

    def _get_hash_str(self, base_str):
        """Return a string that represents hash of base_str
        (in a hex format).
        """
        # Encode first: hashlib requires bytes on Python 3.  Share strings
        # are ASCII host:path values, so this hashes identically on
        # Python 2.  md5 is used only to derive a stable directory name,
        # not for anything security-sensitive.
        return hashlib.md5(base_str.encode('utf-8')).hexdigest()

    def get_mount_point(self, device_name):
        """Get Mount Point.

        :param device_name: example 172.18.194.100:/var/nfs
        """
        return os.path.join(self._mount_base,
                            self._get_hash_str(device_name))

    def _read_mounts(self):
        """Return {mount_point: device} parsed from `mount` output."""
        (out, _err) = self._execute('mount', check_exit_code=0)
        lines = out.split('\n')
        mounts = {}
        for line in lines:
            tokens = line.split()
            # `mount` lines look like: <device> on <mnt_point> type ...;
            # anything shorter cannot carry both fields.
            if 2 < len(tokens):
                device = tokens[0]
                mnt_point = tokens[2]
                mounts[mnt_point] = device
        return mounts

    def mount(self, share, flags=None):
        """Mount given share."""
        mount_path = self.get_mount_point(share)

        if mount_path in self._read_mounts():
            # Lazy %-style args: let the logging layer do the formatting.
            LOG.info(_LI('Already mounted: %s'), mount_path)
            return

        self._execute('mkdir', '-p', mount_path, check_exit_code=0)
        if self._mount_type == 'nfs':
            self._mount_nfs(share, mount_path, flags)
        else:
            self._do_mount(self._mount_type, share, mount_path,
                           self._mount_options, flags)

    def _do_mount(self, mount_type, share, mount_path, mount_options=None,
                  flags=None):
        """Mounts share based on the specified params."""
        mnt_cmd = ['mount', '-t', mount_type]
        if mount_options is not None:
            mnt_cmd.extend(['-o', mount_options])
        if flags is not None:
            mnt_cmd.extend(flags)
        mnt_cmd.extend([share, mount_path])

        self._execute(*mnt_cmd, root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=0)

    def _mount_nfs(self, nfs_share, mount_path, flags=None):
        """Mount nfs share using present mount types."""
        mnt_errors = {}

        # This loop allows us to first try to mount with NFS 4.1 for pNFS
        # support but falls back to mount NFS 4 or NFS 3 if either the client
        # or server do not support it.
        for mnt_type in sorted(self._nfs_mount_type_opts.keys(), reverse=True):
            options = self._nfs_mount_type_opts[mnt_type]
            try:
                self._do_mount('nfs', nfs_share, mount_path, options, flags)
                LOG.debug('Mounted %(sh)s using %(mnt_type)s.',
                          {'sh': nfs_share, 'mnt_type': mnt_type})
                return
            except Exception as e:
                mnt_errors[mnt_type] = six.text_type(e)
                LOG.debug('Failed to do %s mount.', mnt_type)
        raise exception.BrickException(_("NFS mount failed for share %(sh)s. "
                                         "Error - %(error)s")
                                       % {'sh': nfs_share,
                                          'error': mnt_errors})

    def _check_nfs_options(self):
        """Checks and prepares nfs mount type options."""
        self._nfs_mount_type_opts = {'nfs': self._mount_options}
        # Raw strings: '\d' in a non-raw literal is an invalid escape
        # sequence on Python 3.
        nfs_vers_opt_patterns = [r'^nfsvers', r'^vers', r'^v[\d]']
        for opt in nfs_vers_opt_patterns:
            if self._option_exists(self._mount_options, opt):
                # Caller pinned an NFS version explicitly; don't add a
                # pnfs variant.
                return

        # pNFS requires NFS 4.1. The mount.nfs4 utility does not automatically
        # negotiate 4.1 support, we have to ask for it by specifying two
        # options: vers=4 and minorversion=1.
        pnfs_opts = self._update_option(self._mount_options, 'vers', '4')
        pnfs_opts = self._update_option(pnfs_opts, 'minorversion', '1')
        self._nfs_mount_type_opts['pnfs'] = pnfs_opts

    def _option_exists(self, options, opt_pattern):
        """Checks if the option exists in nfs options and returns position."""
        options = [x.strip() for x in options.split(',')] if options else []
        pos = 0
        for opt in options:
            pos = pos + 1
            if re.match(opt_pattern, opt, flags=0):
                return pos
        # 0 is the "not found" sentinel (positions are 1-based).
        return 0

    def _update_option(self, options, option, value=None):
        """Update option if exists else adds it and returns new options."""
        opts = [x.strip() for x in options.split(',')] if options else []
        pos = self._option_exists(options, option)
        if pos:
            opts.pop(pos - 1)
        opt = '%s=%s' % (option, value) if value else option
        opts.append(opt)
        return ",".join(opts) if len(opts) > 1 else opts[0]
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import os

import fixtures
import mock
from oslo.utils import strutils
from oslotest import base

from brick.openstack.common import log as oslo_logging


LOG = oslo_logging.getLogger(__name__)


class TestCase(base.BaseTestCase):

    """Test case base class for all unit tests."""

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Honour OS_TEST_TIMEOUT when it is a positive integer; an
        # unparsable value means "no timeout" rather than an error.
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Optional stream/log capture, each driven by an environment flag.
        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if environ_enabled('OS_LOG_CAPTURE'):
            log_format = '%(levelname)s [%(name)s] %(message)s'
            if environ_enabled('OS_DEBUG'):
                level = logging.DEBUG
            else:
                level = logging.INFO
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=log_format,
                                                   level=level))

    def _common_cleanup(self):
        """Runs after each test method to tear down test environment."""

        # Stop any timers
        # NOTE(review): self.injected is never initialized anywhere in this
        # class -- confirm callers set it before relying on this cleanup.
        for x in self.injected:
            try:
                x.stop()
            except AssertionError:
                pass

        # Delete attributes that don't start with _ so they don't pin
        # memory around unnecessarily for the duration of the test
        # suite
        for key in [k for k in self.__dict__.keys() if k[0] != '_']:
            del self.__dict__[key]

    def log_level(self, level):
        """Set logging level to the specified value."""
        # Stdlib logging.getLogger() returns a Logger directly; the oslo
        # adaptor this code was copied from exposed it via a `.logger`
        # attribute, which would raise AttributeError here.
        log_root = logging.getLogger(None)
        log_root.setLevel(level)

    def mock_object(self, obj, attr_name, new_attr=None, **kwargs):
        """Use python mock to mock an object attribute

        Mocks the specified objects attribute with the given value.
        Automatically performs 'addCleanup' for the mock.

        """
        # Compare against None, not truthiness, so falsy replacement values
        # (0, '', {}, False) are honoured instead of being swapped for a
        # fresh Mock.
        if new_attr is None:
            new_attr = mock.Mock()
        patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs)
        patcher.start()
        self.addCleanup(patcher.stop)

    # Useful assertions
    def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
        """Assert two dicts are equivalent.

        This is a 'deep' match in the sense that it handles nested
        dictionaries appropriately.

        NOTE:

            If you don't care (or don't know) a given value, you can specify
            the string DONTCARE as the value. This will cause that dict-item
            to be skipped.

        """
        def raise_assertion(msg):
            d1str = d1
            d2str = d2
            base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
                        'd2: %(d2str)s' %
                        {'msg': msg, 'd1str': d1str, 'd2str': d2str})
            raise AssertionError(base_msg)

        d1keys = set(d1.keys())
        d2keys = set(d2.keys())
        if d1keys != d2keys:
            d1only = d1keys - d2keys
            d2only = d2keys - d1keys
            raise_assertion('Keys in d1 and not d2: %(d1only)s. '
                            'Keys in d2 and not d1: %(d2only)s' %
                            {'d1only': d1only, 'd2only': d2only})

        for key in d1keys:
            d1value = d1[key]
            d2value = d2[key]
            try:
                error = abs(float(d1value) - float(d2value))
                within_tolerance = error <= tolerance
            except (ValueError, TypeError):
                # If both values aren't convertible to float, just ignore
                # ValueError if arg is a str, TypeError if it's something else
                # (like None)
                within_tolerance = False

            if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
                self.assertDictMatch(d1value, d2value)
            elif 'DONTCARE' in (d1value, d2value):
                continue
            elif approx_equal and within_tolerance:
                continue
            elif d1value != d2value:
                raise_assertion("d1['%(key)s']=%(d1value)s != "
                                "d2['%(key)s']=%(d2value)s" %
                                {
                                    'key': key,
                                    'd1value': d1value,
                                    'd2value': d2value,
                                })
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Unit tests for brick.initiator.connector."""

import os.path
import time

import mock
from oslo_concurrency import processutils as putils
import testtools

from brick import exception
from brick.i18n import _
from brick.initiator import connector
from brick.initiator import host_driver
from brick.openstack.common import log as logging
from brick.openstack.common import loopingcall
from brick.tests import base


LOG = logging.getLogger(__name__)


class ConnectorTestCase(base.TestCase):

    def setUp(self):
        super(ConnectorTestCase, self).setUp()
        self.cmds = []

    def fake_execute(self, *cmd, **kwargs):
        # Record the command line for later assertions instead of running it.
        # NOTE: string.join() was removed in Python 3; ' '.join() is the
        # portable equivalent.
        self.cmds.append(' '.join(cmd))
        return "", None

    def test_connect_volume(self):
        self.connector = connector.InitiatorConnector(None)
        self.assertRaises(NotImplementedError,
                          self.connector.connect_volume, None)

    def test_disconnect_volume(self):
        self.connector = connector.InitiatorConnector(None)
        self.assertRaises(NotImplementedError,
                          self.connector.disconnect_volume, None, None)

    def test_factory(self):
        obj = connector.InitiatorConnector.factory('iscsi', None)
        self.assertEqual(obj.__class__.__name__, "ISCSIConnector")

        obj = connector.InitiatorConnector.factory('fibre_channel', None)
        self.assertEqual(obj.__class__.__name__, "FibreChannelConnector")

        obj = connector.InitiatorConnector.factory('aoe', None)
        self.assertEqual(obj.__class__.__name__, "AoEConnector")

        obj = connector.InitiatorConnector.factory(
            'nfs', None, nfs_mount_point_base='/mnt/test')
        self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")

        obj = connector.InitiatorConnector.factory(
            'glusterfs', None, glusterfs_mount_point_base='/mnt/test')
        self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")

        obj = connector.InitiatorConnector.factory('local', None)
        self.assertEqual(obj.__class__.__name__, "LocalConnector")

        self.assertRaises(ValueError,
                          connector.InitiatorConnector.factory,
                          "bogus", None)

    def test_check_valid_device_with_wrong_path(self):
        self.connector = connector.InitiatorConnector(None)
        self.connector._execute = \
            lambda *args, **kwargs: ("", None)
        self.assertFalse(self.connector.check_valid_device('/d0v'))

    def test_check_valid_device(self):
        self.connector = connector.InitiatorConnector(None)
        self.connector._execute = \
            lambda *args, **kwargs: ("", "")
        self.assertTrue(self.connector.check_valid_device('/dev'))

    def test_check_valid_device_with_cmd_error(self):
        def raise_except(*args, **kwargs):
            raise putils.ProcessExecutionError
        self.connector = connector.InitiatorConnector(None)
        self.connector._execute = mock.Mock()
        self.connector._execute.side_effect = raise_except
        self.assertFalse(self.connector.check_valid_device('/dev'))


class HostDriverTestCase(base.TestCase):

    def setUp(self):
        super(HostDriverTestCase, self).setUp()
        # NOTE(review): os.path/os are patched globally without cleanup,
        # matching the style used throughout this module.
        isdir_mock = mock.Mock()
        isdir_mock.return_value = True
        os.path.isdir = isdir_mock
        self.devlist = ['device1', 'device2']
        listdir_mock = mock.Mock()
        listdir_mock.return_value = self.devlist
        os.listdir = listdir_mock

    def test_host_driver(self):
        expected = ['/dev/disk/by-path/' + dev for dev in self.devlist]
        driver = host_driver.HostDriver()
        actual = driver.get_all_block_devices()
        self.assertEqual(expected, actual)


class ISCSIConnectorTestCase(ConnectorTestCase):

    def setUp(self):
        super(ISCSIConnectorTestCase, self).setUp()
        self.connector = connector.ISCSIConnector(
            None, execute=self.fake_execute, use_multipath=False)

        get_name_mock = mock.Mock()
        get_name_mock.return_value = "/dev/sdb"
        self.connector._linuxscsi.get_name_from_path = get_name_mock

    def iscsi_connection(self, volume, location, iqn):
        """Build a fake iSCSI connection_info dict."""
        return {
            'driver_volume_type': 'iscsi',
            'data': {
                'volume_id': volume['id'],
                'target_portal': location,
                'target_iqn': iqn,
                'target_lun': 1,
            }
        }

    def test_get_initiator(self):
        def initiator_no_file(*args, **kwargs):
            raise putils.ProcessExecutionError('No file')

        def initiator_get_text(*arg, **kwargs):
            text = ('## DO NOT EDIT OR REMOVE THIS FILE!\n'
                    '## If you remove this file, the iSCSI daemon '
                    'will not start.\n'
                    '## If you change the InitiatorName, existing '
                    'access control lists\n'
                    '## may reject this initiator. The InitiatorName must '
                    'be unique\n'
                    '## for each iSCSI initiator. Do NOT duplicate iSCSI '
                    'InitiatorNames.\n'
                    'InitiatorName=iqn.1234-56.foo.bar:01:23456789abc')
            return text, None

        self.connector._execute = initiator_no_file
        initiator = self.connector.get_initiator()
        self.assertIsNone(initiator)
        self.connector._execute = initiator_get_text
        initiator = self.connector.get_initiator()
        self.assertEqual(initiator, 'iqn.1234-56.foo.bar:01:23456789abc')

    @testtools.skipUnless(os.path.exists('/dev/disk/by-path'),
                          'Test requires /dev/disk/by-path')
    def test_connect_volume(self):
        # NOTE: this TestCase has no mox/stubs fixture, so the former
        # self.stubs.Set() call raised AttributeError; patch directly like
        # the rest of this module.
        os.path.exists = lambda x: True
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        connection_info = self.iscsi_connection(vol, location, iqn)
        device = self.connector.connect_volume(connection_info['data'])
        dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
        self.assertEqual(device['type'], 'block')
        self.assertEqual(device['path'], dev_str)

        self.connector.disconnect_volume(connection_info['data'], device)
        expected_commands = [('iscsiadm -m node -T %s -p %s' %
                              (iqn, location)),
                             ('iscsiadm -m session'),
                             ('iscsiadm -m node -T %s -p %s --login' %
                              (iqn, location)),
                             ('iscsiadm -m node -T %s -p %s --op update'
                              ' -n node.startup -v automatic'
                              % (iqn, location)),
                             ('iscsiadm -m node --rescan'),
                             ('iscsiadm -m session --rescan'),
                             ('blockdev --flushbufs /dev/sdb'),
                             ('tee -a /sys/block/sdb/device/delete'),
                             ('iscsiadm -m node -T %s -p %s --op update'
                              ' -n node.startup -v manual' % (iqn, location)),
                             ('iscsiadm -m node -T %s -p %s --logout' %
                              (iqn, location)),
                             ('iscsiadm -m node -T %s -p %s --op delete' %
                              (iqn, location)), ]
        LOG.debug("self.cmds = %s", self.cmds)
        LOG.debug("expected = %s", expected_commands)

        self.assertEqual(expected_commands, self.cmds)

    def test_connect_volume_with_multipath(self):
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        connection_properties = self.iscsi_connection(vol, location, iqn)

        self.connector_with_multipath = \
            connector.ISCSIConnector(None, use_multipath=True)
        self.connector_with_multipath._run_iscsiadm_bare = \
            lambda *args, **kwargs: "%s %s" % (location, iqn)
        portals_mock = mock.Mock()
        portals_mock.return_value = [[location, iqn]]
        self.connector_with_multipath.\
            _get_target_portals_from_iscsiadm_output = portals_mock
        connect_to_mock = mock.Mock()
        connect_to_mock.return_value = None
        self.connector_with_multipath._connect_to_iscsi_portal = \
            connect_to_mock
        rescan_iscsi_mock = mock.Mock()
        rescan_iscsi_mock.return_value = None
        self.connector_with_multipath._rescan_iscsi = rescan_iscsi_mock
        rescan_multipath_mock = mock.Mock()
        rescan_multipath_mock.return_value = None
        self.connector_with_multipath._rescan_multipath = \
            rescan_multipath_mock
        get_device_mock = mock.Mock()
        get_device_mock.return_value = 'iqn.2010-10.org.openstack:%s' % name
        self.connector_with_multipath._get_multipath_device_name = \
            get_device_mock
        exists_mock = mock.Mock()
        exists_mock.return_value = True
        os.path.exists = exists_mock
        result = self.connector_with_multipath.connect_volume(
            connection_properties['data'])
        expected_result = {'path': 'iqn.2010-10.org.openstack:volume-00000001',
                           'type': 'block'}
        self.assertEqual(result, expected_result)

    def test_connect_volume_with_not_found_device(self):
        exists_mock = mock.Mock()
        exists_mock.return_value = False
        os.path.exists = exists_mock
        # Avoid real waiting inside the connector's retry loop.
        sleep_mock = mock.Mock()
        sleep_mock.return_value = None
        time.sleep = sleep_mock
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        connection_info = self.iscsi_connection(vol, location, iqn)
        self.assertRaises(exception.VolumeDeviceNotFound,
                          self.connector.connect_volume,
                          connection_info['data'])

    def test_get_target_portals_from_iscsiadm_output(self):
        connector = self.connector
        test_output = '''10.15.84.19:3260 iqn.1992-08.com.netapp:sn.33615311
                         10.15.85.19:3260 iqn.1992-08.com.netapp:sn.33615311'''
        res = connector._get_target_portals_from_iscsiadm_output(test_output)
        ip_iqn1 = ['10.15.84.19:3260', 'iqn.1992-08.com.netapp:sn.33615311']
        ip_iqn2 = ['10.15.85.19:3260', 'iqn.1992-08.com.netapp:sn.33615311']
        expected = [ip_iqn1, ip_iqn2]
        self.assertEqual(expected, res)

    def test_get_multipath_device_name(self):
        realpath = mock.Mock()
        realpath.return_value = None
        os.path.realpath = realpath
        multipath_return_string = [('mpath2 (20017380006c00036)'
                                    'dm-7 IBM,2810XIV')]
        self.connector._run_multipath = \
            lambda *args, **kwargs: multipath_return_string
        expected = '/dev/mapper/mpath2'
        self.assertEqual(expected,
                         self.connector.
                         _get_multipath_device_name('/dev/md-1'))

    def test_get_iscsi_devices(self):
        paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
                  'com.netapp:node.netapp02-lun-0')]
        walk_mock = lambda x: [(['.'], ['by-path'], paths)]
        os.walk = walk_mock
        self.assertEqual(self.connector._get_iscsi_devices(), paths)

    def test_get_iscsi_devices_with_empty_dir(self):
        walk_mock = mock.Mock()
        walk_mock.return_value = []
        os.walk = walk_mock
        self.assertEqual(self.connector._get_iscsi_devices(), [])

    def test_get_multipath_iqn(self):
        paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
                  'com.netapp:node.netapp02-lun-0')]
        realpath = lambda x: '/dev/disk/by-path/%s' % paths[0]
        os.path.realpath = realpath

        get_iscsi_mock = mock.Mock()
        get_iscsi_mock.return_value = paths
        self.connector._get_iscsi_devices = get_iscsi_mock

        get_multipath_device_mock = mock.Mock()
        get_multipath_device_mock.return_value = paths[0]
        self.connector._get_multipath_device_name = get_multipath_device_mock
        self.assertEqual(self.connector._get_multipath_iqn(paths[0]),
                         'iqn.2013-01.ro.com.netapp:node.netapp02')

    def test_disconnect_volume_multipath_iscsi(self):
        result = []

        def fake_disconnect_from_iscsi_portal(properties):
            result.append(properties)

        iqn1 = 'iqn.2013-01.ro.com.netapp:node.netapp01'
        iqn2 = 'iqn.2013-01.ro.com.netapp:node.netapp02'
        iqns = [iqn1, iqn2]
        portal = '10.0.0.1:3260'
        dev = ('ip-%s-iscsi-%s-lun-0' % (portal, iqn1))

        # NOTE: these mocks were previously created but never attached to
        # the connector, so the real methods (which shell out) would have
        # run; wire them up as the sibling test below does.
        get_portals_mock = mock.Mock()
        get_portals_mock.return_value = [[portal, iqn1]]
        self.connector._get_target_portals_from_iscsiadm_output = \
            get_portals_mock

        rescan_iscsi_mock = mock.Mock()
        rescan_iscsi_mock.return_value = None
        self.connector._rescan_iscsi = rescan_iscsi_mock

        rescan_multipath = mock.Mock()
        rescan_multipath.return_value = None
        self.connector._rescan_multipath = rescan_multipath

        get_block_devices_mock = mock.Mock()
        get_block_devices_mock.return_value = [dev, '/dev/mapper/md-1']
        self.connector.driver.get_all_block_devices = get_block_devices_mock

        get_multipath_name_mock = mock.Mock()
        get_multipath_name_mock.return_value = '/dev/mapper/md-3'
        self.connector._get_multipath_device_name = get_multipath_name_mock

        self.connector._get_multipath_iqn = lambda x: iqns.pop()

        disconnect_mock = fake_disconnect_from_iscsi_portal
        self.connector._disconnect_from_iscsi_portal = disconnect_mock
        fake_property = {'target_portal': portal,
                         'target_iqn': iqn1}
        self.connector._disconnect_volume_multipath_iscsi(fake_property,
                                                          'fake/multipath')
        # Target in use by other mp devices, don't disconnect
        self.assertEqual([], result)

    def test_disconnect_volume_multipath_iscsi_without_other_mp_devices(self):
        result = []

        def fake_disconnect_from_iscsi_portal(properties):
            result.append(properties)
        portal = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name

        get_portals_mock = mock.Mock()
        get_portals_mock.return_value = [[portal, iqn]]
        self.connector._get_target_portals_from_iscsiadm_output = \
            get_portals_mock

        rescan_iscsi_mock = mock.Mock()
        rescan_iscsi_mock.return_value = None
        self.connector._rescan_iscsi = rescan_iscsi_mock

        rescan_multipath_mock = mock.Mock()
        rescan_multipath_mock.return_value = None
        self.connector._rescan_multipath = rescan_multipath_mock

        get_all_devices_mock = mock.Mock()
        get_all_devices_mock.return_value = []
        self.connector.driver.get_all_block_devices = get_all_devices_mock

        self.connector._disconnect_from_iscsi_portal = \
            fake_disconnect_from_iscsi_portal
        fake_property = {'target_portal': portal,
                         'target_iqn': iqn}
        self.connector._disconnect_volume_multipath_iscsi(fake_property,
                                                          'fake/multipath')
        # Target not in use by other mp devices, disconnect
        self.assertEqual([fake_property], result)


class FibreChannelConnectorTestCase(ConnectorTestCase):
    def setUp(self):
        super(FibreChannelConnectorTestCase, self).setUp()
        self.connector = connector.FibreChannelConnector(
            None, execute=self.fake_execute, use_multipath=False)
        self.assertIsNotNone(self.connector)
        self.assertIsNotNone(self.connector._linuxfc)
        self.assertIsNotNone(self.connector._linuxscsi)

    def fake_get_fc_hbas(self):
        """Return one fake fc_host sysfs entry."""
        return [{'ClassDevice': 'host1',
                 'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0'
                                    '/0000:05:00.2/host1/fc_host/host1',
                 'dev_loss_tmo': '30',
                 'fabric_name': '0x1000000533f55566',
                 'issue_lip': '',
                 'max_npiv_vports': '255',
                 'maxframe_size': '2048 bytes',
                 'node_name': '0x200010604b019419',
                 'npiv_vports_inuse': '0',
                 'port_id': '0x680409',
                 'port_name': '0x100010604b019419',
                 'port_state': 'Online',
                 'port_type': 'NPort (fabric via point-to-point)',
                 'speed': '10 Gbit',
                 'supported_classes': 'Class 3',
                 'supported_speeds': '10 Gbit',
                 'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27',
                 'tgtid_bind_type': 'wwpn (World Wide Port Name)',
                 'uevent': None,
                 'vport_create': '',
                 'vport_delete': ''}]

    def fake_get_fc_hbas_info(self):
        hbas = self.fake_get_fc_hbas()
        info = [{'port_name': hbas[0]['port_name'].replace('0x', ''),
                 'node_name': hbas[0]['node_name'].replace('0x', ''),
                 'host_device': hbas[0]['ClassDevice'],
                 'device_path': hbas[0]['ClassDevicePath']}]
        return info

    def fibrechan_connection(self, volume, location, wwn):
        return {'driver_volume_type': 'fibrechan',
                'data': {
                    'volume_id': volume['id'],
                    'target_portal': location,
                    'target_wwn': wwn,
                    'target_lun': 1,
                }}

    def test_connect_volume(self):
        self.connector._linuxfc.get_fc_hbas = self.fake_get_fc_hbas
        self.connector._linuxfc.get_fc_hbas_info = \
            self.fake_get_fc_hbas_info
        exists_mock = mock.Mock()
        exists_mock.return_value = True
        os.path.exists = exists_mock
        realpath_mock = mock.Mock()
        realpath_mock.return_value = '/dev/sdb'
        os.path.realpath = realpath_mock

        multipath_devname = '/dev/md-1'
        devices = {"device": multipath_devname,
                   "id": "1234567890",
                   "devices": [{'device': '/dev/sdb',
                                'address': '1:0:0:1',
                                'host': 1, 'channel': 0,
                                'id': 0, 'lun': 1}]}
        find_device_mock = mock.Mock()
        find_device_mock.return_value = devices
        self.connector._linuxscsi.find_multipath_device = find_device_mock
        remove_device_mock = mock.Mock()
        remove_device_mock.return_value = None
        self.connector._linuxscsi.remove_scsi_device = remove_device_mock
        get_device_info_mock = mock.Mock()
        get_device_info_mock.return_value = devices['devices'][0]
        self.connector._linuxscsi.get_device_info = get_device_info_mock
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        vol = {'id': 1, 'name': name}
        # Should work for string, unicode, and list
        # NOTE: the unicode() builtin does not exist on Python 3; a u''
        # literal is valid on both Python 2 and 3.3+.
        wwns = ['1234567890123456', u'1234567890123456',
                ['1234567890123456', '1234567890123457']]
        for wwn in wwns:
            connection_info = self.fibrechan_connection(vol, location, wwn)
            dev_info = self.connector.connect_volume(connection_info['data'])
            exp_wwn = wwn[0] if isinstance(wwn, list) else wwn
            dev_str = ('/dev/disk/by-path/pci-0000:05:00.2-fc-0x%s-lun-1' %
                       exp_wwn)
            self.assertEqual(dev_info['type'], 'block')
            self.assertEqual(dev_info['path'], dev_str)

            self.connector.disconnect_volume(connection_info['data'], dev_info)
            expected_commands = []
            self.assertEqual(expected_commands, self.cmds)

        # Should not work for anything other than string, unicode, and list
        connection_info = self.fibrechan_connection(vol, location, 123)
        self.assertRaises(exception.NoFibreChannelHostsFound,
                          self.connector.connect_volume,
                          connection_info['data'])

        get_fc_hbas_mock = mock.Mock()
        get_fc_hbas_mock.return_value = []
        self.connector._linuxfc.get_fc_hbas = get_fc_hbas_mock

        get_fc_hbas_info_mock = mock.Mock()
        get_fc_hbas_info_mock.return_value = []
        self.connector._linuxfc.get_fc_hbas_info = get_fc_hbas_info_mock
        self.assertRaises(exception.NoFibreChannelHostsFound,
                          self.connector.connect_volume,
                          connection_info['data'])


class FakeFixedIntervalLoopingCall(object):
    """Synchronous stand-in for loopingcall.FixedIntervalLoopingCall."""

    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._stop = False

    def stop(self):
        self._stop = True

    def wait(self):
        return self

    def start(self, interval, initial_delay=None):
        # Run the callee inline (no greenthreads) until it stops the loop.
        while not self._stop:
            try:
                self.f(*self.args, **self.kw)
            except loopingcall.LoopingCallDone:
                return self
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                raise


class AoEConnectorTestCase(ConnectorTestCase):
    """Test cases for AoE initiator class."""
    def setUp(self):
        super(AoEConnectorTestCase, self).setUp()
        self.connector = connector.AoEConnector('sudo')
        self.connection_properties = {'target_shelf': 'fake_shelf',
                                      'target_lun': 'fake_lun'}
        loopingcall.FixedIntervalLoopingCall = FakeFixedIntervalLoopingCall

    def _mock_path_exists(self, aoe_path, mock_values=None):
        # NOTE(review): a list assigned to return_value is returned as-is
        # (always truthy); side_effect was probably intended to yield the
        # values one call at a time -- confirm against AoEConnector before
        # changing, as the number of exists() calls is not pinned here.
        exists_mock = mock.Mock()
        exists_mock.return_value = mock_values
        os.path.exists = exists_mock

    def test_connect_volume(self):
        """Ensure that if path exist aoe-revaliadte was called."""
        aoe_device, aoe_path = self.connector._get_aoe_info(
            self.connection_properties)

        self._mock_path_exists(aoe_path, [True, True])

        exec_mock = mock.Mock()
        exec_mock.return_value = ["", ""]
        self.connector._execute = exec_mock

        self.connector.connect_volume(self.connection_properties)

    def test_connect_volume_without_path(self):
        """Ensure that if path doesn't exist aoe-discovery was called."""

        aoe_device, aoe_path = self.connector._get_aoe_info(
            self.connection_properties)
        expected_info = {
            'type': 'block',
            'device': aoe_device,
            'path': aoe_path,
        }

        self._mock_path_exists(aoe_path, [False, True])

        exec_mock = mock.Mock()
        exec_mock.return_value = ["", ""]
        self.connector._execute = exec_mock

        volume_info = self.connector.connect_volume(
            self.connection_properties)

        self.assertDictMatch(volume_info, expected_info)

    def test_connect_volume_could_not_discover_path(self):
        aoe_device, aoe_path = self.connector._get_aoe_info(
            self.connection_properties)

        exists_mock = mock.Mock()
        exists_mock.return_value = False
        os.path.exists = exists_mock
        exec_mock = mock.Mock()
        exec_mock.return_value = ["", ""]
        self.connector._execute = exec_mock
        self.assertRaises(exception.VolumeDeviceNotFound,
                          self.connector.connect_volume,
                          self.connection_properties)

    def test_disconnect_volume(self):
        """Ensure that if path exist aoe-revaliadte was called."""
        aoe_device, aoe_path = self.connector._get_aoe_info(
            self.connection_properties)

        self._mock_path_exists(aoe_path, [True])

        exec_mock = mock.Mock()
        exec_mock.return_value = ["", ""]
        self.connector._execute = exec_mock

        self.connector.disconnect_volume(self.connection_properties, {})


class RemoteFsConnectorTestCase(ConnectorTestCase):
    """Test cases for Remote FS initiator class."""
    TEST_DEV = '172.18.194.100:/var/nfs'
    TEST_PATH = '/mnt/test/df0808229363aad55c27da50c38d6328'

    def setUp(self):
        super(RemoteFsConnectorTestCase, self).setUp()
        self.connection_properties = {
            'export': self.TEST_DEV,
            'name': '9c592d52-ce47-4263-8c21-4ecf3c029cdb'}
        self.connector = connector.RemoteFsConnector(
            'nfs', root_helper='sudo', nfs_mount_point_base='/mnt/test',
            nfs_mount_options='vers=3')

    def test_connect_volume(self):
        """Test the basic connect volume case."""
        client = self.connector._remotefsclient
        client.mount = mock.Mock()
        client.get_mount_point = mock.Mock()
        client.get_mount_point.return_value = "ass"

        self.connector.connect_volume(self.connection_properties)

    def test_disconnect_volume(self):
        """Nothing should happen here -- make sure it doesn't blow up."""
        self.connector.disconnect_volume(self.connection_properties, {})


class LocalConnectorTestCase(base.TestCase):

    def setUp(self):
        super(LocalConnectorTestCase, self).setUp()
        self.connection_properties = {'name': 'foo',
                                      'device_path': '/tmp/bar'}

    def test_connect_volume(self):
        self.connector = connector.LocalConnector(None)
        cprops = self.connection_properties
        dev_info = self.connector.connect_volume(cprops)
        self.assertEqual(dev_info['type'], 'local')
        self.assertEqual(dev_info['path'], cprops['device_path'])

    def test_connect_volume_with_invalid_connection_data(self):
        self.connector = connector.LocalConnector(None)
        cprops = {}
        self.assertRaises(ValueError,
                          self.connector.connect_volume, cprops)
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os.path +import string + +import mock + +from brick.initiator import linuxfc +from brick.openstack.common import log as logging +from brick.tests import base + +LOG = logging.getLogger(__name__) + + +class LinuxFCTestCase(base.TestCase): + + def setUp(self): + super(LinuxFCTestCase, self).setUp() + self.cmds = [] + + exists_mock = mock.Mock() + exists_mock.return_value = True + os.path.exists = exists_mock + self.lfc = linuxfc.LinuxFibreChannel(None, execute=self.fake_execute) + + def fake_execute(self, *cmd, **kwargs): + self.cmds.append(string.join(cmd)) + return "", None + + def test_rescan_hosts(self): + hbas = [{'host_device': 'foo'}, + {'host_device': 'bar'}, ] + self.lfc.rescan_hosts(hbas) + expected_commands = ['tee -a /sys/class/scsi_host/foo/scan', + 'tee -a /sys/class/scsi_host/bar/scan'] + self.assertEqual(expected_commands, self.cmds) + + def test_get_fc_hbas_fail(self): + def fake_exec1(a, b, c, d, run_as_root=True, root_helper='sudo'): + raise OSError + + def fake_exec2(a, b, c, d, run_as_root=True, root_helper='sudo'): + return None, 'None found' + + self.lfc._execute = fake_exec1 + hbas = self.lfc.get_fc_hbas() + self.assertEqual(0, len(hbas)) + self.lfc._execute = fake_exec2 + hbas = self.lfc.get_fc_hbas() + self.assertEqual(0, len(hbas)) + + def test_get_fc_hbas(self): + def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'): + return SYSTOOL_FC, None + self.lfc._execute = fake_exec + hbas = self.lfc.get_fc_hbas() + self.assertEqual(2, len(hbas)) + hba1 = hbas[0] + self.assertEqual(hba1["ClassDevice"], "host0") + hba2 = 
hbas[1] + self.assertEqual(hba2["ClassDevice"], "host2") + + def test_get_fc_hbas_info(self): + def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'): + return SYSTOOL_FC, None + self.lfc._execute = fake_exec + hbas_info = self.lfc.get_fc_hbas_info() + expected_info = [{'device_path': '/sys/devices/pci0000:20/' + '0000:20:03.0/0000:21:00.0/' + 'host0/fc_host/host0', + 'host_device': 'host0', + 'node_name': '50014380242b9751', + 'port_name': '50014380242b9750'}, + {'device_path': '/sys/devices/pci0000:20/' + '0000:20:03.0/0000:21:00.1/' + 'host2/fc_host/host2', + 'host_device': 'host2', + 'node_name': '50014380242b9753', + 'port_name': '50014380242b9752'}, ] + self.assertEqual(expected_info, hbas_info) + + def test_get_fc_wwpns(self): + def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'): + return SYSTOOL_FC, None + + self.lfc._execute = fake_exec + wwpns = self.lfc.get_fc_wwpns() + expected_wwpns = ['50014380242b9750', '50014380242b9752'] + self.assertEqual(expected_wwpns, wwpns) + + def test_get_fc_wwnns(self): + def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'): + return SYSTOOL_FC, None + self.lfc._execute = fake_exec + wwnns = self.lfc.get_fc_wwpns() + expected_wwnns = ['50014380242b9750', '50014380242b9752'] + self.assertEqual(expected_wwnns, wwnns) + +SYSTOOL_FC = """ +Class = "fc_host" + + Class Device = "host0" + Class Device path = "/sys/devices/pci0000:20/0000:20:03.0/\ +0000:21:00.0/host0/fc_host/host0" + dev_loss_tmo = "16" + fabric_name = "0x100000051ea338b9" + issue_lip = + max_npiv_vports = "0" + node_name = "0x50014380242b9751" + npiv_vports_inuse = "0" + port_id = "0x960d0d" + port_name = "0x50014380242b9750" + port_state = "Online" + port_type = "NPort (fabric via point-to-point)" + speed = "8 Gbit" + supported_classes = "Class 3" + supported_speeds = "1 Gbit, 2 Gbit, 4 Gbit, 8 Gbit" + symbolic_name = "QMH2572 FW:v4.04.04 DVR:v8.03.07.12-k" + system_hostname = "" + tgtid_bind_type = "wwpn (World Wide Port Name)" 
+ uevent = + vport_create = + vport_delete = + + Device = "host0" + Device path = "/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.0/host0" + edc = + optrom_ctl = + reset = + uevent = "DEVTYPE=scsi_host" + + + Class Device = "host2" + Class Device path = "/sys/devices/pci0000:20/0000:20:03.0/\ +0000:21:00.1/host2/fc_host/host2" + dev_loss_tmo = "16" + fabric_name = "0x100000051ea33b79" + issue_lip = + max_npiv_vports = "0" + node_name = "0x50014380242b9753" + npiv_vports_inuse = "0" + port_id = "0x970e09" + port_name = "0x50014380242b9752" + port_state = "Online" + port_type = "NPort (fabric via point-to-point)" + speed = "8 Gbit" + supported_classes = "Class 3" + supported_speeds = "1 Gbit, 2 Gbit, 4 Gbit, 8 Gbit" + symbolic_name = "QMH2572 FW:v4.04.04 DVR:v8.03.07.12-k" + system_hostname = "" + tgtid_bind_type = "wwpn (World Wide Port Name)" + uevent = + vport_create = + vport_delete = + + Device = "host2" + Device path = "/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.1/host2" + edc = + optrom_ctl = + reset = + uevent = "DEVTYPE=scsi_host" + + +""" diff --git a/brick/tests/initiator/test_linuxscsi.py b/brick/tests/initiator/test_linuxscsi.py new file mode 100644 index 000000000..ffc70a2a0 --- /dev/null +++ b/brick/tests/initiator/test_linuxscsi.py @@ -0,0 +1,226 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os.path +import string + +import mock + +from brick.initiator import linuxscsi +from brick.openstack.common import log as logging +from brick.tests import base + +LOG = logging.getLogger(__name__) + + +class LinuxSCSITestCase(base.TestCase): + def setUp(self): + super(LinuxSCSITestCase, self).setUp() + self.cmds = [] + realpath_mock = mock.Mock() + realpath_mock.return_value = '/dev/sdc' + os.path.realpath = realpath_mock + self.linuxscsi = linuxscsi.LinuxSCSI(None, execute=self.fake_execute) + + def fake_execute(self, *cmd, **kwargs): + self.cmds.append(string.join(cmd)) + return "", None + + def test_echo_scsi_command(self): + self.linuxscsi.echo_scsi_command("/some/path", "1") + expected_commands = ['tee -a /some/path'] + self.assertEqual(expected_commands, self.cmds) + + def test_get_name_from_path(self): + device_name = "/dev/sdc" + realpath_mock = mock.Mock() + realpath_mock.return_value = device_name + os.path.realpath = realpath_mock + disk_path = ("/dev/disk/by-path/ip-10.10.220.253:3260-" + "iscsi-iqn.2000-05.com.3pardata:21810002ac00383d-lun-0") + name = self.linuxscsi.get_name_from_path(disk_path) + self.assertEqual(name, device_name) + realpath_mock = mock.Mock() + realpath_mock.return_value = "bogus" + os.path.realpath = realpath_mock + name = self.linuxscsi.get_name_from_path(disk_path) + self.assertIsNone(name) + + def test_remove_scsi_device(self): + exists_mock = mock.Mock() + exists_mock.return_value = False + os.path.exists = exists_mock + self.linuxscsi.remove_scsi_device("/dev/sdc") + expected_commands = [] + self.assertEqual(expected_commands, self.cmds) + exists_mock = mock.Mock() + exists_mock.return_value = True + os.path.exists = exists_mock + self.linuxscsi.remove_scsi_device("/dev/sdc") + expected_commands = [ + ('blockdev --flushbufs /dev/sdc'), + ('tee -a /sys/block/sdc/device/delete')] + self.assertEqual(expected_commands, self.cmds) + + def test_flush_multipath_device(self): + 
self.linuxscsi.flush_multipath_device('/dev/dm-9') + expected_commands = [('multipath -f /dev/dm-9')] + self.assertEqual(expected_commands, self.cmds) + + def test_flush_multipath_devices(self): + self.linuxscsi.flush_multipath_devices() + expected_commands = [('multipath -F')] + self.assertEqual(expected_commands, self.cmds) + + def test_remove_multipath_device(self): + def fake_find_multipath_device(device): + devices = [{'device': '/dev/sde', 'host': 0, + 'channel': 0, 'id': 0, 'lun': 1}, + {'device': '/dev/sdf', 'host': 2, + 'channel': 0, 'id': 0, 'lun': 1}, ] + + info = {"device": "dm-3", + "id": "350002ac20398383d", + "devices": devices} + return info + + exists_mock = mock.Mock() + exists_mock.return_value = True + os.path.exists = exists_mock + + self.linuxscsi.find_multipath_device = fake_find_multipath_device + + self.linuxscsi.remove_multipath_device('/dev/dm-3') + expected_commands = [ + ('blockdev --flushbufs /dev/sde'), + ('tee -a /sys/block/sde/device/delete'), + ('blockdev --flushbufs /dev/sdf'), + ('tee -a /sys/block/sdf/device/delete'), + ('multipath -f 350002ac20398383d'), ] + self.assertEqual(expected_commands, self.cmds) + + def test_find_multipath_device_3par(self): + def fake_execute(*cmd, **kwargs): + out = ("mpath6 (350002ac20398383d) dm-3 3PARdata,VV\n" + "size=2.0G features='0' hwhandler='0' wp=rw\n" + "`-+- policy='round-robin 0' prio=-1 status=active\n" + " |- 0:0:0:1 sde 8:64 active undef running\n" + " `- 2:0:0:1 sdf 8:80 active undef running\n" + ) + return out, None + + self.linuxscsi._execute = fake_execute + + info = self.linuxscsi.find_multipath_device('/dev/sde') + LOG.error("info = %s" % info) + self.assertEqual("/dev/dm-3", info["device"]) + self.assertEqual("/dev/sde", info['devices'][0]['device']) + self.assertEqual("0", info['devices'][0]['host']) + self.assertEqual("0", info['devices'][0]['id']) + self.assertEqual("0", info['devices'][0]['channel']) + self.assertEqual("1", info['devices'][0]['lun']) + + 
self.assertEqual("/dev/sdf", info['devices'][1]['device']) + self.assertEqual("2", info['devices'][1]['host']) + self.assertEqual("0", info['devices'][1]['id']) + self.assertEqual("0", info['devices'][1]['channel']) + self.assertEqual("1", info['devices'][1]['lun']) + + def test_find_multipath_device_svc(self): + def fake_execute(*cmd, **kwargs): + out = ("36005076da00638089c000000000004d5 dm-2 IBM,2145\n" + "size=954M features='1 queue_if_no_path' hwhandler='0'" + " wp=rw\n" + "|-+- policy='round-robin 0' prio=-1 status=active\n" + "| |- 6:0:2:0 sde 8:64 active undef running\n" + "| `- 6:0:4:0 sdg 8:96 active undef running\n" + "`-+- policy='round-robin 0' prio=-1 status=enabled\n" + " |- 6:0:3:0 sdf 8:80 active undef running\n" + " `- 6:0:5:0 sdh 8:112 active undef running\n" + ) + return out, None + + self.linuxscsi._execute = fake_execute + + info = self.linuxscsi.find_multipath_device('/dev/sde') + LOG.error("info = %s" % info) + self.assertEqual("/dev/dm-2", info["device"]) + self.assertEqual("/dev/sde", info['devices'][0]['device']) + self.assertEqual("6", info['devices'][0]['host']) + self.assertEqual("0", info['devices'][0]['channel']) + self.assertEqual("2", info['devices'][0]['id']) + self.assertEqual("0", info['devices'][0]['lun']) + + self.assertEqual("/dev/sdf", info['devices'][2]['device']) + self.assertEqual("6", info['devices'][2]['host']) + self.assertEqual("0", info['devices'][2]['channel']) + self.assertEqual("3", info['devices'][2]['id']) + self.assertEqual("0", info['devices'][2]['lun']) + + def test_find_multipath_device_ds8000(self): + def fake_execute(*cmd, **kwargs): + out = ("36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n" + "size=1.0G features='1 queue_if_no_path' hwhandler='0'" + " wp=rw\n" + "`-+- policy='round-robin 0' prio=-1 status=active\n" + " |- 6:0:2:0 sdd 8:64 active undef running\n" + " `- 6:1:0:3 sdc 8:32 active undef running\n" + ) + return out, None + + self.linuxscsi._execute = fake_execute + + info = 
self.linuxscsi.find_multipath_device('/dev/sdd') + LOG.error("info = %s" % info) + self.assertEqual("/dev/dm-2", info["device"]) + self.assertEqual("/dev/sdd", info['devices'][0]['device']) + self.assertEqual("6", info['devices'][0]['host']) + self.assertEqual("0", info['devices'][0]['channel']) + self.assertEqual("2", info['devices'][0]['id']) + self.assertEqual("0", info['devices'][0]['lun']) + + self.assertEqual("/dev/sdc", info['devices'][1]['device']) + self.assertEqual("6", info['devices'][1]['host']) + self.assertEqual("1", info['devices'][1]['channel']) + self.assertEqual("0", info['devices'][1]['id']) + self.assertEqual("3", info['devices'][1]['lun']) + + def test_find_multipath_device_with_error(self): + def fake_execute(*cmd, **kwargs): + out = ("Oct 13 10:24:01 | /lib/udev/scsi_id exitted with 1\n" + "36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n" + "size=1.0G features='1 queue_if_no_path' hwhandler='0'" + " wp=rw\n" + "`-+- policy='round-robin 0' prio=-1 status=active\n" + " |- 6:0:2:0 sdd 8:64 active undef running\n" + " `- 6:1:0:3 sdc 8:32 active undef running\n" + ) + return out, None + + self.linuxscsi._execute = fake_execute + + info = self.linuxscsi.find_multipath_device('/dev/sdd') + LOG.error("info = %s" % info) + self.assertEqual("/dev/dm-2", info["device"]) + self.assertEqual("/dev/sdd", info['devices'][0]['device']) + self.assertEqual("6", info['devices'][0]['host']) + self.assertEqual("0", info['devices'][0]['channel']) + self.assertEqual("2", info['devices'][0]['id']) + self.assertEqual("0", info['devices'][0]['lun']) + + self.assertEqual("/dev/sdc", info['devices'][1]['device']) + self.assertEqual("6", info['devices'][1]['host']) + self.assertEqual("1", info['devices'][1]['channel']) + self.assertEqual("0", info['devices'][1]['id']) + self.assertEqual("3", info['devices'][1]['lun']) diff --git a/brick/tests/test_brick.py b/brick/tests/test_brick.py new file mode 100644 index 000000000..06d132be1 --- /dev/null +++ 
b/brick/tests/test_brick.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_brick +---------------------------------- + +Tests for `brick` module. +""" + +from brick.tests import base + + +class TestBrick(base.TestCase): + + def test_something(self): + pass diff --git a/brick/tests/test_exception.py b/brick/tests/test_exception.py new file mode 100644 index 000000000..6b5bcef84 --- /dev/null +++ b/brick/tests/test_exception.py @@ -0,0 +1,59 @@ + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from brick import exception +from brick.tests import base + + +class BrickExceptionTestCase(base.TestCase): + def test_default_error_msg(self): + class FakeBrickException(exception.BrickException): + message = "default message" + + exc = FakeBrickException() + self.assertEqual(unicode(exc), 'default message') + + def test_error_msg(self): + self.assertEqual(unicode(exception.BrickException('test')), 'test') + + def test_default_error_msg_with_kwargs(self): + class FakeBrickException(exception.BrickException): + message = "default message: %(code)s" + + exc = FakeBrickException(code=500) + self.assertEqual(unicode(exc), 'default message: 500') + + def test_error_msg_exception_with_kwargs(self): + class FakeBrickException(exception.BrickException): + message = "default message: %(mispelled_code)s" + + exc = FakeBrickException(code=500) + self.assertEqual(unicode(exc), 'default message: %(mispelled_code)s') + + def test_default_error_code(self): + class FakeBrickException(exception.BrickException): + code = 404 + + exc = FakeBrickException() + self.assertEqual(exc.kwargs['code'], 404) + + def test_error_code_from_kwarg(self): + class FakeBrickException(exception.BrickException): + code = 500 + + exc = FakeBrickException(code=404) + self.assertEqual(exc.kwargs['code'], 404) diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100755 index 000000000..b1f113ead --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +sys.path.insert(0, os.path.abspath('../..')) +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + #'sphinx.ext.intersphinx', + 'oslosphinx' +] + +# autodoc generation is a bit aggressive and a nuisance when doing heavy +# text edit cycles. +# execute "export SPHINX_DEBUG=1" in your terminal to disable + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'brick' +copyright = u'2013, OpenStack Foundation' + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' +# html_static_path = ['static'] + +# Output file base name for HTML help builder. +htmlhelp_basename = '%sdoc' % project + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [ + ('index', + '%s.tex' % project, + u'%s Documentation' % project, + u'OpenStack Foundation', 'manual'), +] + +# Example configuration for intersphinx: refer to the Python standard library. 
+#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst new file mode 100644 index 000000000..1728a61ca --- /dev/null +++ b/doc/source/contributing.rst @@ -0,0 +1,4 @@ +============ +Contributing +============ +.. include:: ../../CONTRIBUTING.rst diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 000000000..bfdeaeda5 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,50 @@ +Brick |release| Documentation +============================= + +Overview +-------- +**Brick** is a Python package containing classes that help +with volume discovery and creation for local storage. + +:doc:`installation` + Instructions on how to get the distribution. + +:doc:`tutorial` + Start here for a quick overview. + +:doc:`api/index` + The complete API Documentation, organized by module. + + +Changes +------- +See the :doc:`changelog` for a full list of changes to **Brick**. + +About This Documentation +------------------------ +This documentation is generated using the `Sphinx +`_ documentation generator. The source files +for the documentation are located in the *doc/* directory of the +**Brick** distribution. To generate the docs locally run the +following command from the root directory of the **Brick** source. + +.. code-block:: bash + + $ python setup.py doc + + +.. 
toctree:: + :hidden: + + installation + tutorial + changelog + api/index + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc/source/installation.rst b/doc/source/installation.rst new file mode 100644 index 000000000..43fde847e --- /dev/null +++ b/doc/source/installation.rst @@ -0,0 +1,12 @@ +============ +Installation +============ + +At the command line:: + + $ pip install brick + +Or, if you have virtualenvwrapper installed:: + + $ mkvirtualenv brick + $ pip install brick diff --git a/doc/source/readme.rst b/doc/source/readme.rst new file mode 100644 index 000000000..a6210d3d8 --- /dev/null +++ b/doc/source/readme.rst @@ -0,0 +1 @@ +.. include:: ../../README.rst diff --git a/doc/source/usage.rst b/doc/source/usage.rst new file mode 100644 index 000000000..1912dfd19 --- /dev/null +++ b/doc/source/usage.rst @@ -0,0 +1,7 @@ +======== +Usage +======== + +To use brick in a project:: + + import brick diff --git a/openstack-common.conf b/openstack-common.conf new file mode 100644 index 000000000..a8f89d02d --- /dev/null +++ b/openstack-common.conf @@ -0,0 +1,10 @@ +[DEFAULT] + +# The list of modules to copy from oslo-incubator.git + +# The base module to hold the copy of openstack.common +base=brick + +module=log +module=log_handler +module=loopingcall diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..5a9d8c073 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,12 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+ +pbr>=0.6,!=0.7,<1.0 +Babel>=1.3 +eventlet>=0.15.2 +oslo.concurrency>=0.3.0,!=0.4.0 +oslo.rootwrap>=1.3.0 +oslo.serialization>=1.2.0 +oslo.i18n>=1.0.0 +six>=1.7.0 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..5ef743d5a --- /dev/null +++ b/setup.cfg @@ -0,0 +1,56 @@ +[metadata] +name = brick +summary = OpenStack Cinder brick library for managing local volume attaches +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 2.6 + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.3 + Programming Language :: Python :: 3.4 + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[files] +packages = + brick + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + +[build_sphinx] +source-dir = doc/source +build-dir = doc/build +all_files = 1 + +[upload_sphinx] +upload-dir = doc/build/html + +[compile_catalog] +directory = brick/locale +domain = brick + +[update_catalog] +domain = brick +output_dir = brick/locale +input_file = brick/locale/brick.pot + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = brick/locale/brick.pot diff --git a/setup.py b/setup.py new file mode 100755 index 000000000..70c2b3f32 --- /dev/null +++ b/setup.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 000000000..273141733 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,14 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +hacking>=0.9.2,<0.10 +coverage>=3.6 +discover +python-subunit +sphinx>=1.1.2 +oslosphinx +oslotest>=1.1.0.0a1 +testrepository>=0.0.18 +testscenarios>=0.4 +testtools>=0.9.34 diff --git a/tox.ini b/tox.ini new file mode 100644 index 000000000..056a8cfb5 --- /dev/null +++ b/tox.ini @@ -0,0 +1,62 @@ +[tox] +minversion = 1.6 +envlist = py27,pep8 +skipsdist = True + +[testenv] +usedevelop = True +install_command = pip install -U {opts} {packages} +setenv = + VIRTUAL_ENV={envdir} +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = python setup.py testr --slowest --testr-args='{posargs}' +whitelist_externals = bash + +[tox:jenkins] +downloadcache = ~/cache/pip + +[testenv:pep8] +commands = flake8 + +[testenv:venv] +commands = {posargs} + +[testenv:cover] +commands = python setup.py testr --coverage --testr-args='{posargs}' + +[testenv:docs] +commands = python setup.py build_sphinx + +[flake8] +# H803 skipped on purpose per list discussion. +# E123, E125 skipped as they are invalid PEP-8. 
+# Following checks are ignored on purpose. +# +# E251 unexpected spaces around keyword / parameter equals +# reason: no improvement in readability +# +# E265 block comment should start with '# ' +# reason: no improvement in readability +# +# H402 one line docstring needs punctuation +# reason: removed in hacking (https://review.openstack.org/#/c/101497/) +# +# H803 git commit title should not end with period +# reason: removed in hacking (https://review.openstack.org/#/c/101498/) +# +# H904 wrap long lines in parentheses instead of a backslash +# reason: removed in hacking (https://review.openstack.org/#/c/101701/) +# +# Due to the upgrade to hacking 0.9.2 the following checks are +# ignored on purpose for the moment and should be re-enabled. + + +show-source = True +ignore = E123,E125,E251,E265,H302,H402,H405,H803,H904 +builtins = _ +exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build +max-complexity=30 + +[hacking] +import_exceptions = brick.i18n