Created the Brick library from Cinder
This is the external Brick library extracted from Cinder's codebase. It is intended to be used as a standalone library and as a subproject of Cinder.
commit 81b1dbebdf
.coveragerc (new file, 7 lines)
@@ -0,0 +1,7 @@
[run]
branch = True
source = brick
omit = brick/tests/*,brick/openstack/*

[report]
ignore-errors = True
.gitignore (new file, vendored, 53 lines)
@@ -0,0 +1,53 @@
*.py[cod]

# C extensions
*.so

# Packages
*.egg
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64

# Installer logs
pip-log.txt

# Unit test / coverage reports
.coverage
.tox
nosetests.xml
.testrepository
.venv

# Translations
*.mo

# Mr Developer
.mr.developer.cfg
.project
.pydevproject

# Complexity
output/*.html
output/*/index.html

# Sphinx
doc/build

# pbr generates these
AUTHORS
ChangeLog

# Editors
*~
.*.swp
.*sw?
.gitreview (new file, 4 lines)
@@ -0,0 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/brick.git
.mailmap (new file, 3 lines)
@@ -0,0 +1,3 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
.testr.conf (new file, 7 lines)
@@ -0,0 +1,7 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
             ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
CONTRIBUTING.rst (new file, 16 lines)
@@ -0,0 +1,16 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps in this page:

   http://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:

   http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

   https://bugs.launchpad.net/brick
HACKING.rst (new file, 4 lines)
@@ -0,0 +1,4 @@
brick Style Commandments
===============================================

Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/
LICENSE (new file, 176 lines)
@@ -0,0 +1,176 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
MANIFEST.in (new file, 6 lines)
@@ -0,0 +1,6 @@
include AUTHORS
include ChangeLog
exclude .gitignore
exclude .gitreview

global-exclude *.pyc
README.rst (new file, 15 lines)
@@ -0,0 +1,15 @@
===============================
brick
===============================

OpenStack Cinder brick library for managing local volume attaches

* Free software: Apache license
* Documentation: http://docs.openstack.org/developer/brick
* Source: http://git.openstack.org/cgit/openstack/brick
* Bugs: http://bugs.launchpad.net/cinder

Features
--------

* TODO
brick/README.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
Brick is a new library that is currently maintained in Cinder for
the Havana release. It will eventually be moved external to Cinder,
possibly to oslo or PyPI. Any defects found in Brick should be submitted
against Cinder and fixed there, then pulled into other projects that
are using brick.

* Brick is used outside of Cinder and therefore
  cannot have any dependencies on Cinder and/or
  its database.
brick/__init__.py (new empty file, 0 lines)
brick/exception.py (new file, 120 lines)
@@ -0,0 +1,120 @@
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Exceptions for the Brick library."""

from brick.i18n import _
from brick.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class BrickException(Exception):
    """Base Brick Exception

    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That msg_fmt will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    code = 500
    headers = {}
    safe = False

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass

        if not message:
            try:
                message = self.message % kwargs

            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                msg = (_("Exception in string format operation. msg='%s'")
                       % self.message)
                LOG.exception(msg)
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))

                # at least get the core message out if something happened
                message = self.message

        # Put the message in 'msg' so that we can access it. If we have it in
        # message it will be overshadowed by the class' message attribute
        self.msg = message
        super(BrickException, self).__init__(message)

    def __unicode__(self):
        return unicode(self.msg)


class NotFound(BrickException):
    message = _("Resource could not be found.")
    code = 404
    safe = True


class Invalid(BrickException):
    message = _("Unacceptable parameters.")
    code = 400


# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class NoFibreChannelHostsFound(BrickException):
    message = _("We are unable to locate any Fibre Channel devices.")


class NoFibreChannelVolumeDeviceFound(BrickException):
    message = _("Unable to find a Fibre Channel volume device.")


class VolumeDeviceNotFound(BrickException):
    message = _("Volume device not found at %(device)s.")


class VolumeGroupNotFound(BrickException):
    message = _('Unable to find Volume Group: %(vg_name)s')


class VolumeGroupCreationFailed(BrickException):
    message = _('Failed to create Volume Group: %(vg_name)s')


class ISCSITargetCreateFailed(BrickException):
    message = _("Failed to create iscsi target for volume %(volume_id)s.")


class ISCSITargetRemoveFailed(BrickException):
    message = _("Failed to remove iscsi target for volume %(volume_id)s.")


class ISCSITargetAttachFailed(BrickException):
    message = _("Failed to attach iSCSI target for volume %(volume_id)s.")


class ProtocolNotSupported(BrickException):
    message = _("Connect to volume via protocol %(protocol)s not supported.")
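A minimal usage sketch (not part of the commit): subclasses only need to set a class-level message template, and keyword arguments passed to the constructor fill it in. The VolumePathNotFound subclass and the path value below are hypothetical, purely for illustration.

from brick import exception

class VolumePathNotFound(exception.BrickException):
    # hypothetical subclass; the base class formats this template with
    # the keyword arguments given to the constructor
    message = "Volume path %(path)s could not be found."

try:
    raise VolumePathNotFound(path='/dev/disk/by-path/example')
except exception.BrickException as e:
    print(e.msg)  # formatted message, with e.kwargs carrying path and code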
brick/executor.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic exec utility that allows us to set the
execute and root_helper attributes for putils.
Some projects need their own execute wrapper
and root_helper settings, so this provides that hook.
"""

from oslo_concurrency import processutils as putils


class Executor(object):
    def __init__(self, root_helper, execute=putils.execute,
                 *args, **kwargs):
        self.set_execute(execute)
        self.set_root_helper(root_helper)

    def set_execute(self, execute):
        self._execute = execute

    def set_root_helper(self, helper):
        self._root_helper = helper
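A sketch of how a consumer might plug in its own execute wrapper and root helper (the logging wrapper and the 'sudo' root helper below are assumptions, not part of this commit; real deployments typically pass a rootwrap command):

from oslo_concurrency import processutils as putils

from brick import executor

def logging_execute(*cmd, **kwargs):
    # hypothetical wrapper: record the command, then delegate to processutils
    print("running: %s" % (cmd,))
    return putils.execute(*cmd, **kwargs)

# Subclasses such as LinuxSCSI inherit this plumbing; the base class is
# instantiated directly here only to show the two hook points.
ex = executor.Executor(root_helper='sudo', execute=logging_execute)
ex.set_root_helper('sudo cinder-rootwrap /etc/cinder/rootwrap.conf')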
brick/i18n.py (new file, 38 lines)
@@ -0,0 +1,38 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""oslo.i18n integration module.

See http://docs.openstack.org/developer/oslo.i18n/usage.html .

"""

from oslo import i18n

DOMAIN = 'brick'

_translators = i18n.TranslatorFactory(domain=DOMAIN)

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
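A short sketch of the intended call sites (assumes oslo.i18n is installed; the helper function and message text are illustrative only): user-facing strings go through _() and log-only strings through the level-specific markers such as _LW().

from brick.i18n import _, _LW

def warn_missing(device, log):
    # log-only text is marked with _LW so it lands in the log catalog
    log.warn(_LW("Device %s disappeared during cleanup") % device)
    # user-facing text is marked with the primary translator _
    return _("Volume device not found at %(device)s.") % {'device': device}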
brick/initiator/__init__.py (new empty file, 0 lines)
brick/initiator/connector.py (new file, 1050 lines)
File diff suppressed because it is too large.
brick/initiator/host_driver.py (new file, 30 lines)
@@ -0,0 +1,30 @@
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os


class HostDriver(object):

    def get_all_block_devices(self):
        """Get the list of all block devices seen in /dev/disk/by-path/."""
        files = []
        dir = "/dev/disk/by-path/"
        if os.path.isdir(dir):
            files = os.listdir(dir)
        devices = []
        for file in files:
            devices.append(dir + file)
        return devices
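Usage is a one-liner; a minimal sketch (not part of the commit):

from brick.initiator import host_driver

driver = host_driver.HostDriver()
# Returns full /dev/disk/by-path/ entries, one per block device visible
# on this host (an empty list if the directory does not exist).
for dev in driver.get_all_block_devices():
    print(dev)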
brick/initiator/linuxfc.py (new file, 140 lines)
@@ -0,0 +1,140 @@
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic linux Fibre Channel utilities."""

import errno

from oslo_concurrency import processutils as putils

from brick.i18n import _LW
from brick.initiator import linuxscsi
from brick.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class LinuxFibreChannel(linuxscsi.LinuxSCSI):
    def __init__(self, root_helper, execute=putils.execute,
                 *args, **kwargs):
        super(LinuxFibreChannel, self).__init__(root_helper, execute,
                                                *args, **kwargs)

    def rescan_hosts(self, hbas):
        for hba in hbas:
            self.echo_scsi_command("/sys/class/scsi_host/%s/scan"
                                   % hba['host_device'], "- - -")

    def get_fc_hbas(self):
        """Get the Fibre Channel HBA information."""
        out = None
        try:
            out, _err = self._execute('systool', '-c', 'fc_host', '-v',
                                      run_as_root=True,
                                      root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            # This handles the case where rootwrap is used
            # and systool is not installed
            # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND:
            if exc.exit_code == 96:
                LOG.warn(_LW("systool is not installed"))
            return []
        except OSError as exc:
            # This handles the case where rootwrap is NOT used
            # and systool is not installed
            if exc.errno == errno.ENOENT:
                LOG.warn(_LW("systool is not installed"))
            return []

        # No FC HBAs were found
        if out is None:
            return []

        lines = out.split('\n')
        # ignore the first 2 lines
        lines = lines[2:]
        hbas = []
        hba = {}
        lastline = None
        for line in lines:
            line = line.strip()
            # 2 newlines denotes a new hba port
            if line == '' and lastline == '':
                if len(hba) > 0:
                    hbas.append(hba)
                    hba = {}
            else:
                val = line.split('=')
                if len(val) == 2:
                    key = val[0].strip().replace(" ", "")
                    value = val[1].strip()
                    hba[key] = value.replace('"', '')
            lastline = line

        return hbas

    def get_fc_hbas_info(self):
        """Get Fibre Channel WWNs and device paths from the system, if any."""

        # Note(walter-boring) modern Linux kernels contain the FC HBA's in /sys
        # and are obtainable via the systool app
        hbas = self.get_fc_hbas()
        if not hbas:
            return []

        hbas_info = []
        for hba in hbas:
            wwpn = hba['port_name'].replace('0x', '')
            wwnn = hba['node_name'].replace('0x', '')
            device_path = hba['ClassDevicepath']
            device = hba['ClassDevice']
            hbas_info.append({'port_name': wwpn,
                              'node_name': wwnn,
                              'host_device': device,
                              'device_path': device_path})
        return hbas_info

    def get_fc_wwpns(self):
        """Get Fibre Channel WWPNs from the system, if any."""

        # Note(walter-boring) modern Linux kernels contain the FC HBA's in /sys
        # and are obtainable via the systool app
        hbas = self.get_fc_hbas()

        wwpns = []
        if hbas:
            for hba in hbas:
                if hba['port_state'] == 'Online':
                    wwpn = hba['port_name'].replace('0x', '')
                    wwpns.append(wwpn)

        return wwpns

    def get_fc_wwnns(self):
        """Get Fibre Channel WWNNs from the system, if any."""

        # Note(walter-boring) modern Linux kernels contain the FC HBA's in /sys
        # and are obtainable via the systool app
        hbas = self.get_fc_hbas()
        if not hbas:
            return []

        wwnns = []
        if hbas:
            for hba in hbas:
                if hba['port_state'] == 'Online':
                    wwnn = hba['node_name'].replace('0x', '')
                    wwnns.append(wwnn)

        return wwnns
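A usage sketch (not part of the commit): get_fc_hbas() shells out to systool and folds its 'key = "value"' output into one dict per HBA port; get_fc_hbas_info() then exposes the WWPN/WWNN plus the /sys device paths. The plain 'sudo' root helper below is an assumption; deployments usually pass a rootwrap command.

from brick.initiator import linuxfc

fc = linuxfc.LinuxFibreChannel(root_helper='sudo')
hbas = fc.get_fc_hbas_info()
for hba in hbas:
    # keys come straight from the parsed systool output
    print(hba['port_name'], hba['node_name'], hba['host_device'])
# trigger a "- - -" rescan on every discovered scsi_host
fc.rescan_hosts(hbas)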
brick/initiator/linuxscsi.py (new file, 193 lines)
@@ -0,0 +1,193 @@
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic linux scsi subsystem and Multipath utilities.

   Note, this is not iSCSI.
"""
import os
import re

from oslo_concurrency import processutils as putils

from brick import executor
from brick.i18n import _, _LW
from brick.openstack.common import log as logging

LOG = logging.getLogger(__name__)

MULTIPATH_ERROR_REGEX = re.compile("\w{3} \d+ \d\d:\d\d:\d\d \|.*$")


class LinuxSCSI(executor.Executor):
    def __init__(self, root_helper, execute=putils.execute,
                 *args, **kwargs):
        super(LinuxSCSI, self).__init__(root_helper, execute,
                                        *args, **kwargs)

    def echo_scsi_command(self, path, content):
        """Used to echo strings to scsi subsystem."""

        args = ["-a", path]
        kwargs = dict(process_input=content,
                      run_as_root=True,
                      root_helper=self._root_helper)
        self._execute('tee', *args, **kwargs)

    def get_name_from_path(self, path):
        """Translates /dev/disk/by-path/ entry to /dev/sdX."""

        name = os.path.realpath(path)
        if name.startswith("/dev/"):
            return name
        else:
            return None

    def remove_scsi_device(self, device):
        """Removes a scsi device based upon /dev/sdX name."""

        path = "/sys/block/%s/device/delete" % device.replace("/dev/", "")
        if os.path.exists(path):
            # flush any outstanding IO first
            self.flush_device_io(device)

            LOG.debug("Remove SCSI device(%s) with %s" % (device, path))
            self.echo_scsi_command(path, "1")

    def get_device_info(self, device):
        (out, _err) = self._execute('sg_scan', device, run_as_root=True,
                                    root_helper=self._root_helper)
        dev_info = {'device': device, 'host': None,
                    'channel': None, 'id': None, 'lun': None}
        if out:
            line = out.strip()
            line = line.replace(device + ": ", "")
            info = line.split(" ")

            for item in info:
                if '=' in item:
                    pair = item.split('=')
                    dev_info[pair[0]] = pair[1]
                elif 'scsi' in item:
                    dev_info['host'] = item.replace('scsi', '')

        return dev_info

    def remove_multipath_device(self, multipath_name):
        """This removes LUNs associated with a multipath device
        and the multipath device itself.
        """

        LOG.debug("remove multipath device %s" % multipath_name)
        mpath_dev = self.find_multipath_device(multipath_name)
        if mpath_dev:
            devices = mpath_dev['devices']
            LOG.debug("multipath LUNs to remove %s" % devices)
            for device in devices:
                self.remove_scsi_device(device['device'])
            self.flush_multipath_device(mpath_dev['id'])

    def flush_device_io(self, device):
        """This is used to flush any remaining IO in the buffers."""
        try:
            LOG.debug("Flushing IO for device %s" % device)
            self._execute('blockdev', '--flushbufs', device, run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            msg = _("Failed to flush IO buffers prior to removing"
                    " device: (%(code)s)") % {'code': exc.exit_code}
            LOG.warn(msg)

    def flush_multipath_device(self, device):
        try:
            LOG.debug("Flush multipath device %s" % device)
            self._execute('multipath', '-f', device, run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warn(_LW("multipath call failed exit (%(code)s)")
                     % {'code': exc.exit_code})

    def flush_multipath_devices(self):
        try:
            self._execute('multipath', '-F', run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warn(_LW("multipath call failed exit (%(code)s)")
                     % {'code': exc.exit_code})

    def find_multipath_device(self, device):
        """Find a multipath device associated with a LUN device name.

        device can be either a /dev/sdX entry or a multipath id.
        """

        mdev = None
        devices = []
        out = None
        try:
            (out, _err) = self._execute('multipath', '-l', device,
                                        run_as_root=True,
                                        root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warn(_LW("multipath call failed exit (%(code)s)")
                     % {'code': exc.exit_code})
            return None

        if out:
            lines = out.strip()
            lines = lines.split("\n")
            lines = [line for line in lines
                     if not re.match(MULTIPATH_ERROR_REGEX, line)]
            if lines:
                line = lines[0]
                info = line.split(" ")
                # device line output is different depending
                # on /etc/multipath.conf settings.
                if info[1][:2] == "dm":
                    mdev = "/dev/%s" % info[1]
                    mdev_id = info[0]
                elif info[2][:2] == "dm":
                    mdev = "/dev/%s" % info[2]
                    mdev_id = info[1].replace('(', '')
                    mdev_id = mdev_id.replace(')', '')

                if mdev is None:
                    LOG.warn(_LW("Couldn't find multipath device %(line)s")
                             % {'line': line})
                    return None

                LOG.debug("Found multipath device = %(mdev)s"
                          % {'mdev': mdev})
                device_lines = lines[3:]
                for dev_line in device_lines:
                    if dev_line.find("policy") != -1:
                        continue

                    dev_line = dev_line.lstrip(' |-`')
                    dev_info = dev_line.split()
                    address = dev_info[0].split(":")

                    dev = {'device': '/dev/%s' % dev_info[1],
                           'host': address[0], 'channel': address[1],
                           'id': address[2], 'lun': address[3]
                           }

                    devices.append(dev)

        if mdev is not None:
            info = {"device": mdev,
                    "id": mdev_id,
                    "devices": devices}
            return info
        return None
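A sketch of the typical teardown flow (not part of the commit; the 'sudo' root helper and the /dev/sdb device name are assumptions): resolve the multipath map for a LUN, and if one exists remove every backing /dev/sdX and flush the map, otherwise remove the single device directly.

from brick.initiator import linuxscsi

scsi = linuxscsi.LinuxSCSI(root_helper='sudo')

mpath = scsi.find_multipath_device('/dev/sdb')
if mpath:
    # removes each entry in mpath['devices'] and flushes mpath['id']
    scsi.remove_multipath_device('/dev/sdb')
else:
    scsi.remove_scsi_device('/dev/sdb')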
brick/openstack/__init__.py (new empty file, 0 lines)

brick/openstack/common/__init__.py (new empty file, 0 lines)
brick/openstack/common/_i18n.py (new file, 45 lines)
@@ -0,0 +1,45 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""oslo.i18n integration module.

See http://docs.openstack.org/developer/oslo.i18n/usage.html

"""

try:
    import oslo.i18n

    # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
    # application name when this module is synced into the separate
    # repository. It is OK to have more than one translation function
    # using the same domain, since there will still only be one message
    # catalog.
    _translators = oslo.i18n.TranslatorFactory(domain='brick')

    # The primary translation function using the well-known name "_"
    _ = _translators.primary

    # Translators for log levels.
    #
    # The abbreviated names are meant to reflect the usual use of a short
    # name like '_'. The "L" is for "log" and the other letter comes from
    # the level.
    _LI = _translators.log_info
    _LW = _translators.log_warning
    _LE = _translators.log_error
    _LC = _translators.log_critical
except ImportError:
    # NOTE(dims): Support for cases where a project wants to use
    # code from oslo-incubator, but is not ready to be internationalized
    # (like tempest)
    _ = _LI = _LW = _LE = _LC = lambda x: x
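A small sketch of the fallback behaviour this module provides (illustrative, not part of the commit): when oslo.i18n cannot be imported, every translation helper degrades to an identity function, so callers can use the markers unconditionally.

from brick.openstack.common import _i18n

msg = _i18n._("Unable to add syslog handler.")
# With oslo.i18n installed this is a translatable message object;
# without it, msg is simply the original string.
print(msg)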
brick/openstack/common/local.py (new file, 45 lines)
@@ -0,0 +1,45 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Local storage of variables using weak references"""

import threading
import weakref


class WeakLocal(threading.local):
    def __getattribute__(self, attr):
        rval = super(WeakLocal, self).__getattribute__(attr)
        if rval:
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            rval = rval()
        return rval

    def __setattr__(self, attr, value):
        value = weakref.ref(value)
        return super(WeakLocal, self).__setattr__(attr, value)


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
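A quick sketch of the weak-reference semantics (illustrative only; the Context class is a hypothetical stand-in for a request context): the store keeps only a weak reference, so once the caller drops its last strong reference the stored value evaporates.

from brick.openstack.common import local

class Context(object):
    pass

ctx = Context()
local.store.context = ctx         # stored as a weak reference
assert local.store.context is ctx
del ctx                           # last strong reference goes away...
print(local.store.context)        # ...and the weak store now returns None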
brick/openstack/common/log.py (new file, 718 lines; diff truncated below)
@@ -0,0 +1,718 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""OpenStack logging handler.

This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.

It also allows setting of formatting information through conf.

"""

import copy
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback

from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import importutils
import six
from six import moves

_PY26 = sys.version_info[0:2] == (2, 6)

from brick.openstack.common._i18n import _
from brick.openstack.common import local


_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"


common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of a logging configuration file. This file '
                    'is appended to any existing logging configuration '
                    'files. For details about logging configuration files, '
                    'see the Python logging module documentation.'),
    cfg.StrOpt('log-format',
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s .'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths.'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and will change in J to honor RFC5424.'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                # syslog format deprecation in J
                default=False,
                help='(Optional) Enables or disables syslog rfc5424 format '
                     'for logging. If enabled, prefixes the MSG part of the '
                     'syslog message with APP-NAME (RFC5424). The '
                     'format without the APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='Syslog facility to receive log lines.')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error.')
]

DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
                      'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
                      'oslo.messaging=INFO', 'iso8601=WARN',
                      'requests.packages.urllib3.connectionpool=WARN',
                      'urllib3.connectionpool=WARN', 'websocket=WARN',
                      "keystonemiddleware=WARN", "routes.middleware=WARN",
                      "stevedore=WARN"]

log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='Format string to use for log messages with context.'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='Format string to use for log messages without context.'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='Data to append to log format when level is DEBUG.'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='Prefix each line of exception output with this format.'),
    cfg.ListOpt('default_log_levels',
                default=DEFAULT_LOG_LEVELS,
                help='List of logger=LEVEL pairs.'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='Enables or disables publication of error events.'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Enables or disables fatal status of deprecations.'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance that is passed with the log '
                    'message.'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance UUID that is passed with the '
                    'log message.'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)


def list_opts():
    """Entry point for oslo.config-generator."""
    return [(None, copy.deepcopy(common_cli_opts)),
            (None, copy.deepcopy(logging_cli_opts)),
            (None, copy.deepcopy(generic_log_opts)),
            (None, copy.deepcopy(log_opts)),
            ]


# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')


try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            self.lock = None


def _dictify_context(context):
    if context is None:
        return None
    if not isinstance(context, dict) and getattr(context, 'to_dict', None):
        context = context.to_dict()
    return context


def _get_binary_name():
    return os.path.basename(inspect.stack()[-1][1])


def _get_log_file_path(binary=None):
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile and not logdir:
        return logfile

    if logfile and logdir:
        return os.path.join(logdir, logfile)

    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)

    return None


class BaseLoggerAdapter(logging.LoggerAdapter):

    def audit(self, msg, *args, **kwargs):
        self.log(logging.AUDIT, msg, *args, **kwargs)

    def isEnabledFor(self, level):
        if _PY26:
            # This method was added in python 2.7 (and it does the exact
            # same logic, so we need to do the exact same logic so that
            # python 2.6 has this capability as well).
            return self.logger.isEnabledFor(level)
        else:
            return super(BaseLoggerAdapter, self).isEnabledFor(level)


class LazyAdapter(BaseLoggerAdapter):
    def __init__(self, name='unknown', version='unknown'):
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
            if six.PY3:
                # In Python 3, the code fails because the 'manager' attribute
                # cannot be found when using a LoggerAdapter as the
                # underlying logger. Work around this issue.
                self._logger.manager = self._logger.logger.manager
        return self._logger


class ContextAdapter(BaseLoggerAdapter):
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.

        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(msg, six.text_type):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        extra['extra'] = extra.copy()
        return msg, kwargs


class JSONFormatter(logging.Formatter):
    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)


def _create_logging_excepthook(product_name):
    def logging_excepthook(exc_type, value, tb):
        extra = {'exc_info': (exc_type, value, tb)}
        getLogger(product_name).critical(
            "".join(traceback.format_exception_only(exc_type, value)),
            **extra)
    return logging_excepthook


class LogConfigError(Exception):

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)


def _load_log_config(log_config_append):
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except (moves.configparser.Error, KeyError) as exc:
        raise LogConfigError(log_config_append, six.text_type(exc))


def setup(product_name, version='unknown'):
    """Setup logging."""
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)


def set_defaults(logging_context_format_string=None,
                 default_log_levels=None):
    # Just in case the caller is not setting the
    # default_log_level. This is insurance because
    # we introduced the default_log_level parameter
    # later in a backwards in-compatible change
    if default_log_levels is not None:
        cfg.set_defaults(
            log_opts,
            default_log_levels=default_log_levels)
    if logging_context_format_string is not None:
        cfg.set_defaults(
            log_opts,
            logging_context_format_string=logging_context_format_string)


def _find_facility_from_conf():
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility


class RFCSysLogHandler(logging.handlers.SysLogHandler):
    def __init__(self, *args, **kwargs):
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

    def format(self, record):
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        msg = logging.handlers.SysLogHandler.format(self, record)
        msg = self.binary_name + ' ' + msg
        return msg


def _setup_logging_from_conf(project, version):
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "oslo.messaging.notify.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)

    if CONF.use_syslog:
        try:
            facility = _find_facility_from_conf()
            # TODO(bogdando) use the format provided by RFCSysLogHandler
            # after existing syslog format deprecation in J
            if CONF.use_syslog_rfc_format:
                syslog = RFCSysLogHandler(address='/dev/log',
                                          facility=facility)
            else:
                syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                        facility=facility)
            log_root.addHandler(syslog)
        except socket.error:
            log_root.error('Unable to add syslog handler. Verify that syslog '
                           'is running.')


_loggers = {}


def getLogger(name='unknown', version='unknown'):
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]


def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
|
||||
self.logger.log(self.level, msg.rstrip())
|
||||
|
||||
|
||||
class ContextFormatter(logging.Formatter):
|
||||
"""A context.RequestContext aware formatter configured through flags.
|
||||
|
||||
The flags used to set format strings are: logging_context_format_string
|
||||
and logging_default_format_string. You can also specify
|
||||
logging_debug_format_suffix to append extra formatting if the log level is
|
||||
debug.
|
||||
|
||||
For information about what variables are available for the formatter see:
|
||||
http://docs.python.org/library/logging.html#formatter
|
||||
|
||||
If available, uses the context value stored in TLS - local.store.context
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""Initialize ContextFormatter instance
|
||||
|
||||
Takes additional keyword arguments which can be used in the message
|
||||
format string.
|
||||
|
||||
:keyword project: project name
|
||||
:type project: string
|
||||
:keyword version: project version
|
||||
:type version: string
|
||||
|
||||
"""
|
||||
|
||||
self.project = kwargs.pop('project', 'unknown')
|
||||
self.version = kwargs.pop('version', 'unknown')
|
||||
|
||||
logging.Formatter.__init__(self, *args, **kwargs)
|
||||
|
||||
def format(self, record):
|
||||
"""Uses contextstring if request_id is set, otherwise default."""
|
||||
|
||||
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
|
||||
# before it can get to the python logging and
|
||||
# possibly cause string encoding trouble
|
||||
if not isinstance(record.msg, six.text_type):
|
||||
record.msg = six.text_type(record.msg)
|
||||
|
||||
# store project info
|
||||
record.project = self.project
|
||||
record.version = self.version
|
||||
|
||||
# store request info
|
||||
context = getattr(local.store, 'context', None)
|
||||
if context:
|
||||
d = _dictify_context(context)
|
||||
for k, v in d.items():
|
||||
setattr(record, k, v)
|
||||
|
||||
# NOTE(sdague): default the fancier formatting params
|
||||
# to an empty string so we don't throw an exception if
|
||||
# they get used
|
||||
for key in ('instance', 'color', 'user_identity'):
|
||||
if key not in record.__dict__:
|
||||
record.__dict__[key] = ''
|
||||
|
||||
if record.__dict__.get('request_id'):
|
||||
fmt = CONF.logging_context_format_string
|
||||
else:
|
||||
fmt = CONF.logging_default_format_string
|
||||
|
||||
if (record.levelno == logging.DEBUG and
|
||||
CONF.logging_debug_format_suffix):
|
||||
fmt += " " + CONF.logging_debug_format_suffix
|
||||
|
||||
if sys.version_info < (3, 2):
|
||||
self._fmt = fmt
|
||||
else:
|
||||
self._style = logging.PercentStyle(fmt)
|
||||
self._fmt = self._style._fmt
|
||||
# Cache this on the record, Logger will respect our formatted copy
|
||||
if record.exc_info:
|
||||
record.exc_text = self.formatException(record.exc_info, record)
|
||||
return logging.Formatter.format(self, record)
|
||||
|
||||
def formatException(self, exc_info, record=None):
|
||||
"""Format exception output with CONF.logging_exception_prefix."""
|
||||
if not record:
|
||||
return logging.Formatter.formatException(self, exc_info)
|
||||
|
||||
stringbuffer = moves.StringIO()
|
||||
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
|
||||
None, stringbuffer)
|
||||
lines = stringbuffer.getvalue().split('\n')
|
||||
stringbuffer.close()
|
||||
|
||||
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
|
||||
record.asctime = self.formatTime(record, self.datefmt)
|
||||
|
||||
formatted_lines = []
|
||||
for line in lines:
|
||||
pl = CONF.logging_exception_prefix % record.__dict__
|
||||
fl = '%s%s' % (pl, line)
|
||||
formatted_lines.append(fl)
|
||||
return '\n'.join(formatted_lines)
|
||||
|
||||
|
||||
class ColorHandler(logging.StreamHandler):
|
||||
LEVEL_COLORS = {
|
||||
logging.DEBUG: '\033[00;32m', # GREEN
|
||||
logging.INFO: '\033[00;36m', # CYAN
|
||||
logging.AUDIT: '\033[01;36m', # BOLD CYAN
|
||||
logging.WARN: '\033[01;33m', # BOLD YELLOW
|
||||
logging.ERROR: '\033[01;31m', # BOLD RED
|
||||
logging.CRITICAL: '\033[01;31m', # BOLD RED
|
||||
}
|
||||
|
||||
def format(self, record):
|
||||
record.color = self.LEVEL_COLORS[record.levelno]
|
||||
return logging.StreamHandler.format(self, record)
|
||||
|
||||
|
||||
class DeprecatedConfig(Exception):
|
||||
message = _("Fatal call to deprecated config: %(msg)s")
|
||||
|
||||
def __init__(self, msg):
|
||||
super(Exception, self).__init__(self.message % dict(msg=msg))
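The log module above is driven by a single setup() call at process start, after which named loggers are fetched through getLogger(). A minimal, hypothetical usage sketch follows (the product name 'brick' is an illustrative choice, and it assumes the oslo.config options declared earlier in this module have been registered):

    # Editor's sketch, not part of the commit: typical consumption of the
    # log module shown above.
    from brick.openstack.common import log as logging

    logging.setup('brick')             # installs handlers and sys.excepthook
    LOG = logging.getLogger(__name__)  # cached ContextAdapter instance
    LOG.info('brick logging initialized')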
|
147
brick/openstack/common/loopingcall.py
Normal file
147
brick/openstack/common/loopingcall.py
Normal file
@ -0,0 +1,147 @@
|
||||
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys
import time

from eventlet import event
from eventlet import greenthread

from brick.openstack.common._i18n import _LE, _LW
from brick.openstack.common import log as logging

LOG = logging.getLogger(__name__)

# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
#              with time.time() called in the standard logging module
#              during unittests.
_ts = lambda: time.time()


class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCallBase.

    The poll-function passed to LoopingCallBase can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCallBase.wait()

    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCallBase.wait() should return."""
        self.retvalue = retvalue


class LoopingCallBase(object):
    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False
        self.done = None

    def stop(self):
        self._running = False

    def wait(self):
        return self.done.wait()


class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = _ts()
                    self.f(*self.args, **self.kw)
                    end = _ts()
                    if not self._running:
                        break
                    delay = end - start - interval
                    if delay > 0:
                        LOG.warn(_LW('task %(func_name)r run outlasted '
                                     'interval by %(delay).2f sec'),
                                 {'func_name': self.f, 'delay': delay})
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done


class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
                              'for %(idle).02f seconds',
                              {'func_name': self.f, 'idle': idle})
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done
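FixedIntervalLoopingCall is the polling primitive the connectors build on: the wrapped function is called every interval seconds until it raises LoopingCallDone, whose value is handed back through wait(). A small hypothetical sketch (device_ready() and the 2-second interval are illustrative, not part of this commit):

    # Editor's sketch: polling with the class defined above.
    from brick.openstack.common import loopingcall

    def _poll():
        if device_ready():  # assumed helper that checks for the device
            raise loopingcall.LoopingCallDone(retvalue=True)

    timer = loopingcall.FixedIntervalLoopingCall(_poll)
    found = timer.start(interval=2).wait()  # returns the LoopingCallDone value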
|
0
brick/remotefs/__init__.py
Normal file
0
brick/remotefs/__init__.py
Normal file
174
brick/remotefs/remotefs.py
Normal file
174
brick/remotefs/remotefs.py
Normal file
@ -0,0 +1,174 @@
|
||||
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Remote filesystem client utilities."""

import hashlib
import os
import re

from oslo_concurrency import processutils as putils
import six

from brick import exception
from brick.i18n import _, _LI
from brick.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class RemoteFsClient(object):

    def __init__(self, mount_type, root_helper,
                 execute=putils.execute, *args, **kwargs):

        self._mount_type = mount_type
        if mount_type == "nfs":
            self._mount_base = kwargs.get('nfs_mount_point_base', None)
            if not self._mount_base:
                raise exception.InvalidParameterValue(
                    err=_('nfs_mount_point_base required'))
            self._mount_options = kwargs.get('nfs_mount_options', None)
            self._check_nfs_options()
        elif mount_type == "cifs":
            self._mount_base = kwargs.get('smbfs_mount_point_base', None)
            if not self._mount_base:
                raise exception.InvalidParameterValue(
                    err=_('smbfs_mount_point_base required'))
            self._mount_options = kwargs.get('smbfs_mount_options', None)
        elif mount_type == "glusterfs":
            self._mount_base = kwargs.get('glusterfs_mount_point_base', None)
            if not self._mount_base:
                raise exception.InvalidParameterValue(
                    err=_('glusterfs_mount_point_base required'))
            self._mount_options = None
        else:
            raise exception.ProtocolNotSupported(protocol=mount_type)
        self.root_helper = root_helper
        self.set_execute(execute)

    def set_execute(self, execute):
        self._execute = execute

    def _get_hash_str(self, base_str):
        """Return a string that represents hash of base_str
        (in a hex format).
        """
        return hashlib.md5(base_str).hexdigest()

    def get_mount_point(self, device_name):
        """Get Mount Point.

        :param device_name: example 172.18.194.100:/var/nfs
        """
        return os.path.join(self._mount_base,
                            self._get_hash_str(device_name))

    def _read_mounts(self):
        (out, _err) = self._execute('mount', check_exit_code=0)
        lines = out.split('\n')
        mounts = {}
        for line in lines:
            tokens = line.split()
            if 2 < len(tokens):
                device = tokens[0]
                mnt_point = tokens[2]
                mounts[mnt_point] = device
        return mounts

    def mount(self, share, flags=None):
        """Mount given share."""
        mount_path = self.get_mount_point(share)

        if mount_path in self._read_mounts():
            LOG.info(_LI('Already mounted: %s') % mount_path)
            return

        self._execute('mkdir', '-p', mount_path, check_exit_code=0)
        if self._mount_type == 'nfs':
            self._mount_nfs(share, mount_path, flags)
        else:
            self._do_mount(self._mount_type, share, mount_path,
                           self._mount_options, flags)

    def _do_mount(self, mount_type, share, mount_path, mount_options=None,
                  flags=None):
        """Mounts share based on the specified params."""
        mnt_cmd = ['mount', '-t', mount_type]
        if mount_options is not None:
            mnt_cmd.extend(['-o', mount_options])
        if flags is not None:
            mnt_cmd.extend(flags)
        mnt_cmd.extend([share, mount_path])

        self._execute(*mnt_cmd, root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=0)

    def _mount_nfs(self, nfs_share, mount_path, flags=None):
        """Mount an NFS share, trying the prepared NFS mount types in turn."""
        mnt_errors = {}

        # This loop allows us to first try to mount with NFS 4.1 for pNFS
        # support but falls back to mount NFS 4 or NFS 3 if either the client
        # or server do not support it.
        for mnt_type in sorted(self._nfs_mount_type_opts.keys(), reverse=True):
            options = self._nfs_mount_type_opts[mnt_type]
            try:
                self._do_mount('nfs', nfs_share, mount_path, options, flags)
                LOG.debug('Mounted %(sh)s using %(mnt_type)s.'
                          % {'sh': nfs_share, 'mnt_type': mnt_type})
                return
            except Exception as e:
                mnt_errors[mnt_type] = six.text_type(e)
                LOG.debug('Failed to do %s mount.', mnt_type)
        raise exception.BrickException(_("NFS mount failed for share %(sh)s. "
                                         "Error - %(error)s")
                                       % {'sh': nfs_share,
                                          'error': mnt_errors})

    def _check_nfs_options(self):
        """Checks and prepares nfs mount type options."""
        self._nfs_mount_type_opts = {'nfs': self._mount_options}
        nfs_vers_opt_patterns = ['^nfsvers', '^vers', '^v[\d]']
        for opt in nfs_vers_opt_patterns:
            if self._option_exists(self._mount_options, opt):
                return

        # pNFS requires NFS 4.1. The mount.nfs4 utility does not automatically
        # negotiate 4.1 support, we have to ask for it by specifying two
        # options: vers=4 and minorversion=1.
        pnfs_opts = self._update_option(self._mount_options, 'vers', '4')
        pnfs_opts = self._update_option(pnfs_opts, 'minorversion', '1')
        self._nfs_mount_type_opts['pnfs'] = pnfs_opts

    def _option_exists(self, options, opt_pattern):
        """Checks if the option exists in nfs options and returns position."""
        options = [x.strip() for x in options.split(',')] if options else []
        pos = 0
        for opt in options:
            pos = pos + 1
            if re.match(opt_pattern, opt, flags=0):
                return pos
        return 0

    def _update_option(self, options, option, value=None):
        """Update an option if present, else add it; return the new options."""
        opts = [x.strip() for x in options.split(',')] if options else []
        pos = self._option_exists(options, option)
        if pos:
            opts.pop(pos - 1)
        opt = '%s=%s' % (option, value) if value else option
        opts.append(opt)
        return ",".join(opts) if len(opts) > 1 else opts[0]
|
0
brick/tests/__init__.py
Normal file
0
brick/tests/__init__.py
Normal file
159
brick/tests/base.py
Normal file
159
brick/tests/base.py
Normal file
@ -0,0 +1,159 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2010-2011 OpenStack Foundation
|
||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
import fixtures
|
||||
import mock
|
||||
from oslo.utils import strutils
|
||||
from oslotest import base
|
||||
|
||||
from brick.openstack.common import log as oslo_logging
|
||||
|
||||
|
||||
LOG = oslo_logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TestCase(base.BaseTestCase):
|
||||
|
||||
"""Test case base class for all unit tests."""
|
||||
|
||||
def setUp(self):
|
||||
"""Run before each test method to initialize test environment."""
|
||||
super(TestCase, self).setUp()
|
||||
|
||||
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
|
||||
try:
|
||||
test_timeout = int(test_timeout)
|
||||
except ValueError:
|
||||
# If timeout value is invalid do not set a timeout.
|
||||
test_timeout = 0
|
||||
if test_timeout > 0:
|
||||
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
|
||||
self.useFixture(fixtures.NestedTempfile())
|
||||
self.useFixture(fixtures.TempHomeDir())
|
||||
|
||||
environ_enabled = (lambda var_name:
|
||||
strutils.bool_from_string(os.environ.get(var_name)))
|
||||
if environ_enabled('OS_STDOUT_CAPTURE'):
|
||||
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
|
||||
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
|
||||
if environ_enabled('OS_STDERR_CAPTURE'):
|
||||
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
|
||||
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
|
||||
if environ_enabled('OS_LOG_CAPTURE'):
|
||||
log_format = '%(levelname)s [%(name)s] %(message)s'
|
||||
if environ_enabled('OS_DEBUG'):
|
||||
level = logging.DEBUG
|
||||
else:
|
||||
level = logging.INFO
|
||||
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
|
||||
format=log_format,
|
||||
level=level))
|
||||
|
||||
def _common_cleanup(self):
|
||||
"""Runs after each test method to tear down test environment."""
|
||||
|
||||
# Stop any timers
|
||||
for x in self.injected:
|
||||
try:
|
||||
x.stop()
|
||||
except AssertionError:
|
||||
pass
|
||||
|
||||
# Delete attributes that don't start with _ so they don't pin
|
||||
# memory around unnecessarily for the duration of the test
|
||||
# suite
|
||||
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
|
||||
del self.__dict__[key]
|
||||
|
||||
def log_level(self, level):
|
||||
"""Set logging level to the specified value."""
|
||||
log_root = logging.getLogger(None).logger
|
||||
log_root.setLevel(level)
|
||||
|
||||
def mock_object(self, obj, attr_name, new_attr=None, **kwargs):
|
||||
"""Use python mock to mock an object attribute
|
||||
|
||||
Mocks the specified object's attribute with the given value.
|
||||
Automatically performs 'addCleanup' for the mock.
|
||||
|
||||
"""
|
||||
if not new_attr:
|
||||
new_attr = mock.Mock()
|
||||
patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs)
|
||||
patcher.start()
|
||||
self.addCleanup(patcher.stop)
|
||||
|
||||
# Useful assertions
|
||||
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
|
||||
"""Assert two dicts are equivalent.
|
||||
|
||||
This is a 'deep' match in the sense that it handles nested
|
||||
dictionaries appropriately.
|
||||
|
||||
NOTE:
|
||||
|
||||
If you don't care about (or don't know) a given value, you can specify
|
||||
the string DONTCARE as the value. This will cause that dict-item
|
||||
to be skipped.
|
||||
|
||||
"""
|
||||
def raise_assertion(msg):
|
||||
d1str = d1
|
||||
d2str = d2
|
||||
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
|
||||
'd2: %(d2str)s' %
|
||||
{'msg': msg, 'd1str': d1str, 'd2str': d2str})
|
||||
raise AssertionError(base_msg)
|
||||
|
||||
d1keys = set(d1.keys())
|
||||
d2keys = set(d2.keys())
|
||||
if d1keys != d2keys:
|
||||
d1only = d1keys - d2keys
|
||||
d2only = d2keys - d1keys
|
||||
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
|
||||
'Keys in d2 and not d1: %(d2only)s' %
|
||||
{'d1only': d1only, 'd2only': d2only})
|
||||
|
||||
for key in d1keys:
|
||||
d1value = d1[key]
|
||||
d2value = d2[key]
|
||||
try:
|
||||
error = abs(float(d1value) - float(d2value))
|
||||
within_tolerance = error <= tolerance
|
||||
except (ValueError, TypeError):
|
||||
# If both values aren't convertible to float, just ignore
|
||||
# ValueError if arg is a str, TypeError if it's something else
|
||||
# (like None)
|
||||
within_tolerance = False
|
||||
|
||||
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
|
||||
self.assertDictMatch(d1value, d2value)
|
||||
elif 'DONTCARE' in (d1value, d2value):
|
||||
continue
|
||||
elif approx_equal and within_tolerance:
|
||||
continue
|
||||
elif d1value != d2value:
|
||||
raise_assertion("d1['%(key)s']=%(d1value)s != "
|
||||
"d2['%(key)s']=%(d2value)s" %
|
||||
{
|
||||
'key': key,
|
||||
'd1value': d1value,
|
||||
'd2value': d2value,
|
||||
})
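assertDictMatch above accepts the sentinel string DONTCARE for values a test does not want to pin down, and it recurses into nested dictionaries. A short hypothetical use (the test class and dictionaries are made up for illustration):

    # Editor's sketch: using the DONTCARE sentinel from assertDictMatch.
    from brick.tests import base

    class ExampleTestCase(base.TestCase):
        def test_device_info(self):
            expected = {'type': 'block', 'path': 'DONTCARE'}
            actual = {'type': 'block', 'path': '/dev/sdb'}
            self.assertDictMatch(expected, actual)  # 'path' value is skipped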
|
0
brick/tests/initiator/__init__.py
Normal file
0
brick/tests/initiator/__init__.py
Normal file
645
brick/tests/initiator/test_connector.py
Normal file
645
brick/tests/initiator/test_connector.py
Normal file
@ -0,0 +1,645 @@
|
||||
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os.path
|
||||
import string
|
||||
import time
|
||||
|
||||
import mock
|
||||
from oslo_concurrency import processutils as putils
|
||||
import testtools
|
||||
|
||||
from brick import exception
|
||||
from brick.i18n import _
|
||||
from brick.initiator import connector
|
||||
from brick.initiator import host_driver
|
||||
from brick.openstack.common import log as logging
|
||||
from brick.openstack.common import loopingcall
|
||||
from brick.tests import base
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ConnectorTestCase(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ConnectorTestCase, self).setUp()
|
||||
self.cmds = []
|
||||
|
||||
def fake_execute(self, *cmd, **kwargs):
|
||||
self.cmds.append(string.join(cmd))
|
||||
return "", None
|
||||
|
||||
def test_connect_volume(self):
|
||||
self.connector = connector.InitiatorConnector(None)
|
||||
self.assertRaises(NotImplementedError,
|
||||
self.connector.connect_volume, None)
|
||||
|
||||
def test_disconnect_volume(self):
|
||||
self.connector = connector.InitiatorConnector(None)
|
||||
self.assertRaises(NotImplementedError,
|
||||
self.connector.disconnect_volume, None, None)
|
||||
|
||||
def test_factory(self):
|
||||
obj = connector.InitiatorConnector.factory('iscsi', None)
|
||||
self.assertEqual(obj.__class__.__name__, "ISCSIConnector")
|
||||
|
||||
obj = connector.InitiatorConnector.factory('fibre_channel', None)
|
||||
self.assertEqual(obj.__class__.__name__, "FibreChannelConnector")
|
||||
|
||||
obj = connector.InitiatorConnector.factory('aoe', None)
|
||||
self.assertEqual(obj.__class__.__name__, "AoEConnector")
|
||||
|
||||
obj = connector.InitiatorConnector.factory(
|
||||
'nfs', None, nfs_mount_point_base='/mnt/test')
|
||||
self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")
|
||||
|
||||
obj = connector.InitiatorConnector.factory(
|
||||
'glusterfs', None, glusterfs_mount_point_base='/mnt/test')
|
||||
self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")
|
||||
|
||||
obj = connector.InitiatorConnector.factory('local', None)
|
||||
self.assertEqual(obj.__class__.__name__, "LocalConnector")
|
||||
|
||||
self.assertRaises(ValueError,
|
||||
connector.InitiatorConnector.factory,
|
||||
"bogus", None)
|
||||
|
||||
def test_check_valid_device_with_wrong_path(self):
|
||||
self.connector = connector.InitiatorConnector(None)
|
||||
self.connector._execute = \
|
||||
lambda *args, **kwargs: ("", None)
|
||||
self.assertFalse(self.connector.check_valid_device('/d0v'))
|
||||
|
||||
def test_check_valid_device(self):
|
||||
self.connector = connector.InitiatorConnector(None)
|
||||
self.connector._execute = \
|
||||
lambda *args, **kwargs: ("", "")
|
||||
self.assertTrue(self.connector.check_valid_device('/dev'))
|
||||
|
||||
def test_check_valid_device_with_cmd_error(self):
|
||||
def raise_except(*args, **kwargs):
|
||||
raise putils.ProcessExecutionError
|
||||
self.connector = connector.InitiatorConnector(None)
|
||||
self.connector._execute = mock.Mock()
|
||||
self.connector._execute.side_effect = raise_except
|
||||
self.assertFalse(self.connector.check_valid_device('/dev'))
|
||||
|
||||
|
||||
class HostDriverTestCase(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(HostDriverTestCase, self).setUp()
|
||||
isdir_mock = mock.Mock()
|
||||
isdir_mock.return_value = True
|
||||
os.path.isdir = isdir_mock
|
||||
self.devlist = ['device1', 'device2']
|
||||
listdir_mock = mock.Mock()
|
||||
listdir_mock.return_value = self.devlist
|
||||
os.listdir = listdir_mock
|
||||
|
||||
def test_host_driver(self):
|
||||
expected = ['/dev/disk/by-path/' + dev for dev in self.devlist]
|
||||
driver = host_driver.HostDriver()
|
||||
actual = driver.get_all_block_devices()
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
|
||||
class ISCSIConnectorTestCase(ConnectorTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ISCSIConnectorTestCase, self).setUp()
|
||||
self.connector = connector.ISCSIConnector(
|
||||
None, execute=self.fake_execute, use_multipath=False)
|
||||
|
||||
get_name_mock = mock.Mock()
|
||||
get_name_mock.return_value = "/dev/sdb"
|
||||
self.connector._linuxscsi.get_name_from_path = get_name_mock
|
||||
|
||||
def iscsi_connection(self, volume, location, iqn):
|
||||
return {
|
||||
'driver_volume_type': 'iscsi',
|
||||
'data': {
|
||||
'volume_id': volume['id'],
|
||||
'target_portal': location,
|
||||
'target_iqn': iqn,
|
||||
'target_lun': 1,
|
||||
}
|
||||
}
|
||||
|
||||
def test_get_initiator(self):
|
||||
def initiator_no_file(*args, **kwargs):
|
||||
raise putils.ProcessExecutionError('No file')
|
||||
|
||||
def initiator_get_text(*arg, **kwargs):
|
||||
text = ('## DO NOT EDIT OR REMOVE THIS FILE!\n'
|
||||
'## If you remove this file, the iSCSI daemon '
|
||||
'will not start.\n'
|
||||
'## If you change the InitiatorName, existing '
|
||||
'access control lists\n'
|
||||
'## may reject this initiator. The InitiatorName must '
|
||||
'be unique\n'
|
||||
'## for each iSCSI initiator. Do NOT duplicate iSCSI '
|
||||
'InitiatorNames.\n'
|
||||
'InitiatorName=iqn.1234-56.foo.bar:01:23456789abc')
|
||||
return text, None
|
||||
|
||||
self.connector._execute = initiator_no_file
|
||||
initiator = self.connector.get_initiator()
|
||||
self.assertIsNone(initiator)
|
||||
self.connector._execute = initiator_get_text
|
||||
initiator = self.connector.get_initiator()
|
||||
self.assertEqual(initiator, 'iqn.1234-56.foo.bar:01:23456789abc')
|
||||
|
||||
@testtools.skipUnless(os.path.exists('/dev/disk/by-path'),
|
||||
'Test requires /dev/disk/by-path')
|
||||
def test_connect_volume(self):
|
||||
self.stubs.Set(os.path, 'exists', lambda x: True)
|
||||
location = '10.0.2.15:3260'
|
||||
name = 'volume-00000001'
|
||||
iqn = 'iqn.2010-10.org.openstack:%s' % name
|
||||
vol = {'id': 1, 'name': name}
|
||||
connection_info = self.iscsi_connection(vol, location, iqn)
|
||||
device = self.connector.connect_volume(connection_info['data'])
|
||||
dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
|
||||
self.assertEqual(device['type'], 'block')
|
||||
self.assertEqual(device['path'], dev_str)
|
||||
|
||||
self.connector.disconnect_volume(connection_info['data'], device)
|
||||
expected_commands = [('iscsiadm -m node -T %s -p %s' %
|
||||
(iqn, location)),
|
||||
('iscsiadm -m session'),
|
||||
('iscsiadm -m node -T %s -p %s --login' %
|
||||
(iqn, location)),
|
||||
('iscsiadm -m node -T %s -p %s --op update'
|
||||
' -n node.startup -v automatic'
|
||||
% (iqn, location)),
|
||||
('iscsiadm -m node --rescan'),
|
||||
('iscsiadm -m session --rescan'),
|
||||
('blockdev --flushbufs /dev/sdb'),
|
||||
('tee -a /sys/block/sdb/device/delete'),
|
||||
('iscsiadm -m node -T %s -p %s --op update'
|
||||
' -n node.startup -v manual' % (iqn, location)),
|
||||
('iscsiadm -m node -T %s -p %s --logout' %
|
||||
(iqn, location)),
|
||||
('iscsiadm -m node -T %s -p %s --op delete' %
|
||||
(iqn, location)), ]
|
||||
LOG.debug("self.cmds = %s" % self.cmds)
|
||||
LOG.debug("expected = %s" % expected_commands)
|
||||
|
||||
self.assertEqual(expected_commands, self.cmds)
|
||||
|
||||
def test_connect_volume_with_multipath(self):
|
||||
location = '10.0.2.15:3260'
|
||||
name = 'volume-00000001'
|
||||
iqn = 'iqn.2010-10.org.openstack:%s' % name
|
||||
vol = {'id': 1, 'name': name}
|
||||
connection_properties = self.iscsi_connection(vol, location, iqn)
|
||||
|
||||
self.connector_with_multipath = \
|
||||
connector.ISCSIConnector(None, use_multipath=True)
|
||||
self.connector_with_multipath._run_iscsiadm_bare = \
|
||||
lambda *args, **kwargs: "%s %s" % (location, iqn)
|
||||
portals_mock = mock.Mock()
|
||||
portals_mock.return_value = [[location, iqn]]
|
||||
self.connector_with_multipath.\
|
||||
_get_target_portals_from_iscsiadm_output = portals_mock
|
||||
connect_to_mock = mock.Mock()
|
||||
connect_to_mock.return_value = None
|
||||
self.connector_with_multipath._connect_to_iscsi_portal = \
|
||||
connect_to_mock
|
||||
rescan_iscsi_mock = mock.Mock()
|
||||
rescan_iscsi_mock.return_value = None
|
||||
self.connector_with_multipath._rescan_iscsi = rescan_iscsi_mock
|
||||
rescan_multipath_mock = mock.Mock()
|
||||
rescan_multipath_mock.return_value = None
|
||||
self.connector_with_multipath._rescan_multipath = \
|
||||
rescan_multipath_mock
|
||||
get_device_mock = mock.Mock()
|
||||
get_device_mock.return_value = 'iqn.2010-10.org.openstack:%s' % name
|
||||
self.connector_with_multipath._get_multipath_device_name = \
|
||||
get_device_mock
|
||||
exists_mock = mock.Mock()
|
||||
exists_mock.return_value = True
|
||||
os.path.exists = exists_mock
|
||||
result = self.connector_with_multipath.connect_volume(
|
||||
connection_properties['data'])
|
||||
expected_result = {'path': 'iqn.2010-10.org.openstack:volume-00000001',
|
||||
'type': 'block'}
|
||||
self.assertEqual(result, expected_result)
|
||||
|
||||
def test_connect_volume_with_not_found_device(self):
|
||||
exists_mock = mock.Mock()
|
||||
exists_mock.return_value = False
|
||||
os.path.exists = exists_mock
|
||||
sleep_mock = mock.Mock()
|
||||
sleep_mock.return_value = None
|
||||
time.sleep = sleep_mock
|
||||
location = '10.0.2.15:3260'
|
||||
name = 'volume-00000001'
|
||||
iqn = 'iqn.2010-10.org.openstack:%s' % name
|
||||
vol = {'id': 1, 'name': name}
|
||||
connection_info = self.iscsi_connection(vol, location, iqn)
|
||||
self.assertRaises(exception.VolumeDeviceNotFound,
|
||||
self.connector.connect_volume,
|
||||
connection_info['data'])
|
||||
|
||||
def test_get_target_portals_from_iscsiadm_output(self):
|
||||
connector = self.connector
|
||||
test_output = '''10.15.84.19:3260 iqn.1992-08.com.netapp:sn.33615311
|
||||
10.15.85.19:3260 iqn.1992-08.com.netapp:sn.33615311'''
|
||||
res = connector._get_target_portals_from_iscsiadm_output(test_output)
|
||||
ip_iqn1 = ['10.15.84.19:3260', 'iqn.1992-08.com.netapp:sn.33615311']
|
||||
ip_iqn2 = ['10.15.85.19:3260', 'iqn.1992-08.com.netapp:sn.33615311']
|
||||
expected = [ip_iqn1, ip_iqn2]
|
||||
self.assertEqual(expected, res)
|
||||
|
||||
def test_get_multipath_device_name(self):
|
||||
realpath = mock.Mock()
|
||||
realpath.return_value = None
|
||||
os.path.realpath = realpath
|
||||
multipath_return_string = [('mpath2 (20017380006c00036)'
|
||||
'dm-7 IBM,2810XIV')]
|
||||
self.connector._run_multipath = \
|
||||
lambda *args, **kwargs: multipath_return_string
|
||||
expected = '/dev/mapper/mpath2'
|
||||
self.assertEqual(expected,
|
||||
self.connector.
|
||||
_get_multipath_device_name('/dev/md-1'))
|
||||
|
||||
def test_get_iscsi_devices(self):
|
||||
paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
|
||||
'com.netapp:node.netapp02-lun-0')]
|
||||
walk_mock = lambda x: [(['.'], ['by-path'], paths)]
|
||||
os.walk = walk_mock
|
||||
self.assertEqual(self.connector._get_iscsi_devices(), paths)
|
||||
|
||||
def test_get_iscsi_devices_with_empty_dir(self):
|
||||
walk_mock = mock.Mock()
|
||||
walk_mock.return_value = []
|
||||
os.walk = walk_mock
|
||||
self.assertEqual(self.connector._get_iscsi_devices(), [])
|
||||
|
||||
def test_get_multipath_iqn(self):
|
||||
paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
|
||||
'com.netapp:node.netapp02-lun-0')]
|
||||
realpath = lambda x: '/dev/disk/by-path/%s' % paths[0]
|
||||
os.path.realpath = realpath
|
||||
|
||||
get_iscsi_mock = mock.Mock()
|
||||
get_iscsi_mock.return_value = paths
|
||||
self.connector._get_iscsi_devices = get_iscsi_mock
|
||||
|
||||
get_multipath_device_mock = mock.Mock()
|
||||
get_multipath_device_mock.return_value = paths[0]
|
||||
self.connector._get_multipath_device_name = get_multipath_device_mock
|
||||
self.assertEqual(self.connector._get_multipath_iqn(paths[0]),
|
||||
'iqn.2013-01.ro.com.netapp:node.netapp02')
|
||||
|
||||
def test_disconnect_volume_multipath_iscsi(self):
|
||||
result = []
|
||||
|
||||
def fake_disconnect_from_iscsi_portal(properties):
|
||||
result.append(properties)
|
||||
|
||||
iqn1 = 'iqn.2013-01.ro.com.netapp:node.netapp01'
|
||||
iqn2 = 'iqn.2013-01.ro.com.netapp:node.netapp02'
|
||||
iqns = [iqn1, iqn2]
|
||||
portal = '10.0.0.1:3260'
|
||||
dev = ('ip-%s-iscsi-%s-lun-0' % (portal, iqn1))
|
||||
|
||||
get_portals_mock = mock.Mock()
|
||||
get_portals_mock.return_value = [[portal, iqn1]]
|
||||
rescan_iscsi_mock = mock.Mock()
|
||||
rescan_iscsi_mock.return_value = None
|
||||
|
||||
rescan_multipath = mock.Mock()
|
||||
rescan_multipath.return_value = None
|
||||
|
||||
get_block_devices_mock = mock.Mock()
|
||||
get_block_devices_mock.return_value = [dev, '/dev/mapper/md-1']
|
||||
|
||||
get_multipath_name_mock = mock.Mock()
|
||||
get_multipath_name_mock.return_value = '/dev/mapper/md-3'
|
||||
|
||||
self.connector._get_multipath_iqn = lambda x: iqns.pop()
|
||||
|
||||
disconnect_mock = fake_disconnect_from_iscsi_portal
|
||||
self.connector._disconnect_from_iscsi_portal = disconnect_mock
|
||||
fake_property = {'target_portal': portal,
|
||||
'target_iqn': iqn1}
|
||||
self.connector._disconnect_volume_multipath_iscsi(fake_property,
|
||||
'fake/multipath')
|
||||
# Target in use by other mp devices, don't disconnect
|
||||
self.assertEqual([], result)
|
||||
|
||||
def test_disconnect_volume_multipath_iscsi_without_other_mp_devices(self):
|
||||
result = []
|
||||
|
||||
def fake_disconnect_from_iscsi_portal(properties):
|
||||
result.append(properties)
|
||||
portal = '10.0.2.15:3260'
|
||||
name = 'volume-00000001'
|
||||
iqn = 'iqn.2010-10.org.openstack:%s' % name
|
||||
|
||||
get_portals_mock = mock.Mock()
|
||||
get_portals_mock.return_value = [[portal, iqn]]
|
||||
self.connector._get_target_portals_from_iscsiadm_output = \
|
||||
get_portals_mock
|
||||
|
||||
rescan_iscsi_mock = mock.Mock()
|
||||
rescan_iscsi_mock.return_value = None
|
||||
self.connector._rescan_iscsi = rescan_iscsi_mock
|
||||
|
||||
rescan_multipath_mock = mock.Mock()
|
||||
rescan_multipath_mock.return_value = None
|
||||
self.connector._rescan_multipath = rescan_multipath_mock
|
||||
|
||||
get_all_devices_mock = mock.Mock()
|
||||
get_all_devices_mock.return_value = []
|
||||
self.connector.driver.get_all_block_devices = get_all_devices_mock
|
||||
|
||||
self.connector._disconnect_from_iscsi_portal = \
|
||||
fake_disconnect_from_iscsi_portal
|
||||
fake_property = {'target_portal': portal,
|
||||
'target_iqn': iqn}
|
||||
self.connector._disconnect_volume_multipath_iscsi(fake_property,
|
||||
'fake/multipath')
|
||||
# Target not in use by other mp devices, disconnect
|
||||
self.assertEqual([fake_property], result)
|
||||
|
||||
|
||||
class FibreChannelConnectorTestCase(ConnectorTestCase):
|
||||
def setUp(self):
|
||||
super(FibreChannelConnectorTestCase, self).setUp()
|
||||
self.connector = connector.FibreChannelConnector(
|
||||
None, execute=self.fake_execute, use_multipath=False)
|
||||
self.assertIsNotNone(self.connector)
|
||||
self.assertIsNotNone(self.connector._linuxfc)
|
||||
self.assertIsNotNone(self.connector._linuxscsi)
|
||||
|
||||
def fake_get_fc_hbas(self):
|
||||
return [{'ClassDevice': 'host1',
|
||||
'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0'
|
||||
'/0000:05:00.2/host1/fc_host/host1',
|
||||
'dev_loss_tmo': '30',
|
||||
'fabric_name': '0x1000000533f55566',
|
||||
'issue_lip': '<store method only>',
|
||||
'max_npiv_vports': '255',
|
||||
'maxframe_size': '2048 bytes',
|
||||
'node_name': '0x200010604b019419',
|
||||
'npiv_vports_inuse': '0',
|
||||
'port_id': '0x680409',
|
||||
'port_name': '0x100010604b019419',
|
||||
'port_state': 'Online',
|
||||
'port_type': 'NPort (fabric via point-to-point)',
|
||||
'speed': '10 Gbit',
|
||||
'supported_classes': 'Class 3',
|
||||
'supported_speeds': '10 Gbit',
|
||||
'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27',
|
||||
'tgtid_bind_type': 'wwpn (World Wide Port Name)',
|
||||
'uevent': None,
|
||||
'vport_create': '<store method only>',
|
||||
'vport_delete': '<store method only>'}]
|
||||
|
||||
def fake_get_fc_hbas_info(self):
|
||||
hbas = self.fake_get_fc_hbas()
|
||||
info = [{'port_name': hbas[0]['port_name'].replace('0x', ''),
|
||||
'node_name': hbas[0]['node_name'].replace('0x', ''),
|
||||
'host_device': hbas[0]['ClassDevice'],
|
||||
'device_path': hbas[0]['ClassDevicePath']}]
|
||||
return info
|
||||
|
||||
def fibrechan_connection(self, volume, location, wwn):
|
||||
return {'driver_volume_type': 'fibrechan',
|
||||
'data': {
|
||||
'volume_id': volume['id'],
|
||||
'target_portal': location,
|
||||
'target_wwn': wwn,
|
||||
'target_lun': 1,
|
||||
}}
|
||||
|
||||
def test_connect_volume(self):
|
||||
self.connector._linuxfc.get_fc_hbas = self.fake_get_fc_hbas
|
||||
self.connector._linuxfc.get_fc_hbas_info = \
|
||||
self.fake_get_fc_hbas_info
|
||||
exists_mock = mock.Mock()
|
||||
exists_mock.return_value = True
|
||||
os.path.exists = exists_mock
|
||||
realpath_mock = mock.Mock()
|
||||
realpath_mock.return_value = '/dev/sdb'
|
||||
os.path.realpath = realpath_mock
|
||||
|
||||
multipath_devname = '/dev/md-1'
|
||||
devices = {"device": multipath_devname,
|
||||
"id": "1234567890",
|
||||
"devices": [{'device': '/dev/sdb',
|
||||
'address': '1:0:0:1',
|
||||
'host': 1, 'channel': 0,
|
||||
'id': 0, 'lun': 1}]}
|
||||
find_device_mock = mock.Mock()
|
||||
find_device_mock.return_value = devices
|
||||
self.connector._linuxscsi.find_multipath_device = find_device_mock
|
||||
remove_device_mock = mock.Mock()
|
||||
remove_device_mock.return_value = None
|
||||
self.connector._linuxscsi.remove_scsi_device = remove_device_mock
|
||||
get_device_info_mock = mock.Mock()
|
||||
get_device_info_mock.return_value = devices['devices'][0]
|
||||
self.connector._linuxscsi.get_device_info = get_device_info_mock
|
||||
location = '10.0.2.15:3260'
|
||||
name = 'volume-00000001'
|
||||
vol = {'id': 1, 'name': name}
|
||||
# Should work for string, unicode, and list
|
||||
wwns = ['1234567890123456', unicode('1234567890123456'),
|
||||
['1234567890123456', '1234567890123457']]
|
||||
for wwn in wwns:
|
||||
connection_info = self.fibrechan_connection(vol, location, wwn)
|
||||
dev_info = self.connector.connect_volume(connection_info['data'])
|
||||
exp_wwn = wwn[0] if isinstance(wwn, list) else wwn
|
||||
dev_str = ('/dev/disk/by-path/pci-0000:05:00.2-fc-0x%s-lun-1' %
|
||||
exp_wwn)
|
||||
self.assertEqual(dev_info['type'], 'block')
|
||||
self.assertEqual(dev_info['path'], dev_str)
|
||||
|
||||
self.connector.disconnect_volume(connection_info['data'], dev_info)
|
||||
expected_commands = []
|
||||
self.assertEqual(expected_commands, self.cmds)
|
||||
|
||||
# Should not work for anything other than string, unicode, and list
|
||||
connection_info = self.fibrechan_connection(vol, location, 123)
|
||||
self.assertRaises(exception.NoFibreChannelHostsFound,
|
||||
self.connector.connect_volume,
|
||||
connection_info['data'])
|
||||
|
||||
get_fc_hbas_mock = mock.Mock()
|
||||
get_fc_hbas_mock.return_value = []
|
||||
self.connector._linuxfc.get_fc_hbas = get_fc_hbas_mock
|
||||
|
||||
get_fc_hbas_info_mock = mock.Mock()
|
||||
get_fc_hbas_info_mock.return_value = []
|
||||
self.connector._linuxfc.get_fc_hbas_info = get_fc_hbas_info_mock
|
||||
self.assertRaises(exception.NoFibreChannelHostsFound,
|
||||
self.connector.connect_volume,
|
||||
connection_info['data'])
|
||||
|
||||
|
||||
class FakeFixedIntervalLoopingCall(object):
|
||||
def __init__(self, f=None, *args, **kw):
|
||||
self.args = args
|
||||
self.kw = kw
|
||||
self.f = f
|
||||
self._stop = False
|
||||
|
||||
def stop(self):
|
||||
self._stop = True
|
||||
|
||||
def wait(self):
|
||||
return self
|
||||
|
||||
def start(self, interval, initial_delay=None):
|
||||
while not self._stop:
|
||||
try:
|
||||
self.f(*self.args, **self.kw)
|
||||
except loopingcall.LoopingCallDone:
|
||||
return self
|
||||
except Exception:
|
||||
LOG.exception(_('in fixed duration looping call'))
|
||||
raise
|
||||
|
||||
|
||||
class AoEConnectorTestCase(ConnectorTestCase):
|
||||
"""Test cases for AoE initiator class."""
|
||||
def setUp(self):
|
||||
super(AoEConnectorTestCase, self).setUp()
|
||||
self.connector = connector.AoEConnector('sudo')
|
||||
self.connection_properties = {'target_shelf': 'fake_shelf',
|
||||
'target_lun': 'fake_lun'}
|
||||
loopingcall.FixedIntervalLoopingCall = FakeFixedIntervalLoopingCall
|
||||
|
||||
def _mock_path_exists(self, aoe_path, mock_values=None):
|
||||
exists_mock = mock.Mock()
|
||||
exists_mock.return_value = mock_values
|
||||
os.path.exists = exists_mock
|
||||
|
||||
def test_connect_volume(self):
|
||||
"""Ensure that if path exist aoe-revaliadte was called."""
|
||||
aoe_device, aoe_path = self.connector._get_aoe_info(
|
||||
self.connection_properties)
|
||||
|
||||
self._mock_path_exists(aoe_path, [True, True])
|
||||
|
||||
exec_mock = mock.Mock()
|
||||
exec_mock.return_value = ["", ""]
|
||||
self.connector._execute = exec_mock
|
||||
|
||||
self.connector.connect_volume(self.connection_properties)
|
||||
|
||||
def test_connect_volume_without_path(self):
|
||||
"""Ensure that if path doesn't exist aoe-discovery was called."""
|
||||
|
||||
aoe_device, aoe_path = self.connector._get_aoe_info(
|
||||
self.connection_properties)
|
||||
expected_info = {
|
||||
'type': 'block',
|
||||
'device': aoe_device,
|
||||
'path': aoe_path,
|
||||
}
|
||||
|
||||
self._mock_path_exists(aoe_path, [False, True])
|
||||
|
||||
exec_mock = mock.Mock()
|
||||
exec_mock.return_value = ["", ""]
|
||||
self.connector._execute = exec_mock
|
||||
|
||||
volume_info = self.connector.connect_volume(
|
||||
self.connection_properties)
|
||||
|
||||
self.assertDictMatch(volume_info, expected_info)
|
||||
|
||||
def test_connect_volume_could_not_discover_path(self):
|
||||
aoe_device, aoe_path = self.connector._get_aoe_info(
|
||||
self.connection_properties)
|
||||
|
||||
exists_mock = mock.Mock()
|
||||
exists_mock.return_value = False
|
||||
os.path.exists = exists_mock
|
||||
exec_mock = mock.Mock()
|
||||
exec_mock.return_value = ["", ""]
|
||||
self.connector._execute = exec_mock
|
||||
self.assertRaises(exception.VolumeDeviceNotFound,
|
||||
self.connector.connect_volume,
|
||||
self.connection_properties)
|
||||
|
||||
def test_disconnect_volume(self):
|
||||
"""Ensure that if path exist aoe-revaliadte was called."""
|
||||
aoe_device, aoe_path = self.connector._get_aoe_info(
|
||||
self.connection_properties)
|
||||
|
||||
self._mock_path_exists(aoe_path, [True])
|
||||
|
||||
exec_mock = mock.Mock()
|
||||
exec_mock.return_value = ["", ""]
|
||||
self.connector._execute = exec_mock
|
||||
|
||||
self.connector.disconnect_volume(self.connection_properties, {})
|
||||
|
||||
|
||||
class RemoteFsConnectorTestCase(ConnectorTestCase):
|
||||
"""Test cases for Remote FS initiator class."""
|
||||
TEST_DEV = '172.18.194.100:/var/nfs'
|
||||
TEST_PATH = '/mnt/test/df0808229363aad55c27da50c38d6328'
|
||||
|
||||
def setUp(self):
|
||||
super(RemoteFsConnectorTestCase, self).setUp()
|
||||
self.connection_properties = {
|
||||
'export': self.TEST_DEV,
|
||||
'name': '9c592d52-ce47-4263-8c21-4ecf3c029cdb'}
|
||||
self.connector = connector.RemoteFsConnector(
|
||||
'nfs', root_helper='sudo', nfs_mount_point_base='/mnt/test',
|
||||
nfs_mount_options='vers=3')
|
||||
|
||||
def test_connect_volume(self):
|
||||
"""Test the basic connect volume case."""
|
||||
client = self.connector._remotefsclient
|
||||
client.mount = mock.Mock()
|
||||
client.get_mount_point = mock.Mock()
|
||||
client.get_mount_point.return_value = "ass"
|
||||
|
||||
self.connector.connect_volume(self.connection_properties)
|
||||
|
||||
def test_disconnect_volume(self):
|
||||
"""Nothing should happen here -- make sure it doesn't blow up."""
|
||||
self.connector.disconnect_volume(self.connection_properties, {})
|
||||
|
||||
|
||||
class LocalConnectorTestCase(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(LocalConnectorTestCase, self).setUp()
|
||||
self.connection_properties = {'name': 'foo',
|
||||
'device_path': '/tmp/bar'}
|
||||
|
||||
def test_connect_volume(self):
|
||||
self.connector = connector.LocalConnector(None)
|
||||
cprops = self.connection_properties
|
||||
dev_info = self.connector.connect_volume(cprops)
|
||||
self.assertEqual(dev_info['type'], 'local')
|
||||
self.assertEqual(dev_info['path'], cprops['device_path'])
|
||||
|
||||
def test_connect_volume_with_invalid_connection_data(self):
|
||||
self.connector = connector.LocalConnector(None)
|
||||
cprops = {}
|
||||
self.assertRaises(ValueError,
|
||||
self.connector.connect_volume, cprops)
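The factory test above doubles as a reference for the protocol strings the library accepts ('iscsi', 'fibre_channel', 'aoe', 'nfs', 'glusterfs', 'local'). A minimal hypothetical caller (the 'sudo' root helper is an illustrative value; the tests pass None):

    # Editor's sketch: obtaining a connector the way the factory test does.
    from brick.initiator import connector

    conn = connector.InitiatorConnector.factory('iscsi', 'sudo')
    # connect_volume() takes the 'data' dict of a Cinder-style
    # connection_info structure, as exercised in the tests above.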
|
176
brick/tests/initiator/test_linuxfc.py
Normal file
176
brick/tests/initiator/test_linuxfc.py
Normal file
@ -0,0 +1,176 @@
|
||||
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os.path
|
||||
import string
|
||||
|
||||
import mock
|
||||
|
||||
from brick.initiator import linuxfc
|
||||
from brick.openstack.common import log as logging
|
||||
from brick.tests import base
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LinuxFCTestCase(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(LinuxFCTestCase, self).setUp()
|
||||
self.cmds = []
|
||||
|
||||
exists_mock = mock.Mock()
|
||||
exists_mock.return_value = True
|
||||
os.path.exists = exists_mock
|
||||
self.lfc = linuxfc.LinuxFibreChannel(None, execute=self.fake_execute)
|
||||
|
||||
def fake_execute(self, *cmd, **kwargs):
|
||||
self.cmds.append(string.join(cmd))
|
||||
return "", None
|
||||
|
||||
def test_rescan_hosts(self):
|
||||
hbas = [{'host_device': 'foo'},
|
||||
{'host_device': 'bar'}, ]
|
||||
self.lfc.rescan_hosts(hbas)
|
||||
expected_commands = ['tee -a /sys/class/scsi_host/foo/scan',
|
||||
'tee -a /sys/class/scsi_host/bar/scan']
|
||||
self.assertEqual(expected_commands, self.cmds)
|
||||
|
||||
def test_get_fc_hbas_fail(self):
|
||||
def fake_exec1(a, b, c, d, run_as_root=True, root_helper='sudo'):
|
||||
raise OSError
|
||||
|
||||
def fake_exec2(a, b, c, d, run_as_root=True, root_helper='sudo'):
|
||||
return None, 'None found'
|
||||
|
||||
self.lfc._execute = fake_exec1
|
||||
hbas = self.lfc.get_fc_hbas()
|
||||
self.assertEqual(0, len(hbas))
|
||||
self.lfc._execute = fake_exec2
|
||||
hbas = self.lfc.get_fc_hbas()
|
||||
self.assertEqual(0, len(hbas))
|
||||
|
||||
def test_get_fc_hbas(self):
|
||||
def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
|
||||
return SYSTOOL_FC, None
|
||||
self.lfc._execute = fake_exec
|
||||
hbas = self.lfc.get_fc_hbas()
|
||||
self.assertEqual(2, len(hbas))
|
||||
hba1 = hbas[0]
|
||||
self.assertEqual(hba1["ClassDevice"], "host0")
|
||||
hba2 = hbas[1]
|
||||
self.assertEqual(hba2["ClassDevice"], "host2")
|
||||
|
||||
def test_get_fc_hbas_info(self):
|
||||
def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
|
||||
return SYSTOOL_FC, None
|
||||
self.lfc._execute = fake_exec
|
||||
hbas_info = self.lfc.get_fc_hbas_info()
|
||||
expected_info = [{'device_path': '/sys/devices/pci0000:20/'
|
||||
'0000:20:03.0/0000:21:00.0/'
|
||||
'host0/fc_host/host0',
|
||||
'host_device': 'host0',
|
||||
'node_name': '50014380242b9751',
|
||||
'port_name': '50014380242b9750'},
|
||||
{'device_path': '/sys/devices/pci0000:20/'
|
||||
'0000:20:03.0/0000:21:00.1/'
|
||||
'host2/fc_host/host2',
|
||||
'host_device': 'host2',
|
||||
'node_name': '50014380242b9753',
|
||||
'port_name': '50014380242b9752'}, ]
|
||||
self.assertEqual(expected_info, hbas_info)
|
||||
|
||||
def test_get_fc_wwpns(self):
|
||||
def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
|
||||
return SYSTOOL_FC, None
|
||||
|
||||
self.lfc._execute = fake_exec
|
||||
wwpns = self.lfc.get_fc_wwpns()
|
||||
expected_wwpns = ['50014380242b9750', '50014380242b9752']
|
||||
self.assertEqual(expected_wwpns, wwpns)
|
||||
|
||||
def test_get_fc_wwnns(self):
|
||||
def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
|
||||
return SYSTOOL_FC, None
|
||||
self.lfc._execute = fake_exec
|
||||
wwnns = self.lfc.get_fc_wwpns()
|
||||
expected_wwnns = ['50014380242b9750', '50014380242b9752']
|
||||
self.assertEqual(expected_wwnns, wwnns)
|
||||


SYSTOOL_FC = """
Class = "fc_host"

  Class Device = "host0"
  Class Device path = "/sys/devices/pci0000:20/0000:20:03.0/\
0000:21:00.0/host0/fc_host/host0"
    dev_loss_tmo = "16"
    fabric_name = "0x100000051ea338b9"
    issue_lip = <store method only>
    max_npiv_vports = "0"
    node_name = "0x50014380242b9751"
    npiv_vports_inuse = "0"
    port_id = "0x960d0d"
    port_name = "0x50014380242b9750"
    port_state = "Online"
    port_type = "NPort (fabric via point-to-point)"
    speed = "8 Gbit"
    supported_classes = "Class 3"
    supported_speeds = "1 Gbit, 2 Gbit, 4 Gbit, 8 Gbit"
    symbolic_name = "QMH2572 FW:v4.04.04 DVR:v8.03.07.12-k"
    system_hostname = ""
    tgtid_bind_type = "wwpn (World Wide Port Name)"
    uevent =
    vport_create = <store method only>
    vport_delete = <store method only>

    Device = "host0"
    Device path = "/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.0/host0"
      edc = <store method only>
      optrom_ctl = <store method only>
      reset = <store method only>
      uevent = "DEVTYPE=scsi_host"


  Class Device = "host2"
  Class Device path = "/sys/devices/pci0000:20/0000:20:03.0/\
0000:21:00.1/host2/fc_host/host2"
    dev_loss_tmo = "16"
    fabric_name = "0x100000051ea33b79"
    issue_lip = <store method only>
    max_npiv_vports = "0"
    node_name = "0x50014380242b9753"
    npiv_vports_inuse = "0"
    port_id = "0x970e09"
    port_name = "0x50014380242b9752"
    port_state = "Online"
    port_type = "NPort (fabric via point-to-point)"
    speed = "8 Gbit"
    supported_classes = "Class 3"
    supported_speeds = "1 Gbit, 2 Gbit, 4 Gbit, 8 Gbit"
    symbolic_name = "QMH2572 FW:v4.04.04 DVR:v8.03.07.12-k"
    system_hostname = ""
    tgtid_bind_type = "wwpn (World Wide Port Name)"
    uevent =
    vport_create = <store method only>
    vport_delete = <store method only>

    Device = "host2"
    Device path = "/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.1/host2"
      edc = <store method only>
      optrom_ctl = <store method only>
      reset = <store method only>
      uevent = "DEVTYPE=scsi_host"


"""
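
For reference, SYSTOOL_FC above mirrors the output of ``systool -c fc_host -v``.
A rough, illustrative parser (a sketch, not the class under test) that reduces
such output to the WWPN list ``test_get_fc_wwpns`` expects::

    def wwpns_from_systool(output):
        # Collect every port_name value and drop the leading "0x",
        # e.g. 'port_name = "0x50014380242b9750"' -> '50014380242b9750'.
        wwpns = []
        for line in output.splitlines():
            line = line.strip()
            if line.startswith('port_name'):
                value = line.split('=', 1)[1].strip().strip('"')
                wwpns.append(value.replace('0x', ''))
        return wwpns

    assert wwpns_from_systool(SYSTOOL_FC) == ['50014380242b9750',
                                              '50014380242b9752']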
226
brick/tests/initiator/test_linuxscsi.py
Normal file
@ -0,0 +1,226 @@
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os.path

import mock

from brick.initiator import linuxscsi
from brick.openstack.common import log as logging
from brick.tests import base

LOG = logging.getLogger(__name__)


class LinuxSCSITestCase(base.TestCase):
    def setUp(self):
        super(LinuxSCSITestCase, self).setUp()
        self.cmds = []
        realpath_mock = mock.Mock()
        realpath_mock.return_value = '/dev/sdc'
        os.path.realpath = realpath_mock
        self.linuxscsi = linuxscsi.LinuxSCSI(None, execute=self.fake_execute)

    def fake_execute(self, *cmd, **kwargs):
        self.cmds.append(' '.join(cmd))
        return "", None

    def test_echo_scsi_command(self):
        self.linuxscsi.echo_scsi_command("/some/path", "1")
        expected_commands = ['tee -a /some/path']
        self.assertEqual(expected_commands, self.cmds)

    def test_get_name_from_path(self):
        device_name = "/dev/sdc"
        realpath_mock = mock.Mock()
        realpath_mock.return_value = device_name
        os.path.realpath = realpath_mock
        disk_path = ("/dev/disk/by-path/ip-10.10.220.253:3260-"
                     "iscsi-iqn.2000-05.com.3pardata:21810002ac00383d-lun-0")
        name = self.linuxscsi.get_name_from_path(disk_path)
        self.assertEqual(name, device_name)
        realpath_mock = mock.Mock()
        realpath_mock.return_value = "bogus"
        os.path.realpath = realpath_mock
        name = self.linuxscsi.get_name_from_path(disk_path)
        self.assertIsNone(name)

    def test_remove_scsi_device(self):
        exists_mock = mock.Mock()
        exists_mock.return_value = False
        os.path.exists = exists_mock
        self.linuxscsi.remove_scsi_device("/dev/sdc")
        expected_commands = []
        self.assertEqual(expected_commands, self.cmds)
        exists_mock = mock.Mock()
        exists_mock.return_value = True
        os.path.exists = exists_mock
        self.linuxscsi.remove_scsi_device("/dev/sdc")
        expected_commands = [
            ('blockdev --flushbufs /dev/sdc'),
            ('tee -a /sys/block/sdc/device/delete')]
        self.assertEqual(expected_commands, self.cmds)

    def test_flush_multipath_device(self):
        self.linuxscsi.flush_multipath_device('/dev/dm-9')
        expected_commands = [('multipath -f /dev/dm-9')]
        self.assertEqual(expected_commands, self.cmds)

    def test_flush_multipath_devices(self):
        self.linuxscsi.flush_multipath_devices()
        expected_commands = [('multipath -F')]
        self.assertEqual(expected_commands, self.cmds)

    def test_remove_multipath_device(self):
        def fake_find_multipath_device(device):
            devices = [{'device': '/dev/sde', 'host': 0,
                        'channel': 0, 'id': 0, 'lun': 1},
                       {'device': '/dev/sdf', 'host': 2,
                        'channel': 0, 'id': 0, 'lun': 1}, ]

            info = {"device": "dm-3",
                    "id": "350002ac20398383d",
                    "devices": devices}
            return info

        exists_mock = mock.Mock()
        exists_mock.return_value = True
        os.path.exists = exists_mock

        self.linuxscsi.find_multipath_device = fake_find_multipath_device

        self.linuxscsi.remove_multipath_device('/dev/dm-3')
        expected_commands = [
            ('blockdev --flushbufs /dev/sde'),
            ('tee -a /sys/block/sde/device/delete'),
            ('blockdev --flushbufs /dev/sdf'),
            ('tee -a /sys/block/sdf/device/delete'),
            ('multipath -f 350002ac20398383d'), ]
        self.assertEqual(expected_commands, self.cmds)

    def test_find_multipath_device_3par(self):
        def fake_execute(*cmd, **kwargs):
            out = ("mpath6 (350002ac20398383d) dm-3 3PARdata,VV\n"
                   "size=2.0G features='0' hwhandler='0' wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 0:0:0:1 sde 8:64 active undef running\n"
                   " `- 2:0:0:1 sdf 8:80 active undef running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sde')
        LOG.error("info = %s" % info)
        self.assertEqual("/dev/dm-3", info["device"])
        self.assertEqual("/dev/sde", info['devices'][0]['device'])
        self.assertEqual("0", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("1", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdf", info['devices'][1]['device'])
        self.assertEqual("2", info['devices'][1]['host'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("0", info['devices'][1]['channel'])
        self.assertEqual("1", info['devices'][1]['lun'])

    def test_find_multipath_device_svc(self):
        def fake_execute(*cmd, **kwargs):
            out = ("36005076da00638089c000000000004d5 dm-2 IBM,2145\n"
                   "size=954M features='1 queue_if_no_path' hwhandler='0'"
                   " wp=rw\n"
                   "|-+- policy='round-robin 0' prio=-1 status=active\n"
                   "| |- 6:0:2:0 sde 8:64 active undef running\n"
                   "| `- 6:0:4:0 sdg 8:96 active undef running\n"
                   "`-+- policy='round-robin 0' prio=-1 status=enabled\n"
                   " |- 6:0:3:0 sdf 8:80 active undef running\n"
                   " `- 6:0:5:0 sdh 8:112 active undef running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sde')
        LOG.error("info = %s" % info)
        self.assertEqual("/dev/dm-2", info["device"])
        self.assertEqual("/dev/sde", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdf", info['devices'][2]['device'])
        self.assertEqual("6", info['devices'][2]['host'])
        self.assertEqual("0", info['devices'][2]['channel'])
        self.assertEqual("3", info['devices'][2]['id'])
        self.assertEqual("0", info['devices'][2]['lun'])

    def test_find_multipath_device_ds8000(self):
        def fake_execute(*cmd, **kwargs):
            out = ("36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n"
                   "size=1.0G features='1 queue_if_no_path' hwhandler='0'"
                   " wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 6:0:2:0 sdd 8:64 active undef running\n"
                   " `- 6:1:0:3 sdc 8:32 active undef running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sdd')
        LOG.error("info = %s" % info)
        self.assertEqual("/dev/dm-2", info["device"])
        self.assertEqual("/dev/sdd", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdc", info['devices'][1]['device'])
        self.assertEqual("6", info['devices'][1]['host'])
        self.assertEqual("1", info['devices'][1]['channel'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("3", info['devices'][1]['lun'])

    def test_find_multipath_device_with_error(self):
        def fake_execute(*cmd, **kwargs):
            out = ("Oct 13 10:24:01 | /lib/udev/scsi_id exitted with 1\n"
                   "36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n"
                   "size=1.0G features='1 queue_if_no_path' hwhandler='0'"
                   " wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 6:0:2:0 sdd 8:64 active undef running\n"
                   " `- 6:1:0:3 sdc 8:32 active undef running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sdd')
        LOG.error("info = %s" % info)
        self.assertEqual("/dev/dm-2", info["device"])
        self.assertEqual("/dev/sdd", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdc", info['devices'][1]['device'])
        self.assertEqual("6", info['devices'][1]['host'])
        self.assertEqual("1", info['devices'][1]['channel'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("3", info['devices'][1]['lun'])
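
For reference, the ``multipath -ll`` samples in these tests share one shape: a
``name (wwid) dm-N vendor,product`` header followed by one
``host:channel:id:lun device major:minor ...`` line per path. An illustrative
sketch (not the code under test) of how such text maps onto the dict the
assertions read::

    import re

    # "6:0:2:0 sde" -> host "6", channel "0", id "2", lun "0", device "sde".
    PATH_RE = re.compile(r'(\d+):(\d+):(\d+):(\d+)\s+(sd\w+)')

    def parse_multipath_ll(output):
        info = {}
        for line in output.splitlines():
            if 'dm-' in line and not info:
                # Header line, e.g. "mpath6 (350002ac20398383d) dm-3 ..."
                # or "36005076da00638089c000000000004d5 dm-2 IBM,2145".
                tokens = line.split()
                if tokens[1].startswith('('):
                    wwid = tokens[1].strip('()')
                else:
                    wwid = tokens[0]
                dm_name = [t for t in tokens if t.startswith('dm-')][0]
                info = {'id': wwid,
                        'device': '/dev/' + dm_name,
                        'devices': []}
                continue
            match = PATH_RE.search(line)
            if match and info:
                host, channel, dev_id, lun, dev = match.groups()
                info['devices'].append({'device': '/dev/' + dev,
                                        'host': host,
                                        'channel': channel,
                                        'id': dev_id,
                                        'lun': lun})
        return info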
28
brick/tests/test_brick.py
Normal file
@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
test_brick
----------------------------------

Tests for `brick` module.
"""

from brick.tests import base


class TestBrick(base.TestCase):

    def test_something(self):
        pass
59
brick/tests/test_exception.py
Normal file
@ -0,0 +1,59 @@

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from brick import exception
from brick.tests import base


class BrickExceptionTestCase(base.TestCase):
    def test_default_error_msg(self):
        class FakeBrickException(exception.BrickException):
            message = "default message"

        exc = FakeBrickException()
        self.assertEqual(unicode(exc), 'default message')

    def test_error_msg(self):
        self.assertEqual(unicode(exception.BrickException('test')), 'test')

    def test_default_error_msg_with_kwargs(self):
        class FakeBrickException(exception.BrickException):
            message = "default message: %(code)s"

        exc = FakeBrickException(code=500)
        self.assertEqual(unicode(exc), 'default message: 500')

    def test_error_msg_exception_with_kwargs(self):
        class FakeBrickException(exception.BrickException):
            message = "default message: %(mispelled_code)s"

        exc = FakeBrickException(code=500)
        self.assertEqual(unicode(exc), 'default message: %(mispelled_code)s')

    def test_default_error_code(self):
        class FakeBrickException(exception.BrickException):
            code = 404

        exc = FakeBrickException()
        self.assertEqual(exc.kwargs['code'], 404)

    def test_error_code_from_kwarg(self):
        class FakeBrickException(exception.BrickException):
            code = 500

        exc = FakeBrickException(code=404)
        self.assertEqual(exc.kwargs['code'], 404)
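
These tests pin down the contract for ``brick.exception.BrickException``: a
class-level ``message`` template formatted with the constructor kwargs, a
fallback to the unformatted template when a substitution key is missing, and a
``code`` default merged into ``kwargs``. A minimal sketch that satisfies just
these tests (not the actual brick/exception.py, which differs in details)
could look like::

    class BrickException(Exception):
        message = "An unknown exception occurred."
        code = 500

        def __init__(self, message=None, **kwargs):
            self.kwargs = kwargs
            if 'code' not in self.kwargs:
                self.kwargs['code'] = self.code

            if not message:
                try:
                    message = self.message % kwargs
                except (KeyError, TypeError):
                    # A bad substitution key must not mask the original
                    # error; fall back to the raw template.
                    message = self.message

            super(BrickException, self).__init__(message)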
75
doc/source/conf.py
Executable file
@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'oslosphinx'
]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'brick'
copyright = u'2013, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
4
doc/source/contributing.rst
Normal file
@ -0,0 +1,4 @@
============
Contributing
============

.. include:: ../../CONTRIBUTING.rst
50
doc/source/index.rst
Normal file
@ -0,0 +1,50 @@
Brick |release| Documentation
=============================

Overview
--------

**Brick** is a Python package containing classes that help
with volume discovery and creation for local storage.

:doc:`installation`
  Instructions on how to get the distribution.

:doc:`tutorial`
  Start here for a quick overview.

:doc:`api/index`
  The complete API documentation, organized by module.


Changes
-------

See the :doc:`changelog` for a full list of changes to **Brick**.

About This Documentation
------------------------

This documentation is generated using the `Sphinx
<http://sphinx.pocoo.org/>`_ documentation generator. The source files
for the documentation are located in the *doc/* directory of the
**Brick** distribution. To generate the docs locally, run the
following command from the root directory of the **Brick** source:

.. code-block:: bash

   $ python setup.py doc


.. toctree::
   :hidden:

   installation
   tutorial
   changelog
   api/index


Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
12
doc/source/installation.rst
Normal file
@ -0,0 +1,12 @@
============
Installation
============

At the command line::

    $ pip install brick

Or, if you have virtualenvwrapper installed::

    $ mkvirtualenv brick
    $ pip install brick
1
doc/source/readme.rst
Normal file
@ -0,0 +1 @@
.. include:: ../../README.rst
7
doc/source/usage.rst
Normal file
@ -0,0 +1,7 @@
========
Usage
========

To use brick in a project::

    import brick
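
A slightly fuller, hypothetical example, built only from the classes this
commit's unit tests exercise (the ``execute`` helper and the device path below
are placeholders, not part of the library)::

    import subprocess

    from brick.initiator import linuxscsi

    def execute(*cmd, **kwargs):
        # Stand-in executor; it ignores run_as_root/root_helper, so it is
        # only suitable as a sketch, not for production use.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return out, err

    scsi = linuxscsi.LinuxSCSI(None, execute=execute)
    info = scsi.find_multipath_device('/dev/sde')
    if info:
        print("%s -> %s" % (info['device'],
                            [d['device'] for d in info['devices']]))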
10
openstack-common.conf
Normal file
@ -0,0 +1,10 @@
[DEFAULT]

# The list of modules to copy from oslo-incubator.git

# The base module to hold the copy of openstack.common
base=brick

module=log
module=log_handler
module=loopingcall
12
requirements.txt
Normal file
@ -0,0 +1,12 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

pbr>=0.6,!=0.7,<1.0
Babel>=1.3
eventlet>=0.15.2
oslo.concurrency>=0.3.0,!=0.4.0
oslo.rootwrap>=1.3.0
oslo.serialization>=1.2.0
oslo.i18n>=1.0.0
six>=1.7.0
56
setup.cfg
Normal file
@ -0,0 +1,56 @@
[metadata]
name = brick
summary = OpenStack Cinder brick library for managing local volume attaches
description-file =
    README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 2.6
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.3
    Programming Language :: Python :: 3.4

[global]
setup-hooks =
    pbr.hooks.setup_hook

[files]
packages =
    brick

[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

[build_sphinx]
source-dir = doc/source
build-dir = doc/build
all_files = 1

[upload_sphinx]
upload-dir = doc/build/html

[compile_catalog]
directory = brick/locale
domain = brick

[update_catalog]
domain = brick
output_dir = brick/locale
input_file = brick/locale/brick.pot

[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = brick/locale/brick.pot
22
setup.py
Executable file
@ -0,0 +1,22 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)
14
test-requirements.txt
Normal file
@ -0,0 +1,14 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

hacking>=0.9.2,<0.10
coverage>=3.6
discover
python-subunit
sphinx>=1.1.2
oslosphinx
oslotest>=1.1.0.0a1
testrepository>=0.0.18
testscenarios>=0.4
testtools>=0.9.34
62
tox.ini
Normal file
@ -0,0 +1,62 @@
[tox]
minversion = 1.6
envlist = py27,pep8
skipsdist = True

[testenv]
usedevelop = True
install_command = pip install -U {opts} {packages}
setenv =
    VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands = python setup.py testr --slowest --testr-args='{posargs}'
whitelist_externals = bash

[tox:jenkins]
downloadcache = ~/cache/pip

[testenv:pep8]
commands = flake8

[testenv:venv]
commands = {posargs}

[testenv:cover]
commands = python setup.py testr --coverage --testr-args='{posargs}'

[testenv:docs]
commands = python setup.py build_sphinx

[flake8]
# H803 skipped on purpose per list discussion.
# E123, E125 skipped as they are invalid PEP-8.
# The following checks are ignored on purpose.
#
# E251 unexpected spaces around keyword / parameter equals
# reason: no improvement in readability
#
# E265 block comment should start with '# '
# reason: no improvement in readability
#
# H402 one line docstring needs punctuation
# reason: removed in hacking (https://review.openstack.org/#/c/101497/)
#
# H803 git commit title should not end with period
# reason: removed in hacking (https://review.openstack.org/#/c/101498/)
#
# H904 wrap long lines in parentheses instead of a backslash
# reason: removed in hacking (https://review.openstack.org/#/c/101701/)
#
# Due to the upgrade to hacking 0.9.2, the following checks are
# ignored on purpose for the moment and should be re-enabled.

show-source = True
ignore = E123,E125,E251,E265,H302,H402,H405,H803,H904
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
max-complexity=30

[hacking]
import_exceptions = brick.i18n