Add base cinder common interface
Addition of common cinder-related code that a storage interface driver will be able to leverage for interacting with cinder. Co-Authored-By: Joanna Taryma <joanna.taryma@intel.com> Partial-Bug: #1559691 Change-Id: I4aed895e52fcefb61dd0751e8bc9b39a26706276
This commit is contained in:
parent
7ce234d8b4
commit
459fe314fa
@ -888,6 +888,103 @@
|
|||||||
#action_interval = 10
|
#action_interval = 10
|
||||||
|
|
||||||
|
|
||||||
|
[cinder]
|
||||||
|
|
||||||
|
#
|
||||||
|
# From ironic
|
||||||
|
#
|
||||||
|
|
||||||
|
# Authentication URL (string value)
|
||||||
|
#auth_url = <None>
|
||||||
|
|
||||||
|
# Authentication type to load (string value)
|
||||||
|
# Deprecated group/name - [cinder]/auth_plugin
|
||||||
|
#auth_type = <None>
|
||||||
|
|
||||||
|
# PEM encoded Certificate Authority to use when verifying
|
||||||
|
# HTTPs connections. (string value)
|
||||||
|
#cafile = <None>
|
||||||
|
|
||||||
|
# PEM encoded client certificate cert file (string value)
|
||||||
|
#certfile = <None>
|
||||||
|
|
||||||
|
# Optional domain ID to use with v3 and v2 parameters. It will
|
||||||
|
# be used for both the user and project domain in v3 and
|
||||||
|
# ignored in v2 authentication. (string value)
|
||||||
|
#default_domain_id = <None>
|
||||||
|
|
||||||
|
# Optional domain name to use with v3 API and v2 parameters.
|
||||||
|
# It will be used for both the user and project domain in v3
|
||||||
|
# and ignored in v2 authentication. (string value)
|
||||||
|
#default_domain_name = <None>
|
||||||
|
|
||||||
|
# Domain ID to scope to (string value)
|
||||||
|
#domain_id = <None>
|
||||||
|
|
||||||
|
# Domain name to scope to (string value)
|
||||||
|
#domain_name = <None>
|
||||||
|
|
||||||
|
# Verify HTTPS connections. (boolean value)
|
||||||
|
#insecure = false
|
||||||
|
|
||||||
|
# PEM encoded client certificate key file (string value)
|
||||||
|
#keyfile = <None>
|
||||||
|
|
||||||
|
# User's password (string value)
|
||||||
|
#password = <None>
|
||||||
|
|
||||||
|
# Domain ID containing project (string value)
|
||||||
|
#project_domain_id = <None>
|
||||||
|
|
||||||
|
# Domain name containing project (string value)
|
||||||
|
#project_domain_name = <None>
|
||||||
|
|
||||||
|
# Project ID to scope to (string value)
|
||||||
|
# Deprecated group/name - [cinder]/tenant-id
|
||||||
|
#project_id = <None>
|
||||||
|
|
||||||
|
# Project name to scope to (string value)
|
||||||
|
# Deprecated group/name - [cinder]/tenant-name
|
||||||
|
#project_name = <None>
|
||||||
|
|
||||||
|
# Client retries in the case of a failed request connection.
|
||||||
|
# (integer value)
|
||||||
|
#retries = 3
|
||||||
|
|
||||||
|
# Tenant ID (string value)
|
||||||
|
#tenant_id = <None>
|
||||||
|
|
||||||
|
# Tenant Name (string value)
|
||||||
|
#tenant_name = <None>
|
||||||
|
|
||||||
|
# Timeout value for http requests (integer value)
|
||||||
|
#timeout = <None>
|
||||||
|
|
||||||
|
# Trust ID (string value)
|
||||||
|
#trust_id = <None>
|
||||||
|
|
||||||
|
# URL for connecting to cinder. If set, the value must start
|
||||||
|
# with either http:// or https://. (string value)
|
||||||
|
#url = <None>
|
||||||
|
|
||||||
|
# Timeout value for connecting to cinder in seconds. (integer
|
||||||
|
# value)
|
||||||
|
#url_timeout = 30
|
||||||
|
|
||||||
|
# User's domain id (string value)
|
||||||
|
#user_domain_id = <None>
|
||||||
|
|
||||||
|
# User's domain name (string value)
|
||||||
|
#user_domain_name = <None>
|
||||||
|
|
||||||
|
# User id (string value)
|
||||||
|
#user_id = <None>
|
||||||
|
|
||||||
|
# Username (string value)
|
||||||
|
# Deprecated group/name - [cinder]/user-name
|
||||||
|
#username = <None>
|
||||||
|
|
||||||
|
|
||||||
[cisco_ucs]
|
[cisco_ucs]
|
||||||
|
|
||||||
#
|
#
|
||||||
@ -2069,22 +2166,6 @@
|
|||||||
# Reason: PKI token format is no longer supported.
|
# Reason: PKI token format is no longer supported.
|
||||||
#hash_algorithms = md5
|
#hash_algorithms = md5
|
||||||
|
|
||||||
# A choice of roles that must be present in a service token.
|
|
||||||
# Service tokens are allowed to request that an expired token
|
|
||||||
# can be used and so this check should tightly control that
|
|
||||||
# only actual services should be sending this token. Roles
|
|
||||||
# here are applied as an ANY check so any role in this list
|
|
||||||
# must be present. For backwards compatibility reasons this
|
|
||||||
# currently only affects the allow_expired check. (list value)
|
|
||||||
#service_token_roles = service
|
|
||||||
|
|
||||||
# For backwards compatibility reasons we must let valid
|
|
||||||
# service tokens pass that don't pass the service_token_roles
|
|
||||||
# check as valid. Setting this true will become the default in
|
|
||||||
# a future release and should be enabled if possible. (boolean
|
|
||||||
# value)
|
|
||||||
#service_token_roles_required = false
|
|
||||||
|
|
||||||
# Authentication type to load (string value)
|
# Authentication type to load (string value)
|
||||||
# Deprecated group/name - [keystone_authtoken]/auth_plugin
|
# Deprecated group/name - [keystone_authtoken]/auth_plugin
|
||||||
#auth_type = <None>
|
#auth_type = <None>
|
||||||
|
@ -35,7 +35,7 @@ CONF = cfg.CONF
|
|||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
SECTIONS_WITH_AUTH = (
|
SECTIONS_WITH_AUTH = (
|
||||||
'service_catalog', 'neutron', 'glance', 'swift', 'inspector')
|
'service_catalog', 'neutron', 'glance', 'swift', 'cinder', 'inspector')
|
||||||
|
|
||||||
|
|
||||||
# TODO(pas-ha) remove this check after deprecation period
|
# TODO(pas-ha) remove this check after deprecation period
|
||||||
|
431
ironic/common/cinder.py
Normal file
431
ironic/common/cinder.py
Normal file
@ -0,0 +1,431 @@
|
|||||||
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
|
||||||
|
# Copyright 2017 IBM Corp
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
from cinderclient import exceptions as cinder_exceptions
|
||||||
|
from cinderclient.v3 import client as client
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
from ironic.common import exception
|
||||||
|
from ironic.common.i18n import _
|
||||||
|
from ironic.common import keystone
|
||||||
|
from ironic.conf import CONF
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
AVAILABLE = 'available'
|
||||||
|
IN_USE = 'in-use'
|
||||||
|
|
||||||
|
_CINDER_SESSION = None
|
||||||
|
|
||||||
|
|
||||||
|
def _get_cinder_session():
    """Return the module-cached keystone session for cinder.

    The session is created lazily on first use and reused afterwards.

    :returns: A keystone session configured from the [cinder] options.
    """
    global _CINDER_SESSION
    if _CINDER_SESSION:
        return _CINDER_SESSION
    _CINDER_SESSION = keystone.get_session('cinder')
    return _CINDER_SESSION
|
||||||
|
|
||||||
|
|
||||||
|
def get_client():
    """Get a cinder client connection.

    :returns: A cinder client.
    """
    # TODO(jtaryma): Add support for noauth
    kwargs = {
        'connect_retries': CONF.cinder.retries,
        'session': _get_cinder_session(),
    }

    # NOTE(TheJulia): If a URL is provided for cinder, we will pass
    # along the URL to python-cinderclient. Otherwise the library
    # handles keystone url autodetection.
    url = CONF.cinder.url
    if url:
        kwargs['endpoint_override'] = url

    region = CONF.keystone.region_name
    if region:
        kwargs['region_name'] = region

    return client.Client(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def is_volume_available(volume):
    """Check if a volume is available for a connection.

    A volume is usable either when it is unattached ('available') or when
    it is attached but flagged as multiattach-capable.

    :param volume: The object representing the volume.
    :returns: Boolean if volume is available.
    """
    if volume.status == AVAILABLE:
        return True
    return volume.status == IN_USE and volume.multiattach
|
||||||
|
|
||||||
|
|
||||||
|
def is_volume_attached(node, volume):
    """Check if a volume is attached to the supplied node.

    :param node: The object representing the node.
    :param volume: The object representing the volume from cinder.
    :returns: Boolean indicating if the volume is attached. Returns True if
              cinder shows the volume as presently attached, otherwise
              returns False.
    """
    # An attachment counts if cinder recorded it against either the
    # node's instance UUID or the node UUID itself.
    node_ids = (node.instance_uuid, node.uuid)
    return any(attachment.get('server_id') in node_ids
               for attachment in volume.attachments or [])
|
||||||
|
|
||||||
|
|
||||||
|
def _get_attachment_id(node, volume):
|
||||||
|
"""Return the attachment ID for a node to a volume.
|
||||||
|
|
||||||
|
:param node: The object representing the node.
|
||||||
|
:param volume: The object representing the volume from cinder.
|
||||||
|
|
||||||
|
:returns: The UUID of the attachment in cinder, if present. Otherwise
|
||||||
|
returns None.
|
||||||
|
"""
|
||||||
|
# NOTE(TheJulia): This is under the belief that there is a single
|
||||||
|
# attachment for each node that represents all possible attachment
|
||||||
|
# information as multiple types can be submitted in a single request.
|
||||||
|
attachments = volume.attachments
|
||||||
|
if attachments is not None:
|
||||||
|
for attachment in attachments:
|
||||||
|
if attachment.get('server_id') in (node.instance_uuid, node.uuid):
|
||||||
|
return attachment.get('attachment_id')
|
||||||
|
|
||||||
|
|
||||||
|
def _create_metadata_dictionary(node, action):
|
||||||
|
"""Create a volume metadata dictionary utilizing the node UUID.
|
||||||
|
|
||||||
|
:param node: Object representing a node.
|
||||||
|
:param action: String value representing the last action.
|
||||||
|
|
||||||
|
:returns: Metadata dictionary for volume.
|
||||||
|
"""
|
||||||
|
label = "ironic_node_%s" % node.uuid
|
||||||
|
return {
|
||||||
|
label: {
|
||||||
|
'instance_uuid': node.instance_uuid,
|
||||||
|
'last_seen': datetime.datetime.utcnow().isoformat(),
|
||||||
|
'last_action': action}}
|
||||||
|
|
||||||
|
|
||||||
|
def _init_client_for_operations(task):
    """Obtain cinder client and return it for use.

    :param task: TaskManager instance representing the operation.
    :returns: A cinder client.
    :raises: StorageError If an exception is encountered creating the client.
    """
    try:
        return get_client()
    except Exception as e:
        # Any client construction failure is surfaced as a StorageError so
        # callers only need to handle a single exception type.
        msg = (_('Failed to initialize cinder client for node %(uuid)s: '
                 '%(err)s') % {'uuid': task.node.uuid, 'err': e})
        LOG.error(msg)
        raise exception.StorageError(msg)
|
||||||
|
|
||||||
|
|
||||||
|
def attach_volumes(task, volume_list, connector):
    """Attach volumes to a node.

    Enumerate through the provided list of volumes and attach the volumes
    to the node defined in the task utilizing the provided connector
    information.

    If an attachment appears to already exist, we will skip attempting to
    attach the volume. If use of the volume fails, a user may need to
    remove any lingering pre-existing/unused attachment records since
    we have no way to validate if the connector profile data differs
    from what was provided to cinder.

    :param task: TaskManager instance representing the operation.
    :param volume_list: List of volume_id UUID values representing volumes.
    :param connector: Dictionary object representing the node sufficiently
                      to attach a volume. This value can vary based upon
                      the node's configuration, capability, and ultimately
                      the back-end storage driver. As cinder was designed
                      around iSCSI, the 'ip' and 'initiator' keys are
                      generally expected by cinder drivers.
                      For Fibre Channel, the key 'wwpns' can be used
                      with a list of port addresses.
                      Some drivers support a 'multipath' boolean key,
                      although it is generally False. The 'host' key
                      is generally used for logging by drivers.
                      Example:

                      {
                      'wwpns': ['list','of','port','wwns'],
                      'ip': 'ip address',
                      'initiator': 'initiator iqn',
                      'multipath': False,
                      'host': 'hostname',
                      }

    :raises: StorageError If storage subsystem exception is raised.
    :raises: TypeError If the supplied volume_list is not a list.
    :returns: List of connected volumes, including volumes that were
              already connected to desired nodes. The returned list
              can be relatively consistent depending on the end storage
              driver that the volume is configured for, however
              the 'driver_volume_type' key should not be relied upon
              as it is a free-form value returned by the driver.
              The accompanying 'data' key contains the actual target
              details which will indicate either target WWNs and a LUN
              or a target portal and IQN. It also always contains
              volume ID in cinder and ironic. Except for these two IDs,
              each driver may return somewhat different data although
              the same keys are used if the target is FC or iSCSI,
              so any logic should be based upon the returned contents.
              For already attached volumes, the structure contains
              'already_attached': True key-value pair. In such case,
              connection info for the node is already in the database,
              'data' structure contains only basic info of volume ID in
              cinder and ironic, so any logic based on that should
              retrieve it from the database.
              Example:

              [{
              'driver_volume_type': 'fibre_channel'
              'data': {
                  'encrypted': False,
                  'target_lun': 1,
                  'target_wwn': ['1234567890123', '1234567890124'],
                  'volume_id': '00000000-0000-0000-0000-000000000001',
                  'ironic_volume_id':
                  '11111111-0000-0000-0000-000000000001'}
              },
              {
              'driver_volume_type': 'iscsi'
              'data': {
                  'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                  'target_portal': '127.0.0.0.1:3260',
                  'volume_id': '00000000-0000-0000-0000-000000000002',
                  'ironic_volume_id':
                  '11111111-0000-0000-0000-000000000002',
                  'target_lun': 2}
              },
              {
              'already_attached': True
              'data': {
                  'volume_id': '00000000-0000-0000-0000-000000000002',
                  'ironic_volume_id':
                  '11111111-0000-0000-0000-000000000002'}
              }]
    """
    node = task.node
    client = _init_client_for_operations(task)

    connected = []
    # NOTE(review): any cinder failure below (other than metadata updates)
    # raises StorageError and aborts the remaining volumes in the list;
    # already-processed volumes are left attached.
    for volume_id in volume_list:
        try:
            volume = client.volumes.get(volume_id)
        except cinder_exceptions.ClientException as e:
            msg = (_('Failed to get volume %(vol_id)s from cinder for node '
                     '%(uuid)s: %(err)s') %
                   {'vol_id': volume_id, 'uuid': node.uuid, 'err': e})
            LOG.error(msg)
            raise exception.StorageError(msg)
        if is_volume_attached(node, volume):
            LOG.debug('Volume %(vol_id)s is already attached to node '
                      '%(uuid)s. Skipping attachment.',
                      {'vol_id': volume_id, 'uuid': node.uuid})

            # NOTE(jtaryma): Actual connection info of already connected
            # volume will be provided by nova. Adding this dictionary to
            # 'connected' list so it contains also already connected volumes.
            # NOTE(review): cinderclient Volume objects conventionally expose
            # 'id'; confirm that '.uuid' resolves on v3 Volume results, and
            # note the docstring example uses the key 'ironic_volume_id'
            # while the code sets 'ironic_volume_uuid' — TODO confirm.
            connection = {'data': {'ironic_volume_uuid': volume.uuid,
                                   'volume_id': volume_id},
                          'already_attached': True}
            connected.append(connection)
            continue

        try:
            client.volumes.reserve(volume_id)
        except cinder_exceptions.ClientException as e:
            msg = (_('Failed to reserve volume %(vol_id)s for node %(node)s: '
                     '%(err)s)') %
                   {'vol_id': volume_id, 'node': node.uuid, 'err': e})
            LOG.error(msg)
            raise exception.StorageError(msg)

        try:
            # Provide connector information to cinder
            connection = client.volumes.initialize_connection(volume_id,
                                                              connector)
            # Guarantee both identifiers are always present in 'data',
            # regardless of what the storage driver returned.
            if 'volume_id' not in connection['data']:
                connection['data']['volume_id'] = volume_id
            connection['data']['ironic_volume_uuid'] = volume.uuid
            connected.append(connection)
        except cinder_exceptions.ClientException as e:
            msg = (_('Failed to initialize connection for volume '
                     '%(vol_id)s to node %(node)s: %(err)s') %
                   {'vol_id': volume_id, 'node': node.uuid, 'err': e})
            LOG.error(msg)
            raise exception.StorageError(msg)

        LOG.info('Successfully initialized volume %(vol_id)s for '
                 'node %(node)s.', {'vol_id': volume_id, 'node': node.uuid})

        instance_uuid = node.instance_uuid or node.uuid

        try:
            # NOTE(TheJulia): The final step of the cinder volume
            # attachment process involves updating the volume
            # database record to indicate that the attachment has
            # been completed, which moves the volume to the
            # 'attached' state. This action also sets a mountpoint
            # for the volume, if known. In our use case, there is
            # no way for us to know what the mountpoint is inside of
            # the operating system, thus we send None.
            client.volumes.attach(volume_id, instance_uuid, None)

        except cinder_exceptions.ClientException as e:
            msg = (_('Failed to inform cinder that the attachment for volume '
                     '%(vol_id)s for node %(node)s has been completed: '
                     '%(err)s') %
                   {'vol_id': volume_id, 'node': node.uuid, 'err': e})
            LOG.error(msg)
            raise exception.StorageError(msg)

        try:
            # Set metadata to assist a user in volume identification
            client.volumes.set_metadata(
                volume_id,
                _create_metadata_dictionary(node, 'attached'))

        except cinder_exceptions.ClientException as e:
            # Metadata is best-effort only: failure is logged, not raised.
            LOG.warning('Failed to update volume metadata for volume '
                        '%(vol_id)s for node %(node)s: %(err)s',
                        {'vol_id': volume_id, 'node': node.uuid, 'err': e})
    return connected
|
||||||
|
|
||||||
|
|
||||||
|
def detach_volumes(task, volume_list, connector, allow_errors=False):
    """Detach a list of volumes from a provided connector detail.

    Enumerates through a provided list of volumes and issues
    detachment requests utilizing the connector information
    that describes the node.

    :param task: The TaskManager task representing the request.
    :param volume_list: The list of volume id values to detach.
    :param connector: Dictionary object representing the node sufficiently
                      to attach a volume. This value can vary based upon
                      the node's configuration, capability, and ultimately
                      the back-end storage driver. As cinder was designed
                      around iSCSI, the 'ip' and 'initiator' keys are
                      generally expected. For Fibre Channel, the key
                      'wwpns' can be used with a list of port addresses.
                      Some drivers support a 'multipath' boolean key,
                      although it is generally False. The 'host' key
                      is generally used for logging by drivers.
                      Example:

                      {
                      'wwpns': ['list','of','port','wwns']
                      'ip': 'ip address',
                      'initiator': 'initiator iqn',
                      'multipath': False,
                      'host': 'hostname'
                      }

    :param allow_errors: Boolean value governing if errors that are returned
                         are treated as warnings instead of exceptions.
                         Default False.
    :raises: TypeError If the supplied volume_list is not a sequence.
    :raises: StorageError
    """
    def _handle_errors(msg):
        # With allow_errors the failure is downgraded to a warning and the
        # caller's control flow continues; otherwise it becomes StorageError.
        if allow_errors:
            LOG.warning(msg)
        else:
            LOG.error(msg)
            raise exception.StorageError(msg)

    client = _init_client_for_operations(task)
    node = task.node

    for volume_id in volume_list:
        try:
            volume = client.volumes.get(volume_id)
        except cinder_exceptions.ClientException as e:
            _handle_errors(_('Failed to get volume %(vol_id)s from cinder for '
                             'node %(node)s: %(err)s') %
                           {'vol_id': volume_id, 'node': node.uuid, 'err': e})
            # If we do not raise an exception, we should move on to
            # the next volume since the volume could have been deleted
            # before we're attempting to power off the node.
            continue

        if not is_volume_attached(node, volume):
            LOG.debug('Volume %(vol_id)s is not attached to node '
                      '%(uuid)s: Skipping detachment.',
                      {'vol_id': volume_id, 'uuid': node.uuid})
            continue

        try:
            client.volumes.begin_detaching(volume_id)
        except cinder_exceptions.ClientException as e:
            _handle_errors(_('Failed to request detach for volume %(vol_id)s '
                             'from cinder for node %(node)s: %(err)s') %
                           {'vol_id': volume_id, 'node': node.uuid, 'err': e}
                           )
        # NOTE(jtaryma): This operation only updates the volume status, so
        # we can proceed the process of actual detachment if allow_errors
        # is set to True.
        try:
            # Remove the attachment
            client.volumes.terminate_connection(volume_id, connector)
        except cinder_exceptions.ClientException as e:
            _handle_errors(_('Failed to detach volume %(vol_id)s from node '
                             '%(node)s: %(err)s') %
                           {'vol_id': volume_id, 'node': node.uuid, 'err': e})
            # Skip proceeding with this method if we're not raising
            # errors. This will leave the volume in the detaching
            # state, but in that case something very unexpected
            # has occurred.
            continue

        # Attempt to identify the attachment id value to provide
        # accessible relationship data to leave in the cinder API
        # to enable reconciliation.
        attachment_id = _get_attachment_id(node, volume)
        try:
            # Update the API attachment record
            client.volumes.detach(volume_id, attachment_id)
        except cinder_exceptions.ClientException as e:
            _handle_errors(_('Failed to inform cinder that the detachment for '
                             'volume %(vol_id)s from node %(node)s has been '
                             'completed: %(err)s') %
                           {'vol_id': volume_id, 'node': node.uuid, 'err': e})
            # NOTE(jtaryma): This operation mainly updates the volume status,
            # so we can proceed the process of volume updating if allow_errors
            # is set to True.
        try:
            # Set metadata to assist in volume identification.
            client.volumes.set_metadata(
                volume_id,
                _create_metadata_dictionary(node, 'detached'))
        except cinder_exceptions.ClientException as e:
            # Metadata updates are best-effort: log and continue regardless
            # of the allow_errors setting.
            LOG.warning('Failed to update volume %(vol_id)s metadata for node '
                        '%(node)s: %(err)s',
                        {'vol_id': volume_id, 'node': node.uuid, 'err': e})
|
@ -738,3 +738,7 @@ class NotificationSchemaKeyError(IronicException):
|
|||||||
class NotificationPayloadError(IronicException):
|
class NotificationPayloadError(IronicException):
|
||||||
_msg_fmt = _("Payload not populated when trying to send notification "
|
_msg_fmt = _("Payload not populated when trying to send notification "
|
||||||
"\"%(class_name)s\"")
|
"\"%(class_name)s\"")
|
||||||
|
|
||||||
|
|
||||||
|
# Raised by the cinder common code when a storage subsystem operation
# (client creation, volume attach/detach) fails.
class StorageError(IronicException):
    _msg_fmt = _("Storage operation failure.")
|
||||||
|
@ -18,6 +18,7 @@ from oslo_config import cfg
|
|||||||
from ironic.conf import agent
|
from ironic.conf import agent
|
||||||
from ironic.conf import api
|
from ironic.conf import api
|
||||||
from ironic.conf import audit
|
from ironic.conf import audit
|
||||||
|
from ironic.conf import cinder
|
||||||
from ironic.conf import cisco
|
from ironic.conf import cisco
|
||||||
from ironic.conf import conductor
|
from ironic.conf import conductor
|
||||||
from ironic.conf import console
|
from ironic.conf import console
|
||||||
@ -48,6 +49,7 @@ CONF = cfg.CONF
|
|||||||
agent.register_opts(CONF)
|
agent.register_opts(CONF)
|
||||||
api.register_opts(CONF)
|
api.register_opts(CONF)
|
||||||
audit.register_opts(CONF)
|
audit.register_opts(CONF)
|
||||||
|
cinder.register_opts(CONF)
|
||||||
cisco.register_opts(CONF)
|
cisco.register_opts(CONF)
|
||||||
conductor.register_opts(CONF)
|
conductor.register_opts(CONF)
|
||||||
console.register_opts(CONF)
|
console.register_opts(CONF)
|
||||||
|
43
ironic/conf/cinder.py
Normal file
43
ironic/conf/cinder.py
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
|
||||||
|
from ironic.common.i18n import _
|
||||||
|
from ironic.conf import auth
|
||||||
|
|
||||||
|
# Options registered under the [cinder] group; auth options are added
# separately by register_opts() via ironic.conf.auth.
opts = [
    cfg.StrOpt('url',
               # NOTE: raw string — in a plain string '\/' is an invalid
               # Python escape sequence (SyntaxWarning on modern Python).
               # The '\/' escapes are redundant in a regex but harmless,
               # so the pattern itself is kept unchanged.
               regex=r'^http(s?):\/\/.+',
               help=_('URL for connecting to cinder. If set, the value must '
                      'start with either http:// or https://.')),
    cfg.IntOpt('retries',
               default=3,
               help=_('Client retries in the case of a failed request '
                      'connection.')),
]
|
||||||
|
|
||||||
|
|
||||||
|
def register_opts(conf):
    """Register the [cinder] options plus the cinder auth options."""
    group = 'cinder'
    conf.register_opts(opts, group=group)
    auth.register_auth_opts(conf, group)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
    """Return all [cinder] options, sorted by name, for option listing."""
    # NOTE(jtaryma): Function add_auth_opts uses deepcopy on passed array.
    #                Since deepcopy does not support regex, to enable regex
    #                protocol restriction for 'url' option, empty array is
    #                passed. The result is appended to opts array and
    #                resorted.
    combined = list(opts)
    combined.extend(auth.add_auth_opts([]))
    return sorted(combined, key=lambda opt: opt.name)
|
@ -35,6 +35,7 @@ _opts = [
|
|||||||
('api', ironic.conf.api.opts),
|
('api', ironic.conf.api.opts),
|
||||||
('audit', ironic.conf.audit.opts),
|
('audit', ironic.conf.audit.opts),
|
||||||
('cimc', ironic.conf.cisco.cimc_opts),
|
('cimc', ironic.conf.cisco.cimc_opts),
|
||||||
|
('cinder', ironic.conf.cinder.list_opts()),
|
||||||
('cisco_ucs', ironic.conf.cisco.ucsm_opts),
|
('cisco_ucs', ironic.conf.cisco.ucsm_opts),
|
||||||
('conductor', ironic.conf.conductor.opts),
|
('conductor', ironic.conf.conductor.opts),
|
||||||
('console', ironic.conf.console.opts),
|
('console', ironic.conf.console.opts),
|
||||||
|
711
ironic/tests/unit/common/test_cinder.py
Normal file
711
ironic/tests/unit/common/test_cinder.py
Normal file
@ -0,0 +1,711 @@
|
|||||||
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import mock
|
||||||
|
|
||||||
|
from cinderclient import exceptions as cinder_exceptions
|
||||||
|
import cinderclient.v3 as cinderclient
|
||||||
|
from oslo_utils import uuidutils
|
||||||
|
|
||||||
|
from six.moves import http_client
|
||||||
|
|
||||||
|
from ironic.common import cinder
|
||||||
|
from ironic.common import exception
|
||||||
|
from ironic.common import keystone
|
||||||
|
from ironic.conductor import task_manager
|
||||||
|
from ironic.tests import base
|
||||||
|
from ironic.tests.unit.conductor import mgr_utils
|
||||||
|
from ironic.tests.unit.db import base as db_base
|
||||||
|
from ironic.tests.unit.objects import utils as object_utils
|
||||||
|
|
||||||
|
|
||||||
|
@mock.patch.object(keystone, 'get_session', autospec=True)
class TestCinderSession(base.TestCase):
    """Tests for the module-level cinder keystone session cache."""

    def setUp(self):
        super(TestCinderSession, self).setUp()
        self.config(timeout=1,
                    retries=2,
                    group='cinder')

    def test__get_cinder_session(self, mock_keystone_session):
        """Check establishing new session when no session exists."""
        mock_keystone_session.return_value = 'session1'
        self.assertEqual('session1', cinder._get_cinder_session())
        mock_keystone_session.assert_called_once_with('cinder')

        """Check if existing session is used."""
        # The value cached by the first call must be returned without
        # calling keystone.get_session again.
        mock_keystone_session.reset_mock()
        mock_keystone_session.return_value = 'session2'
        self.assertEqual('session1', cinder._get_cinder_session())
        self.assertFalse(mock_keystone_session.called)
|
||||||
|
|
||||||
|
|
||||||
|
@mock.patch.object(cinder, '_get_cinder_session', autospec=True)
@mock.patch.object(cinderclient.Client, '__init__', autospec=True)
class TestCinderClient(base.TestCase):
    """Tests verifying the keyword arguments cinder.get_client() builds."""

    def setUp(self):
        super(TestCinderClient, self).setUp()
        self.config(timeout=1,
                    retries=2,
                    group='cinder')

    def test_get_client(self, mock_client_init, mock_session):
        # Default path: only retries and the cached session are passed.
        mock_session_obj = mock.Mock()
        expected = {'connect_retries': 2,
                    'session': mock_session_obj}
        mock_session.return_value = mock_session_obj
        mock_client_init.return_value = None
        cinder.get_client()
        mock_session.assert_called_once_with()
        mock_client_init.assert_called_once_with(mock.ANY, **expected)

    def test_get_client_with_endpoint_override(
            self, mock_client_init, mock_session):
        # [cinder]url must be forwarded as 'endpoint_override'.
        self.config(url='test-url', group='cinder')
        mock_session_obj = mock.Mock()
        expected = {'connect_retries': 2,
                    'endpoint_override': 'test-url',
                    'session': mock_session_obj}
        mock_session.return_value = mock_session_obj
        mock_client_init.return_value = None
        cinder.get_client()
        mock_client_init.assert_called_once_with(mock.ANY, **expected)
        mock_session.assert_called_once_with()

    def test_get_client_with_region(self, mock_client_init, mock_session):
        # [keystone]region_name must be forwarded as 'region_name'.
        mock_session_obj = mock.Mock()
        expected = {'connect_retries': 2,
                    'region_name': 'test-region',
                    'session': mock_session_obj}
        mock_session.return_value = mock_session_obj
        self.config(region_name='test-region',
                    group='keystone')
        mock_client_init.return_value = None
        cinder.get_client()
        mock_client_init.assert_called_once_with(mock.ANY, **expected)
        mock_session.assert_called_once_with()
|
||||||
|
|
||||||
|
|
||||||
|
class TestCinderUtils(db_base.DbTestCase):
    """Tests for the cinder module's volume-inspection helpers."""

    def setUp(self):
        super(TestCinderUtils, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake')
        self.config(enabled_drivers=['fake'])
        self.node = object_utils.create_test_node(
            self.context,
            instance_uuid=uuidutils.generate_uuid())

    def test_cinder_states(self):
        """Verify the expected cinder volume state constants."""
        self.assertEqual('available', cinder.AVAILABLE)
        self.assertEqual('in-use', cinder.IN_USE)

    def test_is_volume_available(self):
        """Available, or in-use multiattach, volumes count as available."""
        available_volumes = [
            mock.Mock(status=cinder.AVAILABLE, multiattach=False),
            mock.Mock(status=cinder.IN_USE, multiattach=True)]
        unavailable_volumes = [
            mock.Mock(status=cinder.IN_USE, multiattach=False),
            mock.Mock(status='fake-non-status', multiattach=True)]

        for vol in available_volumes:
            result = cinder.is_volume_available(vol)
            # NOTE: unittest assertions take ``msg``; the original
            # ``message=`` keyword raised TypeError whenever evaluated.
            self.assertTrue(result,
                            msg="Failed for status '%s'." % vol.status)

        for vol in unavailable_volumes:
            result = cinder.is_volume_available(vol)
            self.assertFalse(result,
                             msg="Failed for status '%s'." % vol.status)

    def test_is_volume_attached(self):
        """Volumes attached by node uuid or instance uuid are detected."""
        attached_vol = mock.Mock(id='foo', attachments=[
            {'server_id': self.node.uuid, 'attachment_id': 'meow'}])
        attached_vol2 = mock.Mock(id='bar', attachments=[
            {'server_id': self.node.instance_uuid, 'attachment_id': 'meow'}],)
        unattached = mock.Mock(attachments=[])
        self.assertTrue(cinder.is_volume_attached(self.node, attached_vol))
        self.assertTrue(cinder.is_volume_attached(self.node, attached_vol2))
        self.assertFalse(cinder.is_volume_attached(self.node, unattached))

    def test__get_attachment_id(self):
        """The attachment id for this node's attachment is returned."""
        expectation = 'meow'
        attached_vol = mock.Mock(attachments=[
            {'server_id': self.node.instance_uuid, 'attachment_id': 'meow'}])
        attached_vol2 = mock.Mock(attachments=[
            {'server_id': self.node.uuid, 'attachment_id': 'meow'}])
        unattached = mock.Mock(attachments=[])
        no_attachment = mock.Mock(attachments=[
            {'server_id': 'cat', 'id': 'cat'}])

        self.assertEqual(expectation,
                         cinder._get_attachment_id(self.node, attached_vol))
        self.assertEqual(expectation,
                         cinder._get_attachment_id(self.node, attached_vol2))
        self.assertIsNone(cinder._get_attachment_id(self.node, unattached))
        self.assertIsNone(cinder._get_attachment_id(self.node, no_attachment))

    def test__create_metadata_dictionary(self):
        """Metadata is keyed by node uuid with instance/action/time info."""
        expected_key = ("ironic_node_%s" % self.node.uuid)
        expected = {
            expected_key: {
                'instance_uuid': self.node.instance_uuid,
                'last_seen': 'faked-time',
                'last_action': 'meow'
            }
        }

        result = cinder._create_metadata_dictionary(self.node, 'meow')
        self.maxDiff = None
        # Since datetime is an internal, we can't exactly mock it.
        # We can however verify its presence, and replace it.
        self.assertIsInstance(result[expected_key]['last_seen'], str)
        result[expected_key]['last_seen'] = 'faked-time'
        self.assertDictEqual(expected, result)
|
@mock.patch.object(cinder, '_get_cinder_session', autospec=True)
@mock.patch.object(cinderclient.volumes.VolumeManager, 'set_metadata',
                   autospec=True)
@mock.patch.object(cinderclient.volumes.VolumeManager, 'get', autospec=True)
class TestCinderActions(db_base.DbTestCase):
    """Tests for cinder.attach_volumes and cinder.detach_volumes."""

    def setUp(self):
        super(TestCinderActions, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake')
        self.config(enabled_drivers=['fake'])
        self.node = object_utils.create_test_node(
            self.context,
            instance_uuid=uuidutils.generate_uuid())

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'attach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'initialize_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'reserve',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_attach_volumes(self, mock_create_meta, mock_is_attached,
                            mock_reserve, mock_init, mock_attach, mock_get,
                            mock_set_meta, mock_session):
        """Iterate once on a single volume with success."""

        volume_id = '111111111-0000-0000-0000-000000000003'
        expected = [{
            'driver_volume_type': 'iscsi',
            'data': {
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                'target_portal': '127.0.0.0.1:3260',
                'volume_id': volume_id,
                'target_lun': 2,
                'ironic_volume_uuid': '000-001'}}]
        volumes = [volume_id]

        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_is_attached.return_value = False
        mock_get.return_value = mock.Mock(attachments=[], uuid='000-001')

        mock_init.return_value = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                'target_portal': '127.0.0.0.1:3260',
                'target_lun': 2}}

        with task_manager.acquire(self.context, self.node.uuid) as task:
            attachments = cinder.attach_volumes(task, volumes, connector)

        self.assertDictEqual(expected[0], attachments[0])
        mock_reserve.assert_called_once_with(mock.ANY, volume_id)
        mock_init.assert_called_once_with(mock.ANY, volume_id, connector)
        mock_attach.assert_called_once_with(mock.ANY, volume_id,
                                            self.node.instance_uuid, None)
        mock_set_meta.assert_called_once_with(mock.ANY, volume_id,
                                              {'bar': 'baz'})
        mock_get.assert_called_once_with(mock.ANY, volume_id)

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'attach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'initialize_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'reserve',
                       autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_attach_volumes_one_attached(
            self, mock_create_meta, mock_reserve, mock_init, mock_attach,
            mock_get, mock_set_meta, mock_session):
        """Iterate with two volumes, one already attached."""

        volume_id = '111111111-0000-0000-0000-000000000003'
        expected = [
            {'driver_volume_type': 'iscsi',
             'data': {
                 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                 'target_portal': '127.0.0.0.1:3260',
                 'volume_id': volume_id,
                 'target_lun': 2,
                 'ironic_volume_uuid': '000-000'}},
            {'already_attached': True,
             'data': {
                 'volume_id': 'already_attached',
                 'ironic_volume_uuid': '000-001'}}]

        volumes = [volume_id, 'already_attached']

        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_get.side_effect = [
            mock.Mock(attachments=[], uuid='000-000'),
            mock.Mock(attachments=[{'server_id': self.node.uuid}],
                      uuid='000-001')
        ]

        mock_init.return_value = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                'target_portal': '127.0.0.0.1:3260',
                'target_lun': 2}}

        with task_manager.acquire(self.context, self.node.uuid) as task:
            attachments = cinder.attach_volumes(task, volumes, connector)

        self.assertEqual(expected, attachments)
        mock_reserve.assert_called_once_with(mock.ANY, volume_id)
        mock_init.assert_called_once_with(mock.ANY, volume_id, connector)
        mock_attach.assert_called_once_with(mock.ANY, volume_id,
                                            self.node.instance_uuid, None)
        mock_set_meta.assert_called_once_with(mock.ANY, volume_id,
                                              {'bar': 'baz'})

    # NOTE: autospec=True added for consistency with every other patch in
    # this module (the detach variant of this test already used it).
    @mock.patch.object(cinderclient.Client, '__init__', autospec=True)
    def test_attach_volumes_client_init_failure(
            self, mock_client, mock_get, mock_set_meta, mock_session):
        """Fail attachment when the cinder client cannot be initialized."""
        connector = {'foo': 'bar'}
        volumes = ['111111111-0000-0000-0000-000000000003']
        mock_client.side_effect = cinder_exceptions.BadRequest(400)

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.StorageError,
                              cinder.attach_volumes,
                              task,
                              volumes,
                              connector)

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'attach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'initialize_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'reserve',
                       autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_attach_volumes_vol_not_found(
            self, mock_create_meta, mock_reserve, mock_init, mock_attach,
            mock_get, mock_set_meta, mock_session):
        """Raise an error if the volume lookup fails"""

        def __mock_get_side_effect(*args, **kwargs):
            # Only the second volume in the list fails the lookup; the
            # third ('not_reached') must never be processed.
            if args[1] == 'not_found':
                raise cinder_exceptions.NotFound(404, message='error')
            else:
                return mock.Mock(attachments=[], uuid='000-000')

        volumes = ['111111111-0000-0000-0000-000000000003',
                   'not_found',
                   'not_reached']
        connector = {'foo': 'bar'}
        mock_get.side_effect = __mock_get_side_effect
        mock_create_meta.return_value = {'bar': 'baz'}

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.StorageError,
                              cinder.attach_volumes,
                              task,
                              volumes,
                              connector)
            mock_get.assert_any_call(mock.ANY,
                                     '111111111-0000-0000-0000-000000000003')
            mock_get.assert_any_call(mock.ANY, 'not_found')
            self.assertEqual(2, mock_get.call_count)
            mock_reserve.assert_called_once_with(
                mock.ANY, '111111111-0000-0000-0000-000000000003')
            mock_init.assert_called_once_with(
                mock.ANY, '111111111-0000-0000-0000-000000000003', connector)
            mock_attach.assert_called_once_with(
                mock.ANY, '111111111-0000-0000-0000-000000000003',
                self.node.instance_uuid, None)
            mock_set_meta.assert_called_once_with(
                mock.ANY, '111111111-0000-0000-0000-000000000003',
                {'bar': 'baz'})

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'reserve',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    def test_attach_volumes_reserve_failure(self, mock_is_attached,
                                            mock_reserve, mock_get,
                                            mock_set_meta, mock_session):
        """Fail attachment upon a volume reservation failure."""
        volumes = ['111111111-0000-0000-0000-000000000003']
        connector = {'foo': 'bar'}
        volume = mock.Mock(attachments=[])
        mock_get.return_value = volume
        mock_is_attached.return_value = False
        mock_reserve.side_effect = cinder_exceptions.NotAcceptable(406)

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.StorageError,
                              cinder.attach_volumes,
                              task,
                              volumes,
                              connector)
            mock_is_attached.assert_called_once_with(mock.ANY, volume)

    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'initialize_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'reserve',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_attach_volumes_initialize_connection_failure(
            self, mock_create_meta, mock_is_attached, mock_reserve, mock_init,
            mock_get, mock_set_meta, mock_session):
        """Fail attachment upon an initialization failure."""

        volume_id = '111111111-0000-0000-0000-000000000003'
        volumes = [volume_id]
        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_is_attached.return_value = False
        mock_get.return_value = mock.Mock(attachments=[])
        mock_init.side_effect = cinder_exceptions.NotAcceptable(406)

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.StorageError,
                              cinder.attach_volumes,
                              task,
                              volumes,
                              connector)

        mock_get.assert_called_once_with(mock.ANY, volume_id)
        mock_reserve.assert_called_once_with(mock.ANY, volume_id)
        mock_init.assert_called_once_with(mock.ANY, volume_id, connector)

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'attach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'initialize_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'reserve',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_attach_volumes_attach_record_failure(
            self, mock_create_meta, mock_is_attached, mock_reserve,
            mock_init, mock_attach, mock_get, mock_set_meta, mock_session):
        """Attach a volume and fail if final record failure occurs"""
        volume_id = '111111111-0000-0000-0000-000000000003'
        volumes = [volume_id]
        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_is_attached.return_value = False
        mock_get.return_value = mock.Mock(attachments=[], uuid='000-003')
        mock_init.return_value = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                'target_portal': '127.0.0.0.1:3260',
                'target_lun': 2}}
        mock_attach.side_effect = cinder_exceptions.ClientException(406,
                                                                    'error')

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.StorageError, cinder.attach_volumes,
                              task, volumes, connector)

        mock_reserve.assert_called_once_with(mock.ANY, volume_id)
        mock_init.assert_called_once_with(mock.ANY, volume_id, connector)
        mock_attach.assert_called_once_with(mock.ANY, volume_id,
                                            self.node.instance_uuid, None)
        mock_get.assert_called_once_with(mock.ANY, volume_id)
        mock_is_attached.assert_called_once_with(mock.ANY,
                                                 mock_get.return_value)

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'attach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'initialize_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'reserve',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    @mock.patch.object(cinder, 'LOG', autospec=True)
    def test_attach_volumes_attach_set_meta_failure(
            self, mock_log, mock_create_meta, mock_is_attached,
            mock_reserve, mock_init, mock_attach, mock_get, mock_set_meta,
            mock_session):
        """Attach a volume and tolerate set_metadata failure."""

        expected = [{
            'driver_volume_type': 'iscsi',
            'data': {
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                'target_portal': '127.0.0.0.1:3260',
                'volume_id': '111111111-0000-0000-0000-000000000003',
                'target_lun': 2,
                'ironic_volume_uuid': '000-000'}}]
        volume_id = '111111111-0000-0000-0000-000000000003'
        volumes = [volume_id]
        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_is_attached.return_value = False
        mock_get.return_value = mock.Mock(attachments=[], uuid='000-000')
        mock_init.return_value = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                'target_portal': '127.0.0.0.1:3260',
                'target_lun': 2}}
        mock_set_meta.side_effect = cinder_exceptions.NotAcceptable(406)

        with task_manager.acquire(self.context, self.node.uuid) as task:
            attachments = cinder.attach_volumes(task, volumes, connector)

        self.assertDictEqual(expected[0], attachments[0])
        mock_reserve.assert_called_once_with(mock.ANY, volume_id)
        mock_init.assert_called_once_with(mock.ANY, volume_id, connector)
        mock_attach.assert_called_once_with(mock.ANY, volume_id,
                                            self.node.instance_uuid, None)
        mock_set_meta.assert_called_once_with(mock.ANY, volume_id,
                                              {'bar': 'baz'})
        mock_get.assert_called_once_with(mock.ANY, volume_id)
        mock_is_attached.assert_called_once_with(mock.ANY,
                                                 mock_get.return_value)
        # The set_metadata failure is logged as a warning, not raised.
        self.assertTrue(mock_log.warning.called)

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'detach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'terminate_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'begin_detaching',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_detach_volumes(
            self, mock_create_meta, mock_is_attached, mock_begin, mock_term,
            mock_detach, mock_get, mock_set_meta, mock_session):
        """Iterate once and detach a volume without issues."""
        volume_id = '111111111-0000-0000-0000-000000000003'
        volumes = [volume_id]

        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_is_attached.return_value = True
        mock_get.return_value = mock.Mock(attachments=[
            {'server_id': self.node.uuid, 'attachment_id': 'qux'}])

        with task_manager.acquire(self.context, self.node.uuid) as task:
            cinder.detach_volumes(task, volumes, connector,
                                  allow_errors=False)

        mock_begin.assert_called_once_with(mock.ANY, volume_id)
        mock_term.assert_called_once_with(mock.ANY, volume_id, {'foo': 'bar'})
        mock_detach.assert_called_once_with(mock.ANY, volume_id, 'qux')
        mock_set_meta.assert_called_once_with(mock.ANY, volume_id,
                                              {'bar': 'baz'})

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'detach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'terminate_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'begin_detaching',
                       autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_detach_volumes_one_detached(
            self, mock_create_meta, mock_begin, mock_term, mock_detach,
            mock_get, mock_set_meta, mock_session):
        """Iterate with two volumes, one already detached."""
        volume_id = '111111111-0000-0000-0000-000000000003'
        volumes = [volume_id, 'detached']

        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}

        mock_get.side_effect = [
            mock.Mock(attachments=[
                {'server_id': self.node.uuid, 'attachment_id': 'qux'}]),
            mock.Mock(attachments=[])
        ]

        with task_manager.acquire(self.context, self.node.uuid) as task:
            cinder.detach_volumes(task, volumes, connector,
                                  allow_errors=False)

        mock_begin.assert_called_once_with(mock.ANY, volume_id)
        mock_term.assert_called_once_with(mock.ANY, volume_id, {'foo': 'bar'})
        mock_detach.assert_called_once_with(mock.ANY, volume_id, 'qux')
        mock_set_meta.assert_called_once_with(mock.ANY, volume_id,
                                              {'bar': 'baz'})

    @mock.patch.object(cinderclient.Client, '__init__', autospec=True)
    def test_detach_volumes_client_init_failure(
            self, mock_client, mock_get, mock_set_meta, mock_session):
        """Fail detachment when the cinder client cannot be initialized."""
        connector = {'foo': 'bar'}
        volumes = ['111111111-0000-0000-0000-000000000003']

        with task_manager.acquire(self.context, self.node.uuid) as task:
            mock_client.side_effect = cinder_exceptions.BadRequest(400)
            self.assertRaises(exception.StorageError,
                              cinder.detach_volumes,
                              task,
                              volumes,
                              connector)
            # While we would be permitting failures, this is an
            # exception that must be raised since the client
            # cannot be initialized.
            mock_client.side_effect = exception.InvalidParameterValue('error')
            self.assertRaises(exception.StorageError,
                              cinder.detach_volumes, task, volumes,
                              connector, allow_errors=True)

    def test_detach_volumes_vol_not_found(self, mock_get, mock_set_meta,
                                          mock_session):
        """Raise an error if the volume lookup fails"""
        volumes = ['vol1']
        connector = {'foo': 'bar'}
        mock_get.side_effect = cinder_exceptions.NotFound(
            404, message='error')

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.StorageError,
                              cinder.detach_volumes,
                              task,
                              volumes,
                              connector)
            self.assertFalse(mock_set_meta.called)
            # We should not raise any exception when issuing a command
            # with errors being permitted.
            cinder.detach_volumes(task, volumes, connector, allow_errors=True)
            self.assertFalse(mock_set_meta.called)

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'detach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'terminate_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'begin_detaching',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_detach_volumes_begin_detaching_failure(
            self, mock_create_meta, mock_is_attached, mock_begin, mock_term,
            mock_detach, mock_get, mock_set_meta, mock_session):
        """A begin_detaching failure raises unless errors are permitted."""
        volume_id = '111111111-0000-0000-0000-000000000003'
        volumes = [volume_id]
        connector = {'foo': 'bar'}
        volume = mock.Mock(attachments=[])
        mock_get.return_value = volume
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_is_attached.return_value = True
        mock_begin.side_effect = cinder_exceptions.NotAcceptable(406)

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.StorageError,
                              cinder.detach_volumes,
                              task,
                              volumes,
                              connector)
            mock_is_attached.assert_called_once_with(mock.ANY, volume)
            cinder.detach_volumes(task, volumes, connector, allow_errors=True)
            mock_term.assert_called_once_with(mock.ANY, volume_id,
                                              {'foo': 'bar'})
            mock_detach.assert_called_once_with(mock.ANY, volume_id, None)
            mock_set_meta.assert_called_once_with(mock.ANY, volume_id,
                                                  {'bar': 'baz'})

    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'terminate_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'begin_detaching',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_detach_volumes_term_failure(
            self, mock_create_meta, mock_is_attached, mock_begin, mock_term,
            mock_get, mock_set_meta, mock_session):
        """A terminate_connection failure raises unless errors are allowed."""
        volume_id = '111111111-0000-0000-0000-000000000003'
        volumes = [volume_id]
        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_is_attached.return_value = True
        mock_get.return_value = {'id': volume_id, 'attachments': []}
        mock_term.side_effect = cinder_exceptions.NotAcceptable(406)

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.StorageError,
                              cinder.detach_volumes,
                              task,
                              volumes,
                              connector)
            mock_begin.assert_called_once_with(mock.ANY, volume_id)
            mock_term.assert_called_once_with(mock.ANY, volume_id, connector)
            cinder.detach_volumes(task, volumes, connector, allow_errors=True)
            self.assertFalse(mock_set_meta.called)

    @mock.patch.object(cinderclient.volumes.VolumeManager, 'detach',
                       autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager,
                       'terminate_connection', autospec=True)
    @mock.patch.object(cinderclient.volumes.VolumeManager, 'begin_detaching',
                       autospec=True)
    @mock.patch.object(cinder, 'is_volume_attached', autospec=True)
    @mock.patch.object(cinder, '_create_metadata_dictionary', autospec=True)
    def test_detach_volumes_detach_meta_failure(
            self, mock_create_meta, mock_is_attached, mock_begin, mock_term,
            mock_detach, mock_get, mock_set_meta, mock_session):
        """detach/set_metadata failures are tolerated only when allowed."""
        volume_id = '111111111-0000-0000-0000-000000000003'
        volumes = [volume_id]

        connector = {'foo': 'bar'}
        mock_create_meta.return_value = {'bar': 'baz'}
        mock_is_attached.return_value = True
        mock_get.return_value = mock.Mock(attachments=[
            {'server_id': self.node.uuid, 'attachment_id': 'qux'}])

        with task_manager.acquire(self.context, self.node.uuid) as task:
            # Scenario 1: detach itself fails, but errors are permitted.
            mock_detach.side_effect = cinder_exceptions.NotAcceptable(
                http_client.NOT_ACCEPTABLE)
            cinder.detach_volumes(task, volumes, connector, allow_errors=True)
            mock_detach.assert_called_once_with(mock.ANY, volume_id, 'qux')
            mock_set_meta.assert_called_once_with(mock.ANY, volume_id,
                                                  {'bar': 'baz'})
            mock_detach.reset_mock()
            mock_set_meta.reset_mock()

            # Scenario 2: metadata update fails, but errors are permitted.
            mock_set_meta.side_effect = cinder_exceptions.NotAcceptable(
                http_client.NOT_ACCEPTABLE)
            cinder.detach_volumes(task, volumes, connector, allow_errors=True)
            mock_detach.assert_called_once_with(mock.ANY, volume_id, 'qux')
            mock_set_meta.assert_called_once_with(mock.ANY, volume_id,
                                                  {'bar': 'baz'})
            mock_detach.reset_mock()
            mock_set_meta.reset_mock()

            # Scenario 3: same failure with allow_errors=False must raise
            # before the metadata update is attempted.
            self.assertRaises(exception.StorageError,
                              cinder.detach_volumes,
                              task,
                              volumes,
                              connector,
                              allow_errors=False)
            mock_detach.assert_called_once_with(mock.ANY, volume_id, 'qux')
            self.assertFalse(mock_set_meta.called)
|
@@ -8,6 +8,7 @@ automaton>=0.5.0 # Apache-2.0
 eventlet!=0.18.3,>=0.18.2 # MIT
 WebOb>=1.6.0 # MIT
 paramiko>=2.0 # LGPLv2.1+
+python-cinderclient>=2.0.1 # Apache-2.0
 python-neutronclient>=5.1.0 # Apache-2.0
 python-glanceclient>=2.5.0 # Apache-2.0
 keystoneauth1>=2.18.0 # Apache-2.0
Loading…
Reference in New Issue
Block a user