Re-add Infortrend Cinder volume driver

The Infortrend driver was removed because it did not
meet the CI requirements.
The CI issues have now been fixed.

Change-Id: I60416fa93a4990a7a07ac662c448335289ba7417
Implements: blueprint readd-infortrend-driver
JohnnyChou 2017-11-29 10:16:21 +08:00 committed by Kuirong.Chen
parent ab6fbbec80
commit 99cb4a0b5d
14 changed files with 10196 additions and 0 deletions


@ -113,6 +113,8 @@ from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \
cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi
from cinder.volume.drivers import infinidat as cinder_volume_drivers_infinidat
from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli as \
cinder_volume_drivers_infortrend_raidcmd_cli_commoncli
from cinder.volume.drivers.inspur.as13000 import as13000_driver as \
cinder_volume_drivers_inspur_as13000_as13000driver
from cinder.volume.drivers.inspur.instorage import instorage_common as \
@ -252,6 +254,8 @@ def list_opts():
cinder_volume_driver.backup_opts,
cinder_volume_driver.image_opts,
cinder_volume_drivers_fusionstorage_dsware.volume_opts,
cinder_volume_drivers_infortrend_raidcmd_cli_commoncli.
infortrend_opts,
cinder_volume_drivers_inspur_as13000_as13000driver.
inspur_as13000_opts,
cinder_volume_drivers_inspur_instorage_instoragecommon.

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,387 @@
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fibre Channel Driver for Infortrend Eonstor based on CLI.
"""
from oslo_log import log as logging
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli
LOG = logging.getLogger(__name__)
@interface.volumedriver
class InfortrendCLIFCDriver(driver.FibreChannelDriver):
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Infortrend_Storage_CI"
VERSION = common_cli.InfortrendCommon.VERSION
def __init__(self, *args, **kwargs):
super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs)
self.common = common_cli.InfortrendCommon(
'FC', configuration=self.configuration)
self.VERSION = self.common.VERSION
def do_setup(self, context):
"""Any initialization the volume driver does while starting.
note: This runs before check_for_setup_error
"""
LOG.debug('do_setup start')
self.common.do_setup()
def check_for_setup_error(self):
LOG.debug('check_for_setup_error start')
self.common.check_for_setup_error()
def create_volume(self, volume):
"""Creates a volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
LOG.debug('create_volume volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(
'create_volume_from_snapshot volume id=%(volume_id)s '
'snapshot id=%(snapshot_id)s', {
'volume_id': volume['id'], 'snapshot_id': snapshot['id']})
return self.common.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.debug(
'create_cloned_volume volume id=%(volume_id)s '
'src_vref provider_location=%(provider_location)s', {
'volume_id': volume['id'],
'provider_location': src_vref['provider_location']})
return self.common.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
LOG.debug(
'extend_volume volume id=%(volume_id)s new size=%(size)s', {
'volume_id': volume['id'], 'size': new_size})
self.common.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
LOG.debug('delete_volume volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.delete_volume(volume)
def migrate_volume(self, ctxt, volume, host):
"""Migrate the volume to the specified host.
Returns a boolean indicating whether the migration occurred, as well as
model_update.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', {
'volume_id': volume['id'], 'host': host['host']})
return self.common.migrate_volume(volume, host)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug(
'create_snapshot snapshot id=%(snapshot_id)s '
'volume id=%(volume_id)s', {
'snapshot_id': snapshot['id'],
'volume_id': snapshot['volume_id']})
return self.common.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug(
'delete_snapshot snapshot id=%(snapshot_id)s '
'volume id=%(volume_id)s', {
'snapshot_id': snapshot['id'],
'volume_id': snapshot['volume_id']})
self.common.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume, connector):
"""Exports the volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
LOG.debug(
'create_export volume provider_location=%(provider_location)s', {
'provider_location': volume['provider_location']})
return self.common.create_export(context, volume)
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection information.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
The initiator_target_map is a map that represents the remote wwn(s)
and a list of wwns which are visible to the remote wwn(s).
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
'initiator_target_map': {
'1122334455667788': ['1234567890123']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'initiator_target_map': {
'1122334455667788': ['1234567890123',
'0987654321321']
}
}
}
"""
LOG.debug(
'initialize_connection volume id=%(volume_id)s '
'connector initiator=%(initiator)s', {
'volume_id': volume['id'],
'initiator': connector['initiator']})
return self.common.initialize_connection(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
LOG.debug('terminate_connection volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.terminate_connection(volume, connector)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
LOG.debug('get_volume_stats refresh=%(refresh)s', {
'refresh': refresh})
return self.common.get_volume_stats(refresh)
def manage_existing(self, volume, existing_ref):
"""Manage an existing lun in the array.
The LUN should be in a manageable pool backend; otherwise an
error is returned.
Rename the backend storage object so that it matches
volume['name'], which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
:param existing_ref: Driver-specific information used to identify
a volume
"""
LOG.debug(
'manage_existing volume: %(volume)s '
'existing_ref source: %(source)s', {
'volume': volume,
'source': existing_ref})
return self.common.manage_existing(volume, existing_ref)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
:param volume: Cinder volume to unmanage
"""
LOG.debug('unmanage volume id=%(volume_id)s', {
'volume_id': volume['id']})
self.common.unmanage(volume)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
LOG.debug(
'manage_existing_get_size volume: %(volume)s '
'existing_ref source: %(source)s', {
'volume': volume,
'source': existing_ref})
return self.common.manage_existing_get_size(volume, existing_ref)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug(
'retype volume id=%(volume_id)s new_type id=%(type_id)s', {
'volume_id': volume['id'], 'type_id': new_type['id']})
return self.common.retype(ctxt, volume, new_type, diff, host)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
LOG.debug(
'update migrated volume original volume id=%(volume_id)s '
'new volume id=%(new_volume_id)s', {
'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
return self.common.update_migrated_volume(ctxt, volume, new_volume,
original_volume_status)
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
"""List volumes on the backend available for management by Cinder."""
LOG.debug(
'get_manageable_volumes CALLED '
'cinder_volumes: %(volume)s, '
'marker: %(mkr)s, '
'limit: %(lmt)s, '
'offset: %(_offset)s, '
'sort_keys: %(s_key)s, '
'sort_dirs: %(sort_dir)s', {
'volume': cinder_volumes,
'mkr': marker,
'lmt': limit,
'_offset': offset,
's_key': sort_keys,
'sort_dir': sort_dirs
}
)
return self.common.get_manageable_volumes(cinder_volumes, marker,
limit, offset, sort_keys,
sort_dirs)
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Brings an existing backend storage object under Cinder management.
:param snapshot: Cinder volume snapshot to manage
:param existing_ref: Driver-specific information used to identify a
volume snapshot
"""
LOG.debug(
'manage_existing_snapshot CALLED '
'snapshot: %(si)s, '
'existing_ref: %(ref)s', {
'si': snapshot, 'ref': existing_ref
}
)
return self.common.manage_existing_snapshot(snapshot, existing_ref)
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Return size of snapshot to be managed by manage_existing.
:param snapshot: Cinder volume snapshot to manage
:param existing_ref: Driver-specific information used to identify a
volume snapshot
:returns size: Volume snapshot size in GiB (integer)
"""
LOG.debug(
'manage_existing_snapshot_get_size CALLED '
'snapshot: %(si)s, '
'existing_ref: %(ref)s', {
'si': snapshot, 'ref': existing_ref
}
)
return self.common.manage_existing_snapshot_get_size(snapshot,
existing_ref)
def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
sort_keys, sort_dirs):
"""List snapshots on the backend available for management by Cinder."""
LOG.debug(
'get_manageable_snapshots CALLED '
'cinder_snapshots: %(volume)s, '
'marker: %(mkr)s, '
'limit: %(lmt)s, '
'offset: %(_offset)s, '
'sort_keys: %(s_key)s, '
'sort_dirs: %(sort_dir)s', {
'volume': cinder_snapshots,
'mkr': marker,
'lmt': limit,
'_offset': offset,
's_key': sort_keys,
'sort_dir': sort_dirs
}
)
return self.common.get_manageable_snapshots(cinder_snapshots, marker,
limit, offset, sort_keys,
sort_dirs)
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
:param snapshot: Cinder volume snapshot to unmanage
"""
LOG.debug(
'unmanage_snapshot CALLED '
'snapshot: %(si)s', {
'si': snapshot
}
)
return self.common.unmanage_snapshot(snapshot)


@ -0,0 +1,363 @@
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iSCSI Driver for Infortrend Eonstor based on CLI.
"""
from oslo_log import log as logging
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli
LOG = logging.getLogger(__name__)
@interface.volumedriver
class InfortrendCLIISCSIDriver(driver.ISCSIDriver):
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Infortrend_Storage_CI"
VERSION = common_cli.InfortrendCommon.VERSION
def __init__(self, *args, **kwargs):
super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs)
self.common = common_cli.InfortrendCommon(
'iSCSI', configuration=self.configuration)
self.VERSION = self.common.VERSION
def do_setup(self, context):
"""Any initialization the volume driver does while starting.
note: This runs before check_for_setup_error
"""
LOG.debug('do_setup start')
self.common.do_setup()
def check_for_setup_error(self):
LOG.debug('check_for_setup_error start')
self.common.check_for_setup_error()
def create_volume(self, volume):
"""Creates a volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
LOG.debug('create_volume volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(
'create_volume_from_snapshot volume id=%(volume_id)s '
'snapshot id=%(snapshot_id)s', {
'volume_id': volume['id'], 'snapshot_id': snapshot['id']})
return self.common.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.debug(
'create_cloned_volume volume id=%(volume_id)s '
'src_vref provider_location=%(provider_location)s', {
'volume_id': volume['id'],
'provider_location': src_vref['provider_location']})
return self.common.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
LOG.debug(
'extend_volume volume id=%(volume_id)s new size=%(size)s', {
'volume_id': volume['id'], 'size': new_size})
self.common.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
LOG.debug('delete_volume volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.delete_volume(volume)
def migrate_volume(self, ctxt, volume, host):
"""Migrate the volume to the specified host.
Returns a boolean indicating whether the migration occurred, as well as
model_update.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', {
'volume_id': volume['id'], 'host': host['host']})
return self.common.migrate_volume(volume, host)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug(
'create_snapshot snapshot id=%(snapshot_id)s '
'volume_id=%(volume_id)s', {
'snapshot_id': snapshot['id'],
'volume_id': snapshot['volume_id']})
return self.common.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug(
'delete_snapshot snapshot id=%(snapshot_id)s '
'volume_id=%(volume_id)s', {
'snapshot_id': snapshot['id'],
'volume_id': snapshot['volume_id']})
self.common.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume, connector):
"""Exports the volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
LOG.debug(
'create_export volume provider_location=%(provider_location)s', {
'provider_location': volume['provider_location']})
return self.common.create_export(context, volume)
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection information.
The iscsi driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined in _get_iscsi_properties.
Example return value::
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260',
'volume_id': 1,
}
}
"""
LOG.debug(
'initialize_connection volume id=%(volume_id)s '
'connector initiator=%(initiator)s', {
'volume_id': volume['id'],
'initiator': connector['initiator']})
return self.common.initialize_connection(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
LOG.debug('terminate_connection volume id=%(volume_id)s', {
'volume_id': volume['id']})
self.common.terminate_connection(volume, connector)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
LOG.debug('get_volume_stats refresh=%(refresh)s', {
'refresh': refresh})
return self.common.get_volume_stats(refresh)
def manage_existing(self, volume, existing_ref):
"""Manage an existing lun in the array.
The LUN should be in a manageable pool backend; otherwise an
error is returned.
Rename the backend storage object so that it matches
volume['name'], which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
:param existing_ref: Driver-specific information used to identify
a volume
"""
LOG.debug(
'manage_existing volume: %(volume)s '
'existing_ref source: %(source)s', {
'volume': volume,
'source': existing_ref})
return self.common.manage_existing(volume, existing_ref)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
:param volume: Cinder volume to unmanage
"""
LOG.debug('unmanage volume id=%(volume_id)s', {
'volume_id': volume['id']})
self.common.unmanage(volume)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
LOG.debug(
'manage_existing_get_size volume: %(volume)s '
'existing_ref source: %(source)s', {
'volume': volume,
'source': existing_ref})
return self.common.manage_existing_get_size(volume, existing_ref)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug(
'retype volume id=%(volume_id)s new_type id=%(type_id)s', {
'volume_id': volume['id'], 'type_id': new_type['id']})
return self.common.retype(ctxt, volume, new_type, diff, host)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
LOG.debug(
'update migrated volume original volume id=%(volume_id)s '
'new volume id=%(new_volume_id)s', {
'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
return self.common.update_migrated_volume(ctxt, volume, new_volume,
original_volume_status)
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
"""List volumes on the backend available for management by Cinder."""
LOG.debug(
'get_manageable_volumes CALLED '
'cinder_volumes: %(volume)s, '
'marker: %(mkr)s, '
'limit: %(lmt)s, '
'offset: %(_offset)s, '
'sort_keys: %(s_key)s, '
'sort_dirs: %(sort_dir)s', {
'volume': cinder_volumes,
'mkr': marker,
'lmt': limit,
'_offset': offset,
's_key': sort_keys,
'sort_dir': sort_dirs
}
)
return self.common.get_manageable_volumes(cinder_volumes, marker,
limit, offset, sort_keys,
sort_dirs)
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Brings an existing backend storage object under Cinder management.
:param snapshot: Cinder volume snapshot to manage
:param existing_ref: Driver-specific information used to identify a
volume snapshot
"""
LOG.debug(
'manage_existing_snapshot CALLED '
'snapshot: %(si)s, '
'existing_ref: %(ref)s', {
'si': snapshot, 'ref': existing_ref
}
)
return self.common.manage_existing_snapshot(snapshot, existing_ref)
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Return size of snapshot to be managed by manage_existing.
:param snapshot: Cinder volume snapshot to manage
:param existing_ref: Driver-specific information used to identify a
volume snapshot
:returns size: Volume snapshot size in GiB (integer)
"""
LOG.debug(
'manage_existing_snapshot_get_size CALLED '
'snapshot: %(si)s, '
'existing_ref: %(ref)s', {
'si': snapshot, 'ref': existing_ref
}
)
return self.common.manage_existing_snapshot_get_size(snapshot,
existing_ref)
def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
sort_keys, sort_dirs):
"""List snapshots on the backend available for management by Cinder."""
LOG.debug(
'get_manageable_snapshots CALLED '
'cinder_snapshots: %(volume)s, '
'marker: %(mkr)s, '
'limit: %(lmt)s, '
'offset: %(_offset)s, '
'sort_keys: %(s_key)s, '
'sort_dirs: %(sort_dir)s', {
'volume': cinder_snapshots,
'mkr': marker,
'lmt': limit,
'_offset': offset,
's_key': sort_keys,
'sort_dir': sort_dirs
}
)
return self.common.get_manageable_snapshots(cinder_snapshots, marker,
limit, offset, sort_keys,
sort_dirs)
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
:param snapshot: Cinder volume snapshot to unmanage
"""
LOG.debug(
'unmanage_snapshot CALLED '
'snapshot: %(si)s', {
'si': snapshot
}
)
return self.common.unmanage_snapshot(snapshot)


@ -0,0 +1,887 @@
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Infortrend basic CLI factory.
"""
import abc
import os
import time
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import strutils
import six
from cinder import utils
LOG = logging.getLogger(__name__)
DEFAULT_RETRY_TIME = 5
def retry_cli(func):
def inner(self, *args, **kwargs):
total_retry_time = self.cli_retry_time
if total_retry_time is None:
total_retry_time = DEFAULT_RETRY_TIME
retry_time = 0
while retry_time < total_retry_time:
rc, out = func(self, *args, **kwargs)
retry_time += 1
if rc == 0:
break
LOG.error(
'Retry %(retry)s times: %(method)s Failed '
'%(rc)s: %(reason)s', {
'retry': retry_time,
'method': self.__class__.__name__,
'rc': rc,
'reason': out})
# error already logged; do not retry on these return codes
if rc == 1:
# RAID return fail
break
elif rc == 11:
# rc == 11 means not exist
break
elif rc == 20:
# rc == 20 means already exist
break
LOG.debug(
'Method: %(method)s Return Code: %(rc)s '
'Output: %(out)s', {
'method': self.__class__.__name__, 'rc': rc, 'out': out})
return rc, out
return inner
def os_execute(fd, raidcmd_timeout, command_line):
os.write(fd, command_line.encode('utf-8'))
return os_read(fd, 8192, 'RAIDCmd:>', raidcmd_timeout)
def os_read(fd, buffer_size, cmd_pattern, raidcmd_timeout):
content = ''
start_time = int(time.time())
while True:
time.sleep(0.5)
output = os.read(fd, buffer_size)
if len(output) > 0:
content += output.decode('utf-8')
if content.find(cmd_pattern) >= 0:
break
if int(time.time()) - start_time > raidcmd_timeout:
content = 'Raidcmd timeout: %s' % content
LOG.error(
'Raidcmd exceeds cli timeout [%(timeout)s]s.', {
'timeout': raidcmd_timeout})
break
return content
def strip_empty_in_list(list):
result = []
for entry in list:
entry = entry.strip()
if entry != "":
result.append(entry)
return result
def table_to_dict(table):
tableHeader = table[0].split(" ")
tableHeaderList = strip_empty_in_list(tableHeader)
result = []
for i in range(len(table) - 2):
if table[i + 2].strip() == "":
break
resultEntry = {}
tableEntry = table[i + 2].split(" ")
tableEntryList = strip_empty_in_list(tableEntry)
for key, value in zip(tableHeaderList, tableEntryList):
resultEntry[key] = value
result.append(resultEntry)
return result
def content_lines_to_dict(content_lines):
result = []
resultEntry = {}
for content_line in content_lines:
if content_line.strip() == "":
result.append(resultEntry)
resultEntry = {}
continue
split_entry = content_line.strip().split(": ", 1)
resultEntry[split_entry[0]] = split_entry[1]
return result
@six.add_metaclass(abc.ABCMeta)
class BaseCommand(object):
"""The BaseCommand abstract class."""
def __init__(self):
super(BaseCommand, self).__init__()
@abc.abstractmethod
def execute(self, *args, **kwargs):
pass
class ShellCommand(BaseCommand):
"""The Common ShellCommand."""
def __init__(self, cli_conf):
super(ShellCommand, self).__init__()
self.cli_retry_time = cli_conf.get('cli_retry_time')
@retry_cli
def execute(self, *args, **kwargs):
commands = ' '.join(args)
result = None
rc = 0
try:
result, err = utils.execute(commands, shell=True)
except processutils.ProcessExecutionError as pe:
rc = pe.exit_code
result = pe.stdout
result = result.replace('\n', '\\n')
LOG.error(
'Error on execute command. '
'Error code: %(exit_code)d Error msg: %(result)s', {
'exit_code': pe.exit_code, 'result': result})
return rc, result
class ExecuteCommand(BaseCommand):
"""The Cinder Filter Command."""
def __init__(self, cli_conf):
super(ExecuteCommand, self).__init__()
self.cli_retry_time = cli_conf.get('cli_retry_time')
@retry_cli
def execute(self, *args, **kwargs):
result = None
rc = 0
try:
result, err = utils.execute(*args, **kwargs)
except processutils.ProcessExecutionError as pe:
rc = pe.exit_code
result = pe.stdout
result = result.replace('\n', '\\n')
LOG.error(
'Error on execute command. '
'Error code: %(exit_code)d Error msg: %(result)s', {
'exit_code': pe.exit_code, 'result': result})
return rc, result
class CLIBaseCommand(BaseCommand):
"""The CLIBaseCommand class."""
def __init__(self, cli_conf):
super(CLIBaseCommand, self).__init__()
self.cli_retry_time = cli_conf.get('cli_retry_time')
self.raidcmd_timeout = cli_conf.get('raidcmd_timeout')
self.cli_cache = cli_conf.get('cli_cache')
self.pid = cli_conf.get('pid')
self.fd = cli_conf.get('fd')
self.command = ""
self.parameters = ()
self.show_noinit = ""
self.command_line = ""
def _generate_command(self, parameters):
"""Generate execute Command. use java, execute, command, parameters."""
self.parameters = parameters
parameters_line = ' '.join(parameters)
self.command_line = "{0} {1} {2}\n".format(
self.command,
parameters_line,
self.show_noinit)
return self.command_line
def _parser(self, content=None):
"""The parser to parse command result.
:param content: The parse Content
:returns: parse result
"""
content = content.replace("\r", "")
content = content.replace("\\/-", "")
content = content.strip()
LOG.debug(content)
if content is not None:
content_lines = content.split("\n")
rc, out = self._parse_return(content_lines)
if rc != 0:
return rc, out
else:
return rc, content_lines
return -1, None
@retry_cli
def execute(self, *args, **kwargs):
command_line = self._generate_command(args)
LOG.debug('Executing: %(command)s', {
'command': strutils.mask_password(command_line)})
rc = 0
result = None
try:
content = self._execute(command_line)
rc, result = self._parser(content)
except processutils.ProcessExecutionError as pe:
rc = -2  # avoid confusion with real CLI return codes
result = pe.stdout
result = result.replace('\n', '\\n')
LOG.error(
'Error on execute %(command)s. '
'Error code: %(exit_code)d Error msg: %(result)s', {
'command': strutils.mask_password(command_line),
'exit_code': pe.exit_code,
'result': result})
return rc, result
def _execute(self, command_line):
return os_execute(
self.fd, self.raidcmd_timeout, command_line)
def _parse_return(self, content_lines):
"""Get the end of command line result."""
rc = 0
if 'Raidcmd timeout' in content_lines[0]:
rc = -3
return_cli_result = content_lines
elif len(content_lines) < 4:
rc = -4
return_cli_result = 'Raidcmd output error: %s' % content_lines
else:
return_value = content_lines[-3].strip().split(' ', 1)[1]
return_cli_result = content_lines[-4].strip().split(' ', 1)[1]
rc = int(return_value, 16)
return rc, return_cli_result
class ConnectRaid(CLIBaseCommand):
"""The Connect Raid Command."""
def __init__(self, *args, **kwargs):
super(ConnectRaid, self).__init__(*args, **kwargs)
self.command = "connect"
class CheckConnection(CLIBaseCommand):
"""The Check Connection Command."""
def __init__(self, *args, **kwargs):
super(CheckConnection, self).__init__(*args, **kwargs)
self.command = "lock"
class InitCache(CLIBaseCommand):
"""Refresh cacahe data for update volume status."""
def __init__(self, *args, **kwargs):
super(InitCache, self).__init__(*args, **kwargs)
self.command = "utility init-cache"
class CreateLD(CLIBaseCommand):
"""The Create LD Command."""
def __init__(self, *args, **kwargs):
super(CreateLD, self).__init__(*args, **kwargs)
self.command = "create ld"
class CreateLV(CLIBaseCommand):
"""The Create LV Command."""
def __init__(self, *args, **kwargs):
super(CreateLV, self).__init__(*args, **kwargs)
self.command = "create lv"
class CreatePartition(CLIBaseCommand):
"""Create Partition.
create part
[LV-ID] [name] [size={partition-size}]
[min={minimal-reserve-size}] [init={switch}]
[tier={tier-level-list}]
"""
def __init__(self, *args, **kwargs):
super(CreatePartition, self).__init__(*args, **kwargs)
self.command = "create part"
class DeletePartition(CLIBaseCommand):
"""Delete Partition.
delete part [partition-ID] [-y]
"""
def __init__(self, *args, **kwargs):
super(DeletePartition, self).__init__(*args, **kwargs)
self.command = "delete part"
class SetPartition(CLIBaseCommand):
"""Set Partition.
set part
[partition-ID] [name={partition-name}] [min={minimal-reserve-size}]
set part expand [partition-ID] [size={expand-size}]
set part purge [partition-ID] [number] [rule-type]
set part reclaim [partition-ID]
set part tier-resided [partition-ID] tier={tier-level-list}
"""
def __init__(self, *args, **kwargs):
super(SetPartition, self).__init__(*args, **kwargs)
self.command = "set part"
class SetLV(CLIBaseCommand):
"""Set Logical Volume.
set lv tier-migrate [LV-ID] [part={partition-IDs}]
"""
def __init__(self, *args, **kwargs):
super(SetLV, self).__init__(*args, **kwargs)
self.command = "set lv"
class SetSnapshot(CLIBaseCommand):
"""Set Logical Volume.
set lv tier-migrate [LV-ID] [part={partition-IDs}]
"""
def __init__(self, *args, **kwargs):
super(SetSnapshot, self).__init__(*args, **kwargs)
self.command = "set si"
class CreateMap(CLIBaseCommand):
"""Map the Partition on the channel.
create map
[part] [partition-ID] [Channel-ID]
[Target-ID] [LUN-ID] [assign={assign-to}]
"""
def __init__(self, *args, **kwargs):
super(CreateMap, self).__init__(*args, **kwargs)
self.command = "create map"
class DeleteMap(CLIBaseCommand):
"""Unmap the Partition on the channel.
delete map
[part] [partition-ID] [Channel-ID]
[Target-ID] [LUN-ID] [-y]
"""
def __init__(self, *args, **kwargs):
super(DeleteMap, self).__init__(*args, **kwargs)
self.command = "delete map"
class CreateSnapshot(CLIBaseCommand):
"""Create partition's Snapshot.
create si [part] [partition-ID]
"""
def __init__(self, *args, **kwargs):
super(CreateSnapshot, self).__init__(*args, **kwargs)
self.command = "create si"
class DeleteSnapshot(CLIBaseCommand):
"""Delete partition's Snapshot.
delete si [snapshot-image-ID] [-y]
"""
def __init__(self, *args, **kwargs):
super(DeleteSnapshot, self).__init__(*args, **kwargs)
self.command = "delete si"
class CreateReplica(CLIBaseCommand):
"""Create partition or snapshot's replica.
create replica
[name] [part | si] [source-volume-ID]
[part] [target-volume-ID] [type={replication-mode}]
[priority={level}] [desc={description}]
[incremental={switch}] [timeout={value}]
[compression={switch}]
"""
def __init__(self, *args, **kwargs):
super(CreateReplica, self).__init__(*args, **kwargs)
self.command = "create replica"
class DeleteReplica(CLIBaseCommand):
"""Delete and terminate specific replication job.
delete replica [volume-pair-ID] [-y]
"""
def __init__(self, *args, **kwargs):
super(DeleteReplica, self).__init__(*args, **kwargs)
self.command = "delete replica"
class CreateIQN(CLIBaseCommand):
"""Create host iqn for CHAP or lun filter.
create iqn
[IQN] [IQN-alias-name] [user={username}] [password={secret}]
[target={name}] [target-password={secret}] [ip={ip-address}]
[mask={netmask-ip}]
"""
def __init__(self, *args, **kwargs):
super(CreateIQN, self).__init__(*args, **kwargs)
self.command = "create iqn"
class DeleteIQN(CLIBaseCommand):
"""Delete host iqn by name.
delete iqn [name]
"""
def __init__(self, *args, **kwargs):
super(DeleteIQN, self).__init__(*args, **kwargs)
self.command = "delete iqn"
class SetIOTimeout(CLIBaseCommand):
"""Set CLI IO timeout.
utility set io-timeout [time]
"""
def __init__(self, *args, **kwargs):
super(SetIOTimeout, self).__init__(*args, **kwargs)
self.command = "utility set io-timeout"
class ShowCommand(CLIBaseCommand):
"""Basic Show Command."""
def __init__(self, *args, **kwargs):
super(ShowCommand, self).__init__(*args, **kwargs)
self.param_detail = "-l"
self.default_type = "table"
self.start_key = ""
if self.cli_cache:
self.show_noinit = "-noinit"
def _parser(self, content=None):
"""Parse Table or Detail format into dict.
# Table format
ID Name LD-amount
----------------------
123 LV-1 1
# Result
{
'ID': '123',
'Name': 'LV-1',
'LD-amount': '1'
}
# Detail format
ID: 5DE94FF775D81C30
Name: LV-1
LD-amount: 1
# Result
{
'ID': '5DE94FF775D81C30',
'Name': 'LV-1',
'LD-amount': '1'
}
:param content: the content to parse.
:returns: parse result
"""
rc, out = super(ShowCommand, self)._parser(content)
# Error.
if rc != 0:
return rc, out
# No content.
if len(out) < 6:
return rc, []
detect_type = self.detect_type()
# Show detail content.
if detect_type == "list":
start_id = self.detect_detail_start_index(out)
if start_id < 0:
return rc, []
result = content_lines_to_dict(out[start_id:-3])
else:
start_id = self.detect_table_start_index(out)
if start_id < 0:
return rc, []
result = table_to_dict(out[start_id:-4])
return rc, result
def detect_type(self):
if self.param_detail in self.parameters:
detect_type = "list"
else:
detect_type = self.default_type
return detect_type
def detect_table_start_index(self, content):
for i in range(1, len(content)):
key = content[i].strip().split(' ')
if self.start_key in key[0].strip():
return i
return -1
def detect_detail_start_index(self, content):
for i in range(1, len(content)):
split_entry = content[i].strip().split(' ')
if len(split_entry) >= 2 and ':' in split_entry[0]:
return i
return -1
class ShowLD(ShowCommand):
"""Show LD.
show ld [index-list]
"""
def __init__(self, *args, **kwargs):
super(ShowLD, self).__init__(*args, **kwargs)
self.command = "show ld"
class ShowLV(ShowCommand):
"""Show LV.
show lv [lv={LV-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowLV, self).__init__(*args, **kwargs)
self.command = "show lv"
self.start_key = "ID"
self.show_noinit = ""
def detect_table_start_index(self, content):
if "tier" in self.parameters:
self.start_key = "LV-Name"
for i in range(1, len(content)):
key = content[i].strip().split(' ')
if self.start_key in key[0].strip():
return i
return -1
class ShowPartition(ShowCommand):
"""Show Partition.
show part [part={partition-IDs} | lv={LV-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowPartition, self).__init__(*args, **kwargs)
self.command = "show part"
self.start_key = "ID"
self.show_noinit = ""
class ShowSnapshot(ShowCommand):
"""Show Snapshot.
show si [si={snapshot-image-IDs} | part={partition-IDs} | lv={LV-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowSnapshot, self).__init__(*args, **kwargs)
self.command = "show si"
self.start_key = "Index"
class ShowDevice(ShowCommand):
"""Show Device.
show device
"""
def __init__(self, *args, **kwargs):
super(ShowDevice, self).__init__(*args, **kwargs)
self.command = "show device"
self.start_key = "Index"
class ShowChannel(ShowCommand):
"""Show Channel.
show channel
"""
def __init__(self, *args, **kwargs):
super(ShowChannel, self).__init__(*args, **kwargs)
self.command = "show channel"
self.start_key = "Ch"
class ShowDisk(ShowCommand):
"""The Show Disk Command.
show disk [disk-index-list | channel={ch}]
"""
def __init__(self, *args, **kwargs):
super(ShowDisk, self).__init__(*args, **kwargs)
self.command = "show disk"
class ShowMap(ShowCommand):
"""Show Map.
show map [part={partition-IDs} | channel={channel-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowMap, self).__init__(*args, **kwargs)
self.command = "show map"
self.start_key = "Ch"
class ShowNet(ShowCommand):
"""Show IP network.
show net [id={channel-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowNet, self).__init__(*args, **kwargs)
self.command = "show net"
self.start_key = "ID"
class ShowLicense(ShowCommand):
"""Show License.
show license
"""
def __init__(self, *args, **kwargs):
super(ShowLicense, self).__init__(*args, **kwargs)
self.command = "show license"
self.start_key = "License"
def _parser(self, content=None):
"""Parse License format.
# License format
License Amount(Partition/Subsystem) Expired
------------------------------------------------
EonPath --- True
# Result
{
'EonPath': {
'Amount': '---',
'Support': True
}
}
:param content: the content to parse.
:returns: parse result
"""
rc, out = super(ShowLicense, self)._parser(content)
if rc != 0:
return rc, out
if len(out) > 0:
result = {}
for entry in out:
if entry['Expired'] == '---' or entry['Expired'] == 'Expired':
support = False
else:
support = True
result[entry['License']] = {
'Amount':
entry['Amount(Partition/Subsystem)'],
'Support': support
}
return rc, result
return rc, []
class ShowReplica(ShowCommand):
"""Show information of all replication jobs or specific job.
show replica [id={volume-pair-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowReplica, self).__init__(*args, **kwargs)
self.command = 'show replica'
self.show_noinit = ""
class ShowWWN(ShowCommand):
"""Show Fibre network.
show wwn
"""
def __init__(self, *args, **kwargs):
super(ShowWWN, self).__init__(*args, **kwargs)
self.command = "show wwn"
self.start_key = "CH"
class ShowIQN(ShowCommand):
"""Show iSCSI initiator IQN which is set by create iqn.
show iqn
"""
LIST_START_LINE = "List of initiator IQN(s):"
def __init__(self, *args, **kwargs):
super(ShowIQN, self).__init__(*args, **kwargs)
self.command = "show iqn"
self.default_type = "list"
def detect_detail_start_index(self, content):
for i in range(1, len(content)):
if content[i].strip() == self.LIST_START_LINE:
return i + 2
return -1
class ShowHost(ShowCommand):
"""Show host settings.
show host
"""
def __init__(self, *args, **kwargs):
super(ShowHost, self).__init__(*args, **kwargs)
self.command = "show host"
self.default_type = "list"
def detect_detail_start_index(self, content):
for i in range(1, len(content)):
if ':' in content[i]:
return i
return -1

File diff suppressed because it is too large


@ -0,0 +1,143 @@
========================
Infortrend volume driver
========================
The `Infortrend <http://www.infortrend.com/global>`__ volume driver is a Block Storage driver
providing iSCSI and Fibre Channel support for Infortrend storage systems.
Supported operations
~~~~~~~~~~~~~~~~~~~~
The Infortrend volume driver supports the following volume operations:
* Create, delete, attach, and detach volumes.
* Create and delete a snapshot.
* Create a volume from a snapshot.
* Copy an image to a volume.
* Copy a volume to an image.
* Clone a volume.
* Extend a volume.
* Retype a volume.
* Manage and unmanage a volume.
* Migrate a volume with back-end assistance.
* Live migrate an instance with volumes hosted on an Infortrend backend.
System requirements
~~~~~~~~~~~~~~~~~~~
To use the Infortrend volume driver, the following settings are required:
Set up Infortrend storage
-------------------------
* Create logical volumes in advance.
* Host side setting ``Peripheral device type`` should be
``No Device Present (Type=0x7f)``.
Set up cinder-volume node
-------------------------
* Install JRE 7 or later.
* Download the Infortrend storage CLI from the
`release page <https://github.com/infortrend-openstack/infortrend-cinder-driver/releases>`__.
Choose the ``raidcmd_ESDS10.jar`` file, which is listed under the
v2.1.3 release on the GitHub releases page, and place it in the
default path ``/opt/bin/Infortrend/``, as sketched below.
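A minimal sketch of these steps, assuming the jar has already been
downloaded from the releases page above (take the exact asset URL
from that page) and saved to the current directory:
.. code-block:: console
$ java -version
$ sudo mkdir -p /opt/bin/Infortrend
$ sudo cp raidcmd_ESDS10.jar /opt/bin/Infortrend/raidcmd_ESDS10.jar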
Driver configuration
~~~~~~~~~~~~~~~~~~~~
On ``cinder-volume`` nodes, configure the driver in
``/etc/cinder/cinder.conf`` using the following options:
Driver options
--------------
.. include:: ../../tables/cinder-infortrend.inc
iSCSI configuration example
---------------------------
.. code-block:: ini
[DEFAULT]
default_volume_type = IFT-ISCSI
enabled_backends = IFT-ISCSI
[IFT-ISCSI]
volume_driver = cinder.volume.drivers.infortrend.infortrend_iscsi_cli.InfortrendCLIISCSIDriver
volume_backend_name = IFT-ISCSI
infortrend_pools_name = POOL-1,POOL-2
san_ip = MANAGEMENT_PORT_IP
infortrend_slots_a_channels_id = 0,1,2,3
infortrend_slots_b_channels_id = 0,1,2,3
Fibre Channel configuration example
-----------------------------------
.. code-block:: ini
[DEFAULT]
default_volume_type = IFT-FC
enabled_backends = IFT-FC
[IFT-FC]
volume_driver = cinder.volume.drivers.infortrend.infortrend_fc_cli.InfortrendCLIFCDriver
volume_backend_name = IFT-FC
infortrend_pools_name = POOL-1,POOL-2,POOL-3
san_ip = MANAGEMENT_PORT_IP
infortrend_slots_a_channels_id = 4,5
Multipath configuration
-----------------------
* Enable multipath for image transfer in ``/etc/cinder/cinder.conf``.
.. code-block:: ini
use_multipath_for_image_xfer = True
Restart the ``cinder-volume`` service.
* Enable multipath for volume attach and detach in ``/etc/nova/nova.conf``.
.. code-block:: ini
[libvirt]
...
volume_use_multipath = True
...
Restart the ``nova-compute`` service.
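Service unit names vary by distribution; as a hedged example, on an
Ubuntu-style systemd installation the restarts above typically are:
.. code-block:: console
$ sudo systemctl restart cinder-volume
$ sudo systemctl restart nova-compute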
Extra spec usage
----------------
* ``infortrend:provisioning`` - Defaults to ``full`` provisioning;
the valid values are ``thin`` and ``full``.
* ``infortrend:tiering`` - Defaults to ``all`` tiering;
the valid values are subsets of 0, 1, 2, 3.
If multiple pools are configured in ``cinder.conf``,
the value can be specified per pool, separated by semicolons.
For example:
``infortrend:provisioning``: ``POOL-1:thin; POOL-2:full``
``infortrend:tiering``: ``POOL-1:all; POOL-2:0; POOL-3:0,1,3``
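These extra specs are set on a volume type. A minimal sketch using the
standard ``openstack`` client, assuming a volume type named
``IFT-ISCSI`` exists (matching the configuration example above) and
reusing the illustrative pool values:
.. code-block:: console
$ openstack volume type set --property infortrend:provisioning='POOL-1:thin; POOL-2:full' IFT-ISCSI
$ openstack volume type set --property infortrend:tiering='POOL-1:all; POOL-2:0; POOL-3:0,1,3' IFT-ISCSI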
For more details, see `Infortrend documents <http://www.infortrend.com/ImageLoader/LoadDoc/715>`_.


@ -0,0 +1,38 @@
..
Warning: Do not edit this file. It is automatically generated from the
software project's code and your changes will be overwritten.
The tool to generate this file lives in openstack-doc-tools repository.
Please make any changes needed in the code, then run the
autogenerate-config-doc tool from the openstack-doc-tools repository, or
ask for help on the documentation mailing list, IRC channel or meeting.
.. _cinder-infortrend:
.. list-table:: Description of Infortrend volume driver configuration options
:header-rows: 1
:class: config-ref-table
* - Configuration option = Default value
- Description
* - **[DEFAULT]**
-
* - ``infortrend_cli_max_retries`` = ``5``
- (Integer) The maximum number of retries when a command fails.
* - ``infortrend_cli_path`` = ``/opt/bin/Infortrend/raidcmd_ESDS10.jar``
- (String) The Infortrend CLI absolute path.
* - ``infortrend_cli_timeout`` = ``60``
- (Integer) The timeout for CLI in seconds.
* - ``infortrend_cli_cache`` = ``False``
- (Boolean) Enable the Infortrend CLI cache. Enable it only if the array is managed exclusively by OpenStack and used by a single cinder-volume node; otherwise the cached data can become stale when other operations modify the array.
* - ``infortrend_pools_name`` = ``None``
- (String) A comma-separated list of Infortrend logical volume names.
* - ``infortrend_iqn_prefix`` = ``iqn.2002-10.com.infortrend``
- (String) The Infortrend IQN prefix for iSCSI.
* - ``infortrend_slots_a_channels_id`` = ``None``
- (String) A comma-separated list of Infortrend RAID channel IDs on slot A for OpenStack usage.
* - ``infortrend_slots_b_channels_id`` = ``None``
- (String) A comma-separated list of Infortrend RAID channel IDs on slot B for OpenStack usage.
* - ``java_path`` = ``/usr/bin/java``
- (String) The Java absolute path.


@ -102,6 +102,9 @@ title=IBM XIV Storage Driver (iSCSI, FC)
[driver.infinidat]
title=Infinidat Storage Driver (iSCSI, FC)
[driver.infortrend]
title=Infortrend Storage Driver (iSCSI, FC)
[driver.inspur]
title=Inspur G2 Storage Driver (iSCSI, FC)
@ -225,6 +228,7 @@ driver.ibm_flashsystem=complete
driver.ibm_gpfs=complete
driver.ibm_storwize=complete
driver.ibm_xiv=complete
driver.infortrend=complete
driver.inspur=complete
driver.inspur_as13000=complete
driver.kaminario=complete
@ -288,6 +292,7 @@ driver.ibm_flashsystem=complete
driver.ibm_gpfs=complete
driver.ibm_storwize=complete
driver.ibm_xiv=complete
driver.infortrend=complete
driver.inspur=complete
driver.inspur_as13000=complete
driver.kaminario=complete
@ -351,6 +356,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=complete
driver.ibm_xiv=missing
driver.infortrend=missing
driver.inspur=complete
driver.inspur_as13000=missing
driver.kaminario=missing
@ -417,6 +423,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=complete
driver.ibm_xiv=missing
driver.infortrend=missing
driver.inspur=complete
driver.inspur_as13000=missing
driver.kaminario=missing
@ -482,6 +489,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=complete
driver.ibm_xiv=complete
driver.infortrend=complete
driver.inspur=complete
driver.inspur_as13000=missing
driver.kaminario=complete
@ -548,6 +556,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=complete
driver.ibm_xiv=complete
driver.infortrend=missing
driver.inspur=complete
driver.inspur_as13000=missing
driver.kaminario=missing
@ -613,6 +622,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=missing
driver.ibm_xiv=missing
driver.infortrend=complete
driver.inspur=missing
driver.inspur_as13000=complete
driver.kaminario=complete
@ -679,6 +689,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=missing
driver.ibm_xiv=missing
driver.infortrend=complete
driver.inspur=missing
driver.inspur_as13000=missing
driver.kaminario=missing
@ -745,6 +756,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=complete
driver.ibm_xiv=complete
driver.infortrend=complete
driver.inspur=missing
driver.inspur_as13000=complete
driver.kaminario=missing
@ -808,6 +820,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=complete
driver.ibm_xiv=missing
driver.infortrend=missing
driver.inspur=missing
driver.inspur_as13000=missing
driver.kaminario=missing
@ -875,6 +888,7 @@ driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
driver.ibm_storwize=missing
driver.ibm_xiv=missing
driver.infortrend=missing
driver.inspur=missing
driver.inspur_as13000=missing
driver.kaminario=missing


@ -0,0 +1,5 @@
---
features:
- Re-added Infortrend Cinder volume driver. The Infortrend driver,
removed in Cinder 12.0.0 (Queens), has been restored in this release.