Add volume drivers for Infortrend Storage

Infortrend implements iSCSI and FC volume drivers for the
EonStor DS product line.
The drivers manage storage through the Infortrend CLI tool.

common_cli.py implements the basic Cinder driver API;
infortrend_fc_cli.py and infortrend_iscsi_cli.py build on it to
provide FC- and iSCSI-specific support, as sketched below.
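
For reference, the layering is a thin-wrapper pattern; condensed from the
full drivers in this diff, the FC variant boils down to:

    from cinder.volume import driver
    from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli

    class InfortrendCLIFCDriver(driver.FibreChannelDriver):
        def __init__(self, *args, **kwargs):
            super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs)
            self.common = common_cli.InfortrendCommon(
                'FC', configuration=self.configuration)

        def create_volume(self, volume):
            # every other driver API call delegates to self.common the same way
            return self.common.create_volume(volume)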

Supported features:
- Volume Create/Delete
- Volume Attach/Detach
- Snapshot Create/Delete
- Create Volume from Snapshot
- Get Volume Stats
- Copy Image to Volume
- Copy Volume to Image
- Clone Volume
- Extend Volume

Change-Id: I830c5a48a5fb85707f02396b4634825e27455e8a
Implements: blueprint infortrend-iscsi-fc-volume-driver
Lee 2015-04-24 15:50:20 +08:00
parent 24c60db7a1
commit 2815195053
9 changed files with 7392 additions and 0 deletions

@@ -914,6 +914,12 @@ class StorPoolConfigurationInvalid(CinderException):
"of the /etc/storpool.conf file: %(error)s")
# Infortrend EonStor DS Driver
class InfortrendCliException(CinderException):
message = _("Infortrend CLI exception: %(err)s Param: %(param)s "
"(Return Code: %(rc)s) (Output: %(out)s)")
# DOTHILL drivers
class DotHillInvalidBackend(CinderException):
message = _("Backend doesn't exist (%(backend)s)")

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,735 @@
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Infortrend basic CLI factory.
"""
import abc
from oslo_concurrency import processutils
from oslo_log import log as logging
import six
from cinder.i18n import _LE
from cinder import utils
LOG = logging.getLogger(__name__)
DEFAULT_RETRY_TIME = 5
def retry_cli(func):
def inner(self, *args, **kwargs):
total_retry_time = self.cli_retry_time
if total_retry_time is None:
total_retry_time = DEFAULT_RETRY_TIME
retry_time = 0
while retry_time < total_retry_time:
rc, out = func(self, *args, **kwargs)
retry_time += 1
if rc == 0:
break
LOG.error(_LE(
'Retry %(retry)s times: %(method)s Failed '
'%(rc)s: %(reason)s'), {
'retry': retry_time,
'method': self.__class__.__name__,
'rc': rc,
'reason': out})
LOG.debug(
'Method: %(method)s Return Code: %(rc)s '
'Output: %(out)s', {
'method': self.__class__.__name__, 'rc': rc, 'out': out})
return rc, out
return inner
def util_execute(command_line):
content, err = utils.execute(command_line, shell=True)
return content
def strip_empty_in_list(list):
result = []
for entry in list:
entry = entry.strip()
if entry != "":
result.append(entry)
return result
def table_to_dict(table):
tableHeader = table[0].split(" ")
tableHeaderList = strip_empty_in_list(tableHeader)
result = []
for i in range(len(table) - 2):
if table[i + 2].strip() == "":
break
resultEntry = {}
tableEntry = table[i + 2].split(" ")
tableEntryList = strip_empty_in_list(tableEntry)
for key, value in zip(tableHeaderList, tableEntryList):
resultEntry[key] = value
result.append(resultEntry)
return result
def content_lines_to_dict(content_lines):
result = []
resultEntry = {}
for content_line in content_lines:
if content_line.strip() == "":
result.append(resultEntry)
resultEntry = {}
continue
split_entry = content_line.strip().split(": ", 1)
resultEntry[split_entry[0]] = split_entry[1]
return result
@six.add_metaclass(abc.ABCMeta)
class BaseCommand(object):
"""The BaseCommand abstract class."""
def __init__(self):
super(BaseCommand, self).__init__()
@abc.abstractmethod
def execute(self, *args, **kwargs):
pass
class ExecuteCommand(BaseCommand):
"""The Common ExecuteCommand."""
def __init__(self, cli_conf):
super(ExecuteCommand, self).__init__()
self.cli_retry_time = cli_conf.get('cli_retry_time')
@retry_cli
def execute(self, *args, **kwargs):
result = None
rc = 0
try:
result, err = utils.execute(*args, **kwargs)
except processutils.ProcessExecutionError as pe:
rc = pe.exit_code
result = pe.stdout
result = result.replace('\n', '\\n')
LOG.error(_LE(
'Error on execute command. '
'Error code: %(exit_code)d Error msg: %(result)s'), {
'exit_code': pe.exit_code, 'result': result})
return rc, result
class CLIBaseCommand(BaseCommand):
"""The CLIBaseCommand class."""
def __init__(self, cli_conf):
super(CLIBaseCommand, self).__init__()
self.java = "java -jar"
self.execute_file = cli_conf.get('path')
self.ip = cli_conf.get('ip')
self.password = cli_conf.get('password')
self.cli_retry_time = cli_conf.get('cli_retry_time')
self.command = ""
self.parameters = ()
self.command_line = ""
def _generate_command(self, parameters):
"""Generate execute Command. use java, execute, command, parameters."""
self.parameters = parameters
parameters_line = ' '.join(parameters)
if self.password:
parameters_line = 'password=%s %s' % (
self.password, parameters_line)
self.command_line = "{0} {1} {2} {3} {4}".format(
self.java,
self.execute_file,
self.ip,
self.command,
parameters_line)
return self.command_line
def _parser(self, content=None):
"""The parser to parse command result.
:param content: The parse Content
:returns: parse result
"""
content = content.replace("\r", "")
content = content.replace("\\/-", "")
content = content.strip()
LOG.debug(content)
if content is not None:
content_lines = content.split("\n")
rc, out = self._parse_return(content_lines)
if rc != 0:
return rc, out
else:
return rc, content_lines
return -1, None
@retry_cli
def execute(self, *args, **kwargs):
command_line = self._generate_command(args)
LOG.debug('Executing: %(command)s', {'command': command_line})
rc = 0
result = None
try:
content = self._execute(command_line)
rc, result = self._parser(content)
except processutils.ProcessExecutionError as pe:
rc = -2  # prevent confusion with the real CLI return code
result = pe.stdout
result = result.replace('\n', '\\n')
LOG.error(_LE(
'Error on execute %(command)s. '
'Error code: %(exit_code)d Error msg: %(result)s'), {
'command': command_line,
'exit_code': pe.exit_code,
'result': result})
return rc, result
def _execute(self, command_line):
return util_execute(command_line)
def set_ip(self, ip):
"""Set the Raid's ip."""
self.ip = ip
def _parse_return(self, content_lines):
"""Get the end of command line result."""
rc = 0
return_value = content_lines[-1].strip().split(' ', 1)[1]
return_cli_result = content_lines[-2].strip().split(' ', 1)[1]
rc = int(return_value, 16)
return rc, return_cli_result
class CreateLD(CLIBaseCommand):
"""The Create LD Command."""
def __init__(self, *args, **kwargs):
super(CreateLD, self).__init__(*args, **kwargs)
self.command = "create ld"
class CreateLV(CLIBaseCommand):
"""The Create LV Command."""
def __init__(self, *args, **kwargs):
super(CreateLV, self).__init__(*args, **kwargs)
self.command = "create lv"
class CreatePartition(CLIBaseCommand):
"""Create Partition.
create part [LV-ID] [name] [size={partition-size}]
[min={minimal-reserve-size}] [init={switch}]
[tier={tier-level-list}]
"""
def __init__(self, *args, **kwargs):
super(CreatePartition, self).__init__(*args, **kwargs)
self.command = "create part"
class DeletePartition(CLIBaseCommand):
"""Delete Partition.
delete part [partition-ID] [-y]
"""
def __init__(self, *args, **kwargs):
super(DeletePartition, self).__init__(*args, **kwargs)
self.command = "delete part"
class SetPartition(CLIBaseCommand):
"""Set Partition.
set part [partition-ID] [name={partition-name}]
[min={minimal-reserve-size}]
set part expand [partition-ID] [size={expand-size}]
set part purge [partition-ID] [number] [rule-type]
set part reclaim [partition-ID]
"""
def __init__(self, *args, **kwargs):
super(SetPartition, self).__init__(*args, **kwargs)
self.command = "set part"
class CreateMap(CLIBaseCommand):
"""Map the Partition on the channel.
create map [part] [partition-ID] [Channel-ID]
[Target-ID] [LUN-ID] [assign={assign-to}]
"""
def __init__(self, *args, **kwargs):
super(CreateMap, self).__init__(*args, **kwargs)
self.command = "create map"
class DeleteMap(CLIBaseCommand):
"""Unmap the Partition on the channel.
delete map [part] [partition-ID] [Channel-ID]
[Target-ID] [LUN-ID] [-y]
"""
def __init__(self, *args, **kwargs):
super(DeleteMap, self).__init__(*args, **kwargs)
self.command = "delete map"
class CreateSnapshot(CLIBaseCommand):
"""Create partition's Snapshot.
create si [part] [partition-ID]
"""
def __init__(self, *args, **kwargs):
super(CreateSnapshot, self).__init__(*args, **kwargs)
self.command = "create si"
class DeleteSnapshot(CLIBaseCommand):
"""Delete partition's Snapshot.
delete si [snapshot-image-ID] [-y]
"""
def __init__(self, *args, **kwargs):
super(DeleteSnapshot, self).__init__(*args, **kwargs)
self.command = "delete si"
class CreateReplica(CLIBaseCommand):
"""Create partition or snapshot's replica.
create replica [name] [part | si] [source-volume-ID]
[part] [target-volume-ID] [type={replication-mode}]
[priority={level}] [desc={description}]
[incremental={switch}] [timeout={value}]
[compression={switch}]
"""
def __init__(self, *args, **kwargs):
super(CreateReplica, self).__init__(*args, **kwargs)
self.command = "create replica"
class DeleteReplica(CLIBaseCommand):
"""Delete and terminate specific replication job.
delete replica [volume-pair-ID] [-y]
"""
def __init__(self, *args, **kwargs):
super(DeleteReplica, self).__init__(*args, **kwargs)
self.command = "delete replica"
class CreateIQN(CLIBaseCommand):
"""Create host iqn for CHAP or lun filter.
create iqn [IQN] [IQN-alias-name] [user={username}] [password={secret}]
[target={name}] [target-password={secret}] [ip={ip-address}]
[mask={netmask-ip}]
"""
def __init__(self, *args, **kwargs):
super(CreateIQN, self).__init__(*args, **kwargs)
self.command = "create iqn"
class DeleteIQN(CLIBaseCommand):
"""Delete host iqn by name.
delete iqn [name]
"""
def __init__(self, *args, **kwargs):
super(DeleteIQN, self).__init__(*args, **kwargs)
self.command = "delete iqn"
class ShowCommand(CLIBaseCommand):
"""Basic Show Command."""
def __init__(self, *args, **kwargs):
super(ShowCommand, self).__init__(*args, **kwargs)
self.param_detail = "-l"
self.default_type = "table"
self.start_key = ""
def _parser(self, content=None):
"""Parse Table or Detail format into dict.
# Table format
ID Name LD-amount
----------------------
123 LV-1 1
# Result
{
'ID': '123',
'Name': 'LV-1',
'LD-amount': '1'
}
# Detail format
ID: 5DE94FF775D81C30
Name: LV-1
LD-amount: 1
# Result
{
'ID': '5DE94FF775D81C30',
'Name': 'LV-1',
'LD-amount': '1'
}
:param content: The parse Content.
:returns: parse result
"""
rc, out = super(ShowCommand, self)._parser(content)
# Error.
if rc != 0:
return rc, out
# No content.
if len(out) < 6:
return rc, []
detect_type = self.detect_type()
# Show detail content.
if detect_type == "list":
start_id = self.detect_detail_start_index(out)
if start_id < 0:
return rc, []
result = content_lines_to_dict(out[start_id:-2])
else:
start_id = self.detect_table_start_index(out)
if start_id < 0:
return rc, []
result = table_to_dict(out[start_id:-3])
return rc, result
def detect_type(self):
if self.param_detail in self.parameters:
detect_type = "list"
else:
detect_type = self.default_type
return detect_type
def detect_table_start_index(self, content):
for i in range(3, len(content)):
key = content[i].strip().split(' ')
if self.start_key in key[0].strip():
return i
return -1
def detect_detail_start_index(self, content):
for i in range(3, len(content)):
split_entry = content[i].strip().split(' ')
if len(split_entry) >= 2 and ':' in split_entry[0]:
return i
return -1
class ShowLD(ShowCommand):
"""Show LD.
show ld [index-list]
"""
def __init__(self, *args, **kwargs):
super(ShowLD, self).__init__(*args, **kwargs)
self.command = "show ld"
class ShowLV(ShowCommand):
"""Show LV.
show lv [lv={LV-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowLV, self).__init__(*args, **kwargs)
self.command = "show lv"
self.start_key = "ID"
def detect_table_start_index(self, content):
if "tier" in self.parameters:
self.start_key = "LV-Name"
for i in range(3, len(content)):
key = content[i].strip().split(' ')
if self.start_key in key[0].strip():
return i
return -1
class ShowPartition(ShowCommand):
"""Show Partition.
show part [part={partition-IDs} | lv={LV-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowPartition, self).__init__(*args, **kwargs)
self.command = "show part"
self.start_key = "ID"
class ShowSnapshot(ShowCommand):
"""Show Snapshot.
show si [si={snapshot-image-IDs} | part={partition-IDs} | lv={LV-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowSnapshot, self).__init__(*args, **kwargs)
self.command = "show si"
self.start_key = "Index"
class ShowDevice(ShowCommand):
"""Show Device.
show device
"""
def __init__(self, *args, **kwargs):
super(ShowDevice, self).__init__(*args, **kwargs)
self.command = "show device"
self.start_key = "Index"
class ShowChannel(ShowCommand):
"""Show Channel.
show channel
"""
def __init__(self, *args, **kwargs):
super(ShowChannel, self).__init__(*args, **kwargs)
self.command = "show channel"
self.start_key = "Ch"
class ShowDisk(ShowCommand):
"""The Show Disk Command.
show disk [disk-index-list | channel={ch}]
"""
def __init__(self, *args, **kwargs):
super(ShowDisk, self).__init__(*args, **kwargs)
self.command = "show disk"
class ShowMap(ShowCommand):
"""Show Map.
show map [part={partition-IDs} | channel={channel-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowMap, self).__init__(*args, **kwargs)
self.command = "show map"
self.start_key = "Ch"
class ShowNet(ShowCommand):
"""Show IP network.
show net [id={channel-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowNet, self).__init__(*args, **kwargs)
self.command = "show net"
self.start_key = "ID"
class ShowLicense(ShowCommand):
"""Show License.
show license
"""
def __init__(self, *args, **kwargs):
super(ShowLicense, self).__init__(*args, **kwargs)
self.command = "show license"
self.start_key = "License"
def _parser(self, content=None):
"""Parse License format.
# License format
License Amount(Partition/Subsystem) Expired
------------------------------------------------
EonPath --- True
# Result
{
'EonPath': {
'Amount': '---',
'Support': True
}
}
:param content: The parse Content.
:returns: parse result
"""
rc, out = super(ShowLicense, self)._parser(content)
if rc != 0:
return rc, out
if len(out) > 0:
result = {}
for entry in out:
if entry['Expired'] == '---' or entry['Expired'] == 'Expired':
support = False
else:
support = True
result[entry['License']] = {
'Amount':
entry['Amount(Partition/Subsystem)'],
'Support': support
}
return rc, result
return rc, []
class ShowReplica(ShowCommand):
"""Show information of all replication jobs or specific job.
show replica [id={volume-pair-IDs}] [-l]
"""
def __init__(self, *args, **kwargs):
super(ShowReplica, self).__init__(*args, **kwargs)
self.command = 'show replica'
class ShowWWN(ShowCommand):
"""Show Fibre network.
show wwn
"""
def __init__(self, *args, **kwargs):
super(ShowWWN, self).__init__(*args, **kwargs)
self.command = "show wwn"
self.start_key = "CH"
class ShowIQN(ShowCommand):
"""Show iSCSI initiator IQN which is set by create iqn.
show iqn
"""
LIST_START_LINE = "List of initiator IQN(s):"
def __init__(self, *args, **kwargs):
super(ShowIQN, self).__init__(*args, **kwargs)
self.command = "show iqn"
self.default_type = "list"
def detect_detail_start_index(self, content):
for i in range(3, len(content)):
if content[i].strip() == self.LIST_START_LINE:
return i + 2
return -1
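
A standalone look at the two parsing helpers defined near the top of this
file, run against sample CLI-style output; the import path is an assumption,
since the file's path is not shown in this view:

from cinder.volume.drivers.infortrend.eonstor_ds_cli import cli_factory  # assumed path

table = [
    'ID   Name   LD-amount',
    '----------------------',
    '123  LV-1   1',
    '',  # a blank line terminates the table
]
print(cli_factory.table_to_dict(table))
# -> [{'ID': '123', 'Name': 'LV-1', 'LD-amount': '1'}]  (key order may vary)

detail = [
    'ID: 5DE94FF775D81C30',
    'Name: LV-1',
    'LD-amount: 1',
    '',  # a blank line flushes the current entry
]
print(cli_factory.content_lines_to_dict(detail))
# -> [{'ID': '5DE94FF775D81C30', 'Name': 'LV-1', 'LD-amount': '1'}]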

File diff suppressed because it is too large

@@ -0,0 +1,277 @@
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fibre Channel Driver for Infortrend Eonstor based on CLI.
"""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class InfortrendCLIFCDriver(driver.FibreChannelDriver):
"""Infortrend Fibre Channel Driver for Eonstor DS using CLI.
Version history:
1.0.0 - Initial driver
"""
def __init__(self, *args, **kwargs):
super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs)
self.common = common_cli.InfortrendCommon(
'FC', configuration=self.configuration)
self.VERSION = self.common.VERSION
def check_for_setup_error(self):
LOG.debug('check_for_setup_error start')
self.common.check_for_setup_error()
def create_volume(self, volume):
"""Creates a volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
LOG.debug('create_volume volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(
'create_volume_from_snapshot volume id=%(volume_id)s '
'snapshot id=%(snapshot_id)s', {
'volume_id': volume['id'], 'snapshot_id': snapshot['id']})
return self.common.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.debug(
'create_cloned_volume volume id=%(volume_id)s '
'src_vref provider_location=%(provider_location)s', {
'volume_id': volume['id'],
'provider_location': src_vref['provider_location']})
return self.common.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
LOG.debug(
'extend_volume volume id=%(volume_id)s new size=%(size)s', {
'volume_id': volume['id'], 'size': new_size})
self.common.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
LOG.debug('delete_volume volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.delete_volume(volume)
def migrate_volume(self, ctxt, volume, host):
"""Migrate the volume to the specified host.
Returns a boolean indicating whether the migration occurred, as well as
model_update.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', {
'volume_id': volume['id'], 'host': host['host']})
return self.common.migrate_volume(volume, host)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug(
'create_snapshot snapshot id=%(snapshot_id)s '
'volume id=%(volume_id)s', {
'snapshot_id': snapshot['id'],
'volume_id': snapshot['volume_id']})
return self.common.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug(
'delete_snapshot snapshot id=%(snapshot_id)s '
'volume id=%(volume_id)s', {
'snapshot_id': snapshot['id'],
'volume_id': snapshot['volume_id']})
self.common.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume):
"""Exports the volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
LOG.debug(
'create_export volume provider_location=%(provider_location)s', {
'provider_location': volume['provider_location']})
return self.common.create_export(context, volume)
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection information.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
The initiator_target_map is a map that represents the remote wwn(s)
and a list of wwns which are visible to the remote wwn(s).
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
'access_mode': 'rw'
'initiator_target_map': {
'1122334455667788': ['1234567890123']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'access_mode': 'rw'
'initiator_target_map': {
'1122334455667788': ['1234567890123',
'0987654321321']
}
}
}
"""
LOG.debug(
'initialize_connection volume id=%(volume_id)s '
'connector initiator=%(initiator)s', {
'volume_id': volume['id'],
'initiator': connector['initiator']})
return self.common.initialize_connection(volume, connector)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
LOG.debug('terminate_connection volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.terminate_connection(volume, connector)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
LOG.debug('get_volume_stats refresh=%(refresh)s', {
'refresh': refresh})
return self.common.get_volume_stats(refresh)
def manage_existing(self, volume, existing_ref):
"""Manage an existing lun in the array.
The lun should be in a manageable pool backend, otherwise
error would return.
Rename the backend storage object so that it matches the,
volume['name'] which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
existing_ref:{
'id':lun_id
}
"""
LOG.debug(
'manage_existing volume id=%(volume_id)s '
'existing_ref source id=%(source_id)s', {
'volume_id': volume['id'],
'source_id': existing_ref['source-id']})
return self.common.manage_existing(volume, existing_ref)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
:param volume: Cinder volume to unmanage
"""
LOG.debug('unmanage volume id=%(volume_id)s', {
'volume_id': volume['id']})
self.common.unmanage(volume)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
LOG.debug(
'manage_existing_get_size volume id=%(volume_id)s '
'existing_ref source id=%(source_id)s', {
'volume_id': volume['id'],
'source_id': existing_ref['source-id']})
return self.common.manage_existing_get_size(volume, existing_ref)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug(
'retype volume id=%(volume_id)s new_type id=%(type_id)s', {
'volume_id': volume['id'], 'type_id': new_type['id']})
return self.common.retype(ctxt, volume, new_type, diff, host)
def update_migrated_volume(self, ctxt, volume, new_volume):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:return model_update to update DB with any needed changes
"""
LOG.debug(
'update migrated volume original volume id= %(volume_id)s '
'new volume id=%(new_volume_id)s', {
'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
return self.common.update_migrated_volume(ctxt, volume, new_volume)
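
For reference, the 'fibre_channel' connection info documented in
initialize_connection above can be rebuilt with a small hypothetical helper
(not part of this commit); it maps every initiator WWPN to every target WWN,
which a real backend may narrow down:

def build_fc_connection_info(target_wwns, initiator_wwpns, target_lun):
    # Illustration of the documented return shape only.
    return {
        'driver_volume_type': 'fibre_channel',
        'data': {
            'target_discovered': True,
            'target_lun': target_lun,
            'target_wwn': target_wwns,
            'access_mode': 'rw',
            'initiator_target_map': {
                wwpn: list(target_wwns) for wwpn in initiator_wwpns
            },
        },
    }

print(build_fc_connection_info(
    ['1234567890123', '0987654321321'], ['1122334455667788'], 1))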

@@ -0,0 +1,249 @@
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iSCSI Driver for Infortrend Eonstor based on CLI.
"""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli
LOG = logging.getLogger(__name__)
class InfortrendCLIISCSIDriver(driver.ISCSIDriver):
"""Infortrend iSCSI Driver for Eonstor DS using CLI.
Version history:
1.0.0 - Initial driver
"""
def __init__(self, *args, **kwargs):
super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs)
self.common = common_cli.InfortrendCommon(
'iSCSI', configuration=self.configuration)
self.VERSION = self.common.VERSION
def check_for_setup_error(self):
LOG.debug('check_for_setup_error start')
self.common.check_for_setup_error()
def create_volume(self, volume):
"""Creates a volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
LOG.debug('create_volume volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(
'create_volume_from_snapshot volume id=%(volume_id)s '
'snapshot id=%(snapshot_id)s', {
'volume_id': volume['id'], 'snapshot_id': snapshot['id']})
return self.common.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.debug(
'create_cloned_volume volume id=%(volume_id)s '
'src_vref provider_location=%(provider_location)s', {
'volume_id': volume['id'],
'provider_location': src_vref['provider_location']})
return self.common.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
LOG.debug(
'extend_volume volume id=%(volume_id)s new size=%(size)s', {
'volume_id': volume['id'], 'size': new_size})
self.common.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
LOG.debug('delete_volume volume id=%(volume_id)s', {
'volume_id': volume['id']})
return self.common.delete_volume(volume)
def migrate_volume(self, ctxt, volume, host):
"""Migrate the volume to the specified host.
Returns a boolean indicating whether the migration occurred, as well as
model_update.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', {
'volume_id': volume['id'], 'host': host['host']})
return self.common.migrate_volume(volume, host)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug(
'create_snapshot snapshot id=%(snapshot_id)s '
'volume_id=%(volume_id)s', {
'snapshot_id': snapshot['id'],
'volume_id': snapshot['volume_id']})
return self.common.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug(
'delete_snapshot snapshot id=%(snapshot_id)s '
'volume_id=%(volume_id)s', {
'snapshot_id': snapshot['id'],
'volume_id': snapshot['volume_id']})
self.common.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume):
"""Exports the volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
LOG.debug(
'create_export volume provider_location=%(provider_location)s', {
'provider_location': volume['provider_location']})
return self.common.create_export(context, volume)
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection information.
The iscsi driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined in _get_iscsi_properties.
Example return value::
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260',
'volume_id': 1,
'access_mode': 'rw'
}
}
"""
LOG.debug(
'initialize_connection volume id=%(volume_id)s '
'connector initiator=%(initiator)s', {
'volume_id': volume['id'],
'initiator': connector['initiator']})
return self.common.initialize_connection(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
LOG.debug('terminate_connection volume id=%(volume_id)s', {
'volume_id': volume['id']})
self.common.terminate_connection(volume, connector)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
LOG.debug('get_volume_stats refresh=%(refresh)s', {
'refresh': refresh})
return self.common.get_volume_stats(refresh)
def manage_existing(self, volume, existing_ref):
"""Manage an existing lun in the array.
The lun should be in a manageable pool backend, otherwise
error would return.
Rename the backend storage object so that it matches the,
volume['name'] which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
existing_ref:{
'id':lun_id
}
"""
LOG.debug(
'manage_existing volume id=%(volume_id)s '
'existing_ref source id=%(source_id)s', {
'volume_id': volume['id'],
'source_id': existing_ref['source-id']})
return self.common.manage_existing(volume, existing_ref)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
:param volume: Cinder volume to unmanage
"""
LOG.debug('unmanage volume id=%(volume_id)s', {
'volume_id': volume['id']})
self.common.unmanage(volume)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
LOG.debug(
'manage_existing_get_size volume id=%(volume_id)s '
'existing_ref source id=%(source_id)s', {
'volume_id': volume['id'],
'source_id': existing_ref['source-id']})
return self.common.manage_existing_get_size(volume, existing_ref)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug(
'retype volume id=%(volume_id)s new_type id=%(type_id)s', {
'volume_id': volume['id'], 'type_id': new_type['id']})
return self.common.retype(ctxt, volume, new_type, diff, host)
def update_migrated_volume(self, ctxt, volume, new_volume):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:return model_update to update DB with any needed changes
"""
LOG.debug(
'update migrated volume original volume id= %(volume_id)s '
'new volume id=%(new_volume_id)s', {
'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
return self.common.update_migrated_volume(ctxt, volume, new_volume)
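
manage_existing_get_size above is documented to round the backend LUN size
up to the next GB; a standalone illustration of that rounding (helper name
and values are made up, and GB is taken as GiB):

import math

def round_up_to_gb(size_in_bytes):
    # Round a raw LUN size in bytes up to whole GiB, as the docstring describes.
    gib = 1024 ** 3
    return int(math.ceil(size_in_bytes / float(gib)))

print(round_up_to_gb(20 * 1024 ** 3))      # exactly 20 GiB -> 20
print(round_up_to_gb(20 * 1024 ** 3 + 1))  # 20 GiB + 1 byte -> 21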