Adding NVMET target for NVMeOF

Implements an NVMeOF target driver to support the NVMe target CLI
(nvmetcli). A new target, NVMET, is added for the LVM driver; it
allows cinder to use nvmetcli to create/delete subsystems when
attaching/detaching an LVM volume.

By setting target_helper to nvmet and target_protocol to rdma,
further parameters can be configured in cinder.conf, such as
nvmet_port_id, nvmet_ns_id (the namespace ID) and
nvmet_subsystem_name. The target IP and port, which the OS-brick
NVMe connector (nvme-cli) uses to connect, are taken from
target_ip and target_port in cinder.conf.
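
For example, an LVM backend section in cinder.conf might look like
the following. The values are illustrative, the backend section name
is hypothetical, and the option names follow the wording of this
commit message and the tests below; verify the exact names (e.g.
target_ip_address in some releases) against your release:

    [lvmdriver-1]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    target_helper = nvmet
    target_protocol = nvmet_rdma
    target_ip = 192.168.0.1
    target_port = 4420
    nvmet_port_id = 1
    nvmet_ns_id = 10
    nvmet_subsystem_name = nvme-subsystem-1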

Implements: blueprint nvme-target-cli
Change-Id: I7cacd76c63e0ad29eb2d448ce07fbb5176f62721
Hamdy Khader 2017-09-20 12:10:46 +03:00
parent dee77b32dd
commit 8d7e131c58
6 changed files with 512 additions and 5 deletions


@@ -1354,3 +1354,12 @@ class ServiceUserTokenNoAuth(CinderException):
class UnsupportedNVMETProtocol(Invalid):
    message = _("An invalid 'target_protocol' "
                "value was provided: %(protocol)s")


# NVMET driver
class NVMETTargetAddError(CinderException):
    message = "Failed to add subsystem: %(subsystem)s"


class NVMETTargetDeleteError(CinderException):
    message = "Failed to delete subsystem: %(subsystem)s"
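
The %(subsystem)s placeholder is interpolated from the keyword
argument passed at raise time, as with other cinder.exception
classes; the new NVMET driver below raises them like this:

    # From create_nvmeof_target / delete_nvmeof_target below.
    raise exception.NVMETTargetAddError(subsystem=subsystem_name)
    # -> "Failed to add subsystem: <subsystem_name>"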


@@ -0,0 +1,266 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_utils import timeutils

from cinder import context
from cinder import exception
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
from cinder.volume.targets import nvmet


class TestNVMETDriver(tf.TargetDriverFixture):

    def setUp(self):
        super(TestNVMETDriver, self).setUp()

        self.configuration.target_protocol = 'nvmet_rdma'
        self.target = nvmet.NVMET(root_helper=utils.get_root_helper(),
                                  configuration=self.configuration)

        self.target_ip = '192.168.0.1'
        self.target_port = '1234'
        self.nvmet_subsystem_name = self.configuration.target_prefix
        self.nvmet_ns_id = self.configuration.nvmet_ns_id
        self.nvmet_port_id = self.configuration.nvmet_port_id
        self.nvme_transport_type = 'rdma'

        self.fake_volume_id = 'c446b9a2-c968-4260-b95f-a18a7b41c004'
        self.testvol_path = (
            '/dev/stack-volumes-lvmdriver-1/volume-%s' % self.fake_volume_id)
        self.fake_project_id = 'ed2c1fd4-5555-1111-aa15-123b93f75cba'
        self.testvol = (
            {'project_id': self.fake_project_id,
             'name': 'testvol',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': self.target.get_nvmeof_location(
                 "nqn.%s-%s" % (self.nvmet_subsystem_name,
                                self.fake_volume_id),
                 self.target_ip, self.target_port, self.nvme_transport_type,
                 self.nvmet_ns_id),
             'provider_auth': None,
             'provider_geometry': None,
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'})

    @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem')
    @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems')
    @mock.patch.object(nvmet.NVMET, '_add_nvmf_subsystem')
    def test_create_export(self, mock_add_nvmf_subsystem,
                           mock_get_available_nvmf_subsystems,
                           mock_get_nvmf_subsystem):
        mock_testvol = self.testvol
        mock_testvol_path = self.testvol_path
        ctxt = context.get_admin_context()

        mock_get_available_nvmf_subsystems.return_value = {
            "subsystems": [],
            "hosts": [],
            "ports": [
                {"subsystems": [],
                 "referrals": [],
                 "portid": 1,
                 "addr":
                     {"treq": "not specified",
                      "trtype": "rdma",
                      "adrfam": "ipv4",
                      "trsvcid": self.target_port,
                      "traddr": self.target_ip}}]}

        mock_get_nvmf_subsystem.return_value = (
            "nqn.%s-%s" % (self.nvmet_subsystem_name,
                           mock_testvol['id']))
        mock_add_nvmf_subsystem.return_value = (
            "nqn.%s-%s" % (self.nvmet_subsystem_name,
                           mock_testvol['id']))

        expected_return = {
            'location': self.target.get_nvmeof_location(
                mock_add_nvmf_subsystem.return_value, self.target_ip,
                self.target_port, self.nvme_transport_type, self.nvmet_ns_id),
            'auth': ''
        }

        self.target.target_ip = self.target_ip
        self.target.target_port = self.target_port
        self.assertEqual(expected_return,
                         self.target.create_export(
                             ctxt, mock_testvol, mock_testvol_path))

    @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem')
    @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems')
    @mock.patch.object(nvmet.NVMET, '_add_nvmf_subsystem')
    def test_create_export_with_error_add_nvmf_subsystem(
            self,
            mock_add_nvmf_subsystem,
            mock_get_available_nvmf_subsystems,
            mock_get_nvmf_subsystem):
        mock_testvol = self.testvol
        mock_testvol_path = self.testvol_path
        ctxt = context.get_admin_context()

        mock_get_available_nvmf_subsystems.return_value = {
            "subsystems": [],
            "hosts": [],
            "ports": [
                {"subsystems": [],
                 "referrals": [],
                 "portid": 1,
                 "addr":
                     {"treq": "not specified",
                      "trtype": "rdma",
                      "adrfam": "ipv4",
                      "trsvcid": self.target_port,
                      "traddr": self.target_ip}}]}

        mock_get_nvmf_subsystem.return_value = None
        mock_add_nvmf_subsystem.return_value = None

        self.target.target_ip = self.target_ip
        self.target.target_port = self.target_port
        self.assertRaises(exception.NVMETTargetAddError,
                          self.target.create_export,
                          ctxt,
                          mock_testvol,
                          mock_testvol_path)

    @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem')
    @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems')
    @mock.patch.object(nvmet.NVMET, '_delete_nvmf_subsystem')
    def test_remove_export(self, mock_delete_nvmf_subsystem,
                           mock_get_available_nvmf_subsystems,
                           mock_get_nvmf_subsystem):
        mock_testvol = self.testvol
        mock_testvol_path = self.testvol_path
        ctxt = context.get_admin_context()

        mock_get_available_nvmf_subsystems.return_value = {
            "subsystems": [
                {"allowed_hosts": [],
                 "nqn": "nqn.%s-%s" % (self.nvmet_subsystem_name,
                                       mock_testvol['id']),
                 "attr": {"allow_any_host": "1"},
                 "namespaces": [
                     {"device":
                         {"path": mock_testvol_path,
                          "nguid": "86fab0e0-825d-4f25-a449-28b93c5e8dd6"},
                      "enable": 1,
                      "nsid": self.nvmet_ns_id}]}],
            "hosts": [],
            "ports": [
                {"subsystems": [
                    "nqn.%s-%s" % (self.nvmet_subsystem_name,
                                   mock_testvol['id'])],
                 "referrals": [],
                 "portid": self.nvmet_port_id,
                 "addr":
                     {"treq": "not specified",
                      "trtype": "rdma",
                      "adrfam": "ipv4",
                      "trsvcid": self.target_port,
                      "traddr": self.target_ip}}]}

        mock_get_nvmf_subsystem.return_value = (
            "nqn.%s-%s" % (self.nvmet_subsystem_name,
                           mock_testvol['id']))
        mock_delete_nvmf_subsystem.return_value = (
            "nqn.%s-%s" % (self.nvmet_subsystem_name,
                           mock_testvol['id']))

        expected_return = mock_delete_nvmf_subsystem.return_value
        self.assertEqual(expected_return,
                         self.target.remove_export(ctxt, mock_testvol))

    @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem')
    @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems')
    def test_remove_export_with_empty_subsystems(
            self,
            mock_get_available_nvmf_subsystems,
            mock_get_nvmf_subsystem):
        mock_testvol = self.testvol
        ctxt = context.get_admin_context()

        mock_get_available_nvmf_subsystems.return_value = {
            "subsystems": [],
            "hosts": [],
            "ports": []}
        mock_get_nvmf_subsystem.return_value = None
        self.assertIsNone(self.target.remove_export(ctxt, mock_testvol))

    @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem')
    @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems')
    @mock.patch.object(nvmet.NVMET, '_delete_nvmf_subsystem')
    def test_remove_export_with_delete_nvmf_subsystem_fails(
            self,
            mock_delete_nvmf_subsystem,
            mock_get_available_nvmf_subsystems,
            mock_get_nvmf_subsystem):
        mock_testvol = self.testvol
        mock_testvol_path = self.testvol_path
        ctxt = context.get_admin_context()

        mock_get_available_nvmf_subsystems.return_value = {
            "subsystems": [
                {"allowed_hosts": [],
                 "nqn": "nqn.%s-%s" % (self.nvmet_subsystem_name,
                                       mock_testvol['id']),
                 "attr": {"allow_any_host": "1"},
                 "namespaces": [
                     {"device":
                         {"path": mock_testvol_path,
                          "nguid": "86fab0e0-825d-4f25-a449-28b93c5e8dd6"},
                      "enable": 1,
                      "nsid": self.nvmet_ns_id}]}],
            "hosts": [],
            "ports": [
                {"subsystems": [
                    "nqn.%s-%s" % (self.nvmet_subsystem_name,
                                   mock_testvol['id'])],
                 "referrals": [],
                 "portid": self.nvmet_port_id,
                 "addr":
                     {"treq": "not specified",
                      "trtype": "rdma",
                      "adrfam": "ipv4",
                      "trsvcid": self.target_port,
                      "traddr": self.target_ip}}]}

        mock_get_nvmf_subsystem.return_value = (
            "nqn.%s-%s" % (self.nvmet_subsystem_name,
                           mock_testvol['id']))
        mock_delete_nvmf_subsystem.return_value = None
        self.assertRaises(exception.NVMETTargetDeleteError,
                          self.target.remove_export,
                          ctxt,
                          mock_testvol)
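
The fixtures above pin down the subsystem naming scheme: the NQN is
the configured subsystem prefix plus the volume UUID (see
_get_target_info in the driver below). A standalone sketch, with an
assumed prefix value since the fixture's target_prefix is not shown
in this diff:

    # Hypothetical values; only the "nqn.<prefix>-<volume_id>" shape
    # is fixed by the driver code.
    subsystem_prefix = 'nvme-subsystem-1'
    volume_id = 'c446b9a2-c968-4260-b95f-a18a7b41c004'
    nqn = "nqn.%s-%s" % (subsystem_prefix, volume_id)
    # nqn == 'nqn.nvme-subsystem-1-c446b9a2-c968-4260-b95f-a18a7b41c004'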


@@ -95,12 +95,12 @@ volume_opts = [
               deprecated_name='iscsi_helper',
               default='tgtadm',
               choices=['tgtadm', 'lioadm', 'scstadmin', 'iscsictl',
-                       'ietadm', 'fake'],
-              help='iSCSI target user-land tool to use. tgtadm is default, '
+                       'ietadm', 'nvmet', 'fake'],
+              help='Target user-land tool to use. tgtadm is default, '
                    'use lioadm for LIO iSCSI support, scstadmin for SCST '
                    'target support, ietadm for iSCSI Enterprise Target, '
-                   'iscsictl for Chelsio iSCSI '
-                   'Target or fake for testing.'),
+                   'iscsictl for Chelsio iSCSI Target, nvmet for NVMEoF '
+                   'support, or fake for testing.'),
    cfg.StrOpt('volumes_dir',
               default='$state_path/volumes',
               help='Volume configuration file storage '
@@ -412,7 +412,8 @@ class BaseVD(object):
            'lioadm': 'cinder.volume.targets.lio.LioAdm',
            'tgtadm': 'cinder.volume.targets.tgt.TgtAdm',
            'scstadmin': 'cinder.volume.targets.scst.SCSTAdm',
-           'iscsictl': 'cinder.volume.targets.cxt.CxtAdm'}
+           'iscsictl': 'cinder.volume.targets.cxt.CxtAdm',
+           'nvmet': 'cinder.volume.targets.nvmet.NVMET'}

        # set True by manager after successful check_for_setup
        self._initialized = False
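
The helper name from cinder.conf is resolved through this mapping to
a class path and instantiated dynamically. A minimal sketch of the
idea; the surrounding BaseVD code is not part of this hunk, so the
variable names here are illustrative and the constructor arguments
follow the tests above:

    from oslo_utils import importutils

    from cinder import utils

    target_mapping = {'nvmet': 'cinder.volume.targets.nvmet.NVMET'}
    # target_helper = nvmet in cinder.conf selects the class path.
    target_class = target_mapping['nvmet']
    target_driver = importutils.import_object(
        target_class,
        root_helper=utils.get_root_helper(),
        configuration=conf)  # conf: hypothetical backend configuration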


@@ -0,0 +1,222 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import tempfile

from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_utils import excutils
from oslo_utils import uuidutils
import six

from cinder import exception
from cinder import utils
from cinder.volume.targets import nvmeof

LOG = logging.getLogger(__name__)


class NVMET(nvmeof.NVMeOF):

    @utils.synchronized('nvmetcli', external=True)
    def create_nvmeof_target(self,
                             volume_id,
                             subsystem_name,
                             target_ip,
                             target_port,
                             transport_type,
                             nvmet_port_id,
                             ns_id,
                             volume_path):
        # Create NVME subsystem for previously created LV
        nvmf_subsystems = self._get_available_nvmf_subsystems()

        # Check if subsystem already exists
        search_for_subsystem = self._get_nvmf_subsystem(
            nvmf_subsystems, volume_id)
        if search_for_subsystem is None:
            newly_added_subsystem = self._add_nvmf_subsystem(
                nvmf_subsystems,
                target_ip,
                target_port,
                nvmet_port_id,
                subsystem_name,
                ns_id, volume_id, volume_path)
            if newly_added_subsystem is None:
                LOG.error('Failed to add subsystem: %s', subsystem_name)
                raise exception.NVMETTargetAddError(subsystem=subsystem_name)
            LOG.info('Added subsystem: %s', newly_added_subsystem)
            search_for_subsystem = newly_added_subsystem
        else:
            LOG.info('Skip creating subsystem %s as '
                     'it already exists.', search_for_subsystem)
        return {
            'location': self.get_nvmeof_location(
                search_for_subsystem,
                target_ip,
                target_port,
                transport_type,
                ns_id),
            'auth': ''}

    def _restore(self, nvmf_subsystems):
        # Dump updated JSON dict to append new subsystem
        with tempfile.NamedTemporaryFile() as tmp_fd:
            tmp_fd.write(json.dumps(nvmf_subsystems))
            tmp_fd.flush()
            cmd = [
                'nvmetcli',
                'restore',
                tmp_fd.name]
            try:
                out, err = utils.execute(*cmd, run_as_root=True)
            except putils.ProcessExecutionError:
                with excutils.save_and_reraise_exception():
                    LOG.exception('Error from nvmetcli restore')

    def _add_nvmf_subsystem(self, nvmf_subsystems, target_ip, target_port,
                            nvmet_port_id, nvmet_subsystem_name, nvmet_ns_id,
                            volume_id, volume_path):
        subsystem_name = self._get_target_info(nvmet_subsystem_name,
                                               volume_id)
        # Create JSON sections for the new subsystem to be created

        # Port section
        port_section = {
            "addr": {
                "adrfam": "ipv4",
                "traddr": target_ip,
                "treq": "not specified",
                "trsvcid": target_port,
                "trtype": "rdma"
            },
            "portid": nvmet_port_id,
            "referrals": [],
            "subsystems": [subsystem_name]
        }
        nvmf_subsystems['ports'].append(port_section)

        # Subsystem section
        subsystem_section = {
            "allowed_hosts": [],
            "attr": {
                "allow_any_host": "1"
            },
            "namespaces": [
                {
                    "device": {
                        "nguid": six.text_type(uuidutils.generate_uuid()),
                        "path": volume_path,
                    },
                    "enable": 1,
                    "nsid": nvmet_ns_id
                }
            ],
            "nqn": subsystem_name}
        nvmf_subsystems['subsystems'].append(subsystem_section)
        LOG.info(
            'Trying to load the following subsystems: %s', nvmf_subsystems)
        self._restore(nvmf_subsystems)

        return subsystem_name

    @utils.synchronized('nvmetcli', external=True)
    def delete_nvmeof_target(self, volume):
        nvmf_subsystems = self._get_available_nvmf_subsystems()
        subsystem_name = self._get_nvmf_subsystem(
            nvmf_subsystems, volume['id'])
        if subsystem_name:
            removed_subsystem = self._delete_nvmf_subsystem(
                nvmf_subsystems, subsystem_name)
            if removed_subsystem is None:
                LOG.error(
                    'Failed to delete subsystem: %s', subsystem_name)
                raise exception.NVMETTargetDeleteError(
                    subsystem=subsystem_name)
            elif removed_subsystem == subsystem_name:
                LOG.info(
                    'Managed to delete subsystem: %s', subsystem_name)
                return removed_subsystem
        else:
            LOG.info("Skipping remove_export. No NVMe subsystem "
                     "for volume: %s", volume['id'])

    def _delete_nvmf_subsystem(self, nvmf_subsystems, subsystem_name):
        LOG.debug(
            'Removing this subsystem: %s', subsystem_name)

        for port in nvmf_subsystems['ports']:
            if subsystem_name in port['subsystems']:
                port['subsystems'].remove(subsystem_name)
                break
        for subsys in nvmf_subsystems['subsystems']:
            if subsys['nqn'] == subsystem_name:
                nvmf_subsystems['subsystems'].remove(subsys)
                break

        LOG.debug(
            'Newly loaded subsystems will be: %s', nvmf_subsystems)
        self._restore(nvmf_subsystems)
        return subsystem_name

    def _get_nvmf_subsystem(self, nvmf_subsystems, volume_id):
        subsystem_name = self._get_target_info(
            self.nvmet_subsystem_name, volume_id)
        for subsys in nvmf_subsystems['subsystems']:
            if subsys['nqn'] == subsystem_name:
                return subsystem_name

    def _get_available_nvmf_subsystems(self):
        __, tmp_file_path = tempfile.mkstemp(prefix='nvmet')

        # nvmetcli doesn't support printing to stdout yet, so save the
        # current config to a temp file and read it back.
        cmd = [
            'nvmetcli',
            'save',
            tmp_file_path]
        try:
            out, err = utils.execute(*cmd, run_as_root=True)
        except putils.ProcessExecutionError:
            with excutils.save_and_reraise_exception():
                LOG.exception('Error from nvmetcli save')
                self._delete_file(tmp_file_path)

        # temp file must be readable by this process user
        # in order to avoid executing cat as root
        with utils.temporary_chown(tmp_file_path):
            cmd = ['cat', tmp_file_path]
            try:
                out, err = utils.execute(*cmd)
            except putils.ProcessExecutionError:
                with excutils.save_and_reraise_exception():
                    LOG.exception('Failed to read: %s', tmp_file_path)
                    self._delete_file(tmp_file_path)
        nvmf_subsystems = json.loads(out)
        self._delete_file(tmp_file_path)
        return nvmf_subsystems

    def _get_target_info(self, subsystem, volume_id):
        return "nqn.%s-%s" % (subsystem, volume_id)

    def _delete_file(self, file_path):
        cmd = ['rm', '-f', file_path]
        try:
            out, err = utils.execute(*cmd, run_as_root=True)
        except putils.ProcessExecutionError:
            LOG.exception('Failed to delete file: %s', file_path)
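
For orientation, a rough sketch of how the LVM flow exercises this
class. create_export/remove_export live in the nvmeof.NVMeOF base
class, which is not part of this diff, so the direct call below is
illustrative and all values are made up:

    # target assumed to be an initialized NVMET instance
    # (constructed as in the tests above).
    result = target.create_nvmeof_target(
        volume_id='c446b9a2-c968-4260-b95f-a18a7b41c004',
        subsystem_name='nvme-subsystem-1',   # nvmet_subsystem_name prefix
        target_ip='192.168.0.1',
        target_port='4420',
        transport_type='rdma',
        nvmet_port_id=1,
        ns_id=10,
        volume_path='/dev/stack-volumes-lvmdriver-1/volume-c446b9a2-...')
    # result['location'] becomes the volume's provider_location and is
    # later consumed by the OS-brick NVMe connector on the attach side.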


@@ -229,3 +229,5 @@ drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio
mount.quobyte: CommandFilter, mount.quobyte, root
umount.quobyte: CommandFilter, umount.quobyte, root
# cinder/volume/targets/nvmet.py
nvmetcli: CommandFilter, nvmetcli, root


@@ -0,0 +1,7 @@
---
features:
  - |
    A new target, NVMET, has been added for the LVM driver over RDMA.
    It allows cinder to use nvmetcli to create and delete subsystems
    when attaching or detaching an LVM volume to or from an instance.