Merge "Adding SPDK NVMe-oF target driver"

Zuul 2019-01-09 04:19:03 +00:00 committed by Gerrit Code Review
commit d9c5a609d6
5 changed files with 611 additions and 3 deletions

@@ -169,6 +169,7 @@ from cinder.volume.drivers.zfssa import zfssaiscsi as \
from cinder.volume.drivers.zfssa import zfssanfs as \
cinder_volume_drivers_zfssa_zfssanfs
from cinder.volume import manager as cinder_volume_manager
+from cinder.volume.targets import spdknvmf as cinder_volume_targets_spdknvmf
from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as \
cinder_zonemanager_drivers_brocade_brcdfabricopts
@@ -359,6 +360,7 @@ def list_opts():
cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS,
cinder_volume_drivers_zfssa_zfssanfs.ZFSSA_OPTS,
cinder_volume_manager.volume_backend_opts,
+                cinder_volume_targets_spdknvmf.spdk_opts,
)),
('nova',
itertools.chain(

@@ -0,0 +1,408 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import json

import mock

from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.targets import spdknvmf as spdknvmf_driver

BDEVS = [{
"num_blocks": 4096000,
"name": "Nvme0n1",
"driver_specific": {
"nvme": {
"trid": {
"trtype": "PCIe",
"traddr": "0000:00:04.0"
},
"ns_data": {
"id": 1
},
"pci_address": "0000:00:04.0",
"vs": {
"nvme_version": "1.1"
},
"ctrlr_data": {
"firmware_revision": "1.0",
"serial_number": "deadbeef",
"oacs": {
"ns_manage": 0,
"security": 0,
"firmware": 0,
"format": 0
},
"vendor_id": "0x8086",
"model_number": "QEMU NVMe Ctrl"
},
"csts": {
"rdy": 1,
"cfs": 0
}
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": True,
"unmap": False,
"read": True,
"write_zeroes": False,
"write": True,
"flush": True,
"nvme_io": True
},
"claimed": False,
"block_size": 512,
"product_name": "NVMe disk",
"aliases": ["Nvme0n1"]
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p0"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p1"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"lvs_test/lvol0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297"
}, {
"num_blocks": 8192,
"uuid": "8dec1964-d533-41df-bea7-40520efdb416",
"aliases": [
"lvs_test/lvol1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": True
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298"
}]

NVMF_SUBSYSTEMS = [{
"listen_addresses": [],
"subtype": "Discovery",
"nqn": "nqn.2014-08.org.nvmexpress.discovery",
"hosts": [],
"allow_any_host": True
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [{
"nqn": "nqn.2016-06.io.spdk:init"
}],
"namespaces": [{
"bdev_name": "Nvme0n1p0",
"nsid": 1,
"name": "Nvme0n1p0"
}],
"allow_any_host": False,
"serial_number": "SPDK00000000000001",
"nqn": "nqn.2016-06.io.spdk:cnode1"
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": "SPDK00000000000002",
"nqn": "nqn.2016-06.io.spdk:cnode2"
}]


class JSONRPCException(Exception):
def __init__(self, message):
        self.message = message


class JSONRPCClient(object):
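    # Fake in-process JSON-RPC client: method names are dispatched to local
    # handlers that operate on deep copies of the fixtures above.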
def __init__(self, addr=None, port=None):
self.methods = {"get_bdevs": self.get_bdevs,
"construct_nvmf_subsystem":
self.construct_nvmf_subsystem,
"delete_nvmf_subsystem": self.delete_nvmf_subsystem,
"nvmf_subsystem_create": self.nvmf_subsystem_create,
"nvmf_subsystem_add_listener":
self.nvmf_subsystem_add_listener,
"nvmf_subsystem_add_ns":
self.nvmf_subsystem_add_ns,
"get_nvmf_subsystems": self.get_nvmf_subsystems}
self.bdevs = copy.deepcopy(BDEVS)
        self.nvmf_subsystems = copy.deepcopy(NVMF_SUBSYSTEMS)

def __del__(self):
        pass

def get_bdevs(self, params=None):
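        # Emulate SPDK's get_bdevs: filter by name (matching aliases or the
        # bdev name) when provided, otherwise return all bdevs.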
if params and 'name' in params:
for bdev in self.bdevs:
for alias in bdev['aliases']:
if params['name'] in alias:
return json.dumps({"result": [bdev]})
if bdev['name'] == params['name']:
return json.dumps({"result": [bdev]})
return json.dumps({"error": "Not found"})
return json.dumps({"result": self.bdevs})
def get_nvmf_subsystems(self, params=None):
return json.dumps({"result": self.nvmf_subsystems})
def construct_nvmf_subsystem(self, params=None):
nvmf_subsystem = {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": params['serial_number'],
"nqn": params['nqn']
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def delete_nvmf_subsystem(self, params=None):
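        # Find the index of the subsystem with the requested NQN and delete
        # it after the scan.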
found_id = -1
i = 0
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
found_id = i
i += 1
if found_id != -1:
del self.nvmf_subsystems[found_id]
return json.dumps({"result": {}})
def nvmf_subsystem_create(self, params=None):
nvmf_subsystem = {
"namespaces": [],
"nqn": params['nqn'],
"serial_number": "S0000000000000000001",
"allow_any_host": False,
"subtype": "NVMe",
"hosts": [],
"listen_addresses": []
}
self.nvmf_subsystems.append(nvmf_subsystem)
        return json.dumps({"result": nvmf_subsystem})

def nvmf_subsystem_add_listener(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['listen_addresses'].append(
params['listen_address']
)
        return json.dumps({"result": ""})

def nvmf_subsystem_add_ns(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['namespaces'].append(
params['namespace']
)
        return json.dumps({"result": ""})

def call(self, method, params=None):
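        # Build the JSON-RPC 2.0 envelope, dispatch to the local handler and
        # unwrap the result, raising JSONRPCException on an error response,
        # mirroring the driver's _rpc_call contract.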
req = {}
req['jsonrpc'] = '2.0'
req['method'] = method
req['id'] = 1
        if params:
req['params'] = params
response = json.loads(self.methods[method](params))
if not response:
return {}
if 'error' in response:
msg = "\n".join(["Got JSON-RPC error response",
"request:",
json.dumps(req, indent=2),
"response:",
json.dumps(response['error'], indent=2)])
raise JSONRPCException(msg)
        return response['result']


class Target(object):
def __init__(self, name="Nvme0n1p0"):
        self.name = name


class SpdkNvmfDriverTestCase(test.TestCase):
def setUp(self):
super(SpdkNvmfDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.target_ip_address = '192.168.0.1'
self.configuration.target_port = '4420'
self.configuration.target_prefix = ""
self.configuration.nvmet_port_id = "1"
self.configuration.nvmet_ns_id = "fake_id"
self.configuration.nvmet_subsystem_name = "nqn.2014-08.io.spdk"
self.configuration.target_protocol = "nvmet_rdma"
self.configuration.spdk_rpc_ip = "127.0.0.1"
self.configuration.spdk_rpc_port = 8000
self.driver = spdknvmf_driver.SpdkNvmf(configuration=
self.configuration)
        self.jsonrpcclient = JSONRPCClient()

def test__get_spdk_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
bdevs = self.driver._rpc_call("get_bdevs")
bdev_name = bdevs[0]['name']
volume_name = self.driver._get_spdk_volume_name(bdev_name)
self.assertEqual(bdev_name, volume_name)
volume_name = self.driver._get_spdk_volume_name("fake")
            self.assertIsNone(volume_name)

def test__get_nqn_with_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
nqn = self.driver._get_nqn_with_volume_name("Nvme0n1p0")
nqn_tmp = self.driver._rpc_call("get_nvmf_subsystems")[1]['nqn']
self.assertEqual(nqn, nqn_tmp)
nqn = self.driver._get_nqn_with_volume_name("fake")
            self.assertIsNone(nqn)

def test__get_first_free_node(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
free_node = self.driver._get_first_free_node()
            self.assertEqual(3, free_node)

def test_create_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("get_nvmf_subsystems")
self.driver.create_nvmeof_target("Nvme0n1p1",
"nqn.2016-06.io.spdk",
"192.168.0.1",
4420, "rdma", -1, -1, "")
subsystems_last = self.driver._rpc_call("get_nvmf_subsystems")
            self.assertEqual(len(subsystems_first) + 1, len(subsystems_last))

def test_delete_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("get_nvmf_subsystems")
target = Target()
self.driver.delete_nvmeof_target(target)
subsystems_last = self.driver._rpc_call("get_nvmf_subsystems")
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
target.name = "fake"
self.driver.delete_nvmeof_target(target)
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))

@@ -91,12 +91,13 @@ volume_opts = [
cfg.StrOpt('target_helper',
default='tgtadm',
choices=['tgtadm', 'lioadm', 'scstadmin', 'iscsictl',
-                        'ietadm', 'nvmet', 'fake'],
+                        'ietadm', 'nvmet', 'spdk-nvmeof', 'fake'],
help='Target user-land tool to use. tgtadm is default, '
'use lioadm for LIO iSCSI support, scstadmin for SCST '
'target support, ietadm for iSCSI Enterprise Target, '
'iscsictl for Chelsio iSCSI Target, nvmet for NVMEoF '
-                    'support, or fake for testing.'),
+                    'support, spdk-nvmeof for SPDK NVMe-oF, '
+                    'or fake for testing.'),
cfg.StrOpt('volumes_dir',
default='$state_path/volumes',
help='Volume configuration file storage '
@@ -408,7 +409,8 @@ class BaseVD(object):
'tgtadm': 'cinder.volume.targets.tgt.TgtAdm',
'scstadmin': 'cinder.volume.targets.scst.SCSTAdm',
'iscsictl': 'cinder.volume.targets.cxt.CxtAdm',
-            'nvmet': 'cinder.volume.targets.nvmet.NVMET'}
+            'nvmet': 'cinder.volume.targets.nvmet.NVMET',
+            'spdk-nvmeof': 'cinder.volume.targets.spdknvmf.SpdkNvmf'}
# set True by manager after successful check_for_setup
self._initialized = False

@@ -0,0 +1,189 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import string

from oslo_config import cfg
from oslo_log import log as logging
import requests

from cinder import exception
from cinder.i18n import _
from cinder.volume import configuration
from cinder.volume.targets import nvmeof
from cinder.volume import utils

spdk_opts = [
cfg.StrOpt('spdk_rpc_ip',
help='The NVMe target remote configuration IP address.'),
cfg.PortOpt('spdk_rpc_port',
default=8000,
help='The NVMe target remote configuration port.'),
cfg.StrOpt('spdk_rpc_username',
help='The NVMe target remote configuration username.'),
cfg.StrOpt('spdk_rpc_password',
help='The NVMe target remote configuration password.',
secret=True),
]

CONF = cfg.CONF
CONF.register_opts(spdk_opts, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)


class SpdkNvmf(nvmeof.NVMeOF):
def __init__(self, *args, **kwargs):
super(SpdkNvmf, self).__init__(*args, **kwargs)
self.configuration.append_config_values(spdk_opts)
self.url = ('http://%(ip)s:%(port)s/' %
{'ip': self.configuration.spdk_rpc_ip,
'port': self.configuration.spdk_rpc_port})
        # The SPDK NVMe-oF target application requires the RDMA transport
        # type to be created once each time it is started. A second
        # creation attempt fails, which is expected behavior.
try:
params = {
'trtype': 'rdma',
}
self._rpc_call('nvmf_create_transport', params)
except Exception:
            pass

def _rpc_call(self, method, params=None):
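        # POST a JSON-RPC 2.0 request over HTTP with basic auth and return
        # the response's 'result' field; a non-2xx response raises
        # VolumeBackendAPIException.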
payload = {}
payload['jsonrpc'] = '2.0'
payload['id'] = 1
payload['method'] = method
if params is not None:
payload['params'] = params
req = requests.post(self.url,
data=json.dumps(payload),
auth=(self.configuration.spdk_rpc_username,
self.configuration.spdk_rpc_password),
verify=self.configuration.driver_ssl_cert_verify,
timeout=30)
if not req.ok:
raise exception.VolumeBackendAPIException(
data=_('SPDK target responded with error: %s') % req.text)
        return req.json()['result']

def _get_spdk_volume_name(self, name):
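        # Translate a Cinder volume name into the name of the SPDK bdev
        # whose aliases contain it; returns None implicitly when not found.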
output = self._rpc_call('get_bdevs')
for bdev in output:
for alias in bdev['aliases']:
if name in alias:
                    return bdev['name']

def _get_nqn_with_volume_name(self, name):
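        # Return the NQN of the subsystem exposing the given volume, if any;
        # output[0] is the discovery subsystem and is skipped.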
output = self._rpc_call('get_nvmf_subsystems')
spdk_name = self._get_spdk_volume_name(name)
if spdk_name is not None:
for subsystem in output[1:]:
for namespace in subsystem['namespaces']:
if spdk_name in namespace['bdev_name']:
                        return subsystem['nqn']

def _get_first_free_node(self):
cnode_num = []
output = self._rpc_call('get_nvmf_subsystems')
        # Collect node numbers from subsystem NQNs of the form
        # nqn.2016-06.io.spdk:cnodeN.
for subsystem in output[1:]:
cnode_num.append(int(subsystem['nqn'].split("cnode")[1]))
test_set = set(range(1, len(cnode_num) + 2))
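        # The candidate set is one larger than the number of used nodes, so
        # a free number always exists, e.g. cnode1 and cnode3 in use gives
        # {1, 2, 3} - {1, 3} -> 2.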
        return list(test_set.difference(cnode_num))[0]

def create_nvmeof_target(self,
volume_id,
subsystem_name,
target_ip,
target_port,
transport_type,
nvmet_port_id,
ns_id,
volume_path):
LOG.debug('SPDK create target')
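        # Reuse the subsystem already exporting this volume when present;
        # otherwise create one on the first free cnode, attach a listener
        # and add the volume's bdev as a namespace.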
nqn = self._get_nqn_with_volume_name(volume_id)
if nqn is None:
node = self._get_first_free_node()
nqn = '%s:cnode%s' % (subsystem_name, node)
choice = string.ascii_uppercase + string.digits
serial = ''.join(
utils.generate_password(length=12, symbolgroups=choice))
params = {
'nqn': nqn,
'allow_any_host': True,
'serial_number': serial,
}
self._rpc_call('nvmf_subsystem_create', params)
listen_address = {
'trtype': transport_type,
'traddr': target_ip,
'trsvcid': str(target_port),
}
params = {
'nqn': nqn,
'listen_address': listen_address,
}
self._rpc_call('nvmf_subsystem_add_listener', params)
ns = {
'bdev_name': self._get_spdk_volume_name(volume_id),
'nsid': ns_id,
}
params = {
'nqn': nqn,
'namespace': ns,
}
self._rpc_call('nvmf_subsystem_add_ns', params)
location = self.get_nvmeof_location(
nqn,
target_ip,
target_port,
transport_type,
ns_id)
        return {'location': location, 'auth': '', 'provider_id': nqn}

def delete_nvmeof_target(self, target_name):
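        # target_name is a target object whose 'name' attribute holds the
        # volume name; nothing is deleted when no subsystem exposes it.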
LOG.debug('SPDK delete target: %s', target_name)
nqn = self._get_nqn_with_volume_name(target_name.name)
if nqn is not None:
try:
params = {'nqn': nqn}
self._rpc_call('delete_nvmf_subsystem', params)
LOG.debug('SPDK subsystem %s deleted', nqn)
except Exception as e:
LOG.debug('SPDK ERROR: subsystem not deleted: %s', e)

@@ -0,0 +1,7 @@
---
features:
- |
    A new target, spdk-nvmeof, is added for the SPDK driver over RDMA.
    It allows cinder to use an SPDK target to create and delete
    subsystems when attaching and detaching an SPDK volume to and from
    an instance.
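
As a usage sketch, a cinder.conf backend section enabling this target could look like the lines below. The option names (target_helper, target_protocol, target_ip_address, target_port, spdk_rpc_ip, spdk_rpc_port, spdk_rpc_username, spdk_rpc_password) come from this change; the section name, addresses, and credentials are illustrative assumptions, with values mirroring the ones used in the unit tests.

    [spdk_backend]
    # Select the SPDK NVMe-oF target helper added by this change.
    target_helper = spdk-nvmeof
    # Transport and listener settings consumed by the NVMe-oF target base class.
    target_protocol = nvmet_rdma
    target_ip_address = 192.168.0.1
    target_port = 4420
    # Address and port of the SPDK JSON-RPC service the driver talks to.
    spdk_rpc_ip = 127.0.0.1
    spdk_rpc_port = 8000
    # Assumed credentials for the RPC endpoint (sent via HTTP basic auth).
    spdk_rpc_username = spdk_user
    spdk_rpc_password = spdk_pass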