OpenStack Block Storage (Cinder)
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 

1903 lines
83 KiB

  1. # Nimble Storage, Inc. (c) 2013-2014
  2. # All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  5. # not use this file except in compliance with the License. You may obtain
  6. # a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  12. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  13. # License for the specific language governing permissions and limitations
  14. # under the License.
  15. """
  16. Volume driver for Nimble Storage.
  17. This driver supports Nimble Storage controller CS-Series and Nimble AF Arrays.
  18. """
  19. import abc
  20. import eventlet
  21. import functools
  22. import json
  23. import random
  24. import re
  25. import requests
  26. import six
  27. import string
  28. import sys
  29. from oslo_config import cfg
  30. from oslo_log import log as logging
  31. from oslo_utils import units
  32. from cinder import exception
  33. from cinder.i18n import _
  34. from cinder import interface
  35. from cinder.objects import volume
  36. from cinder import utils
  37. from cinder.volume import configuration
  38. from cinder.volume import driver
  39. from cinder.volume.drivers.san import san
  40. from cinder.volume import volume_types
  41. from cinder.zonemanager import utils as fczm_utils
# Driver release version reported to the Cinder scheduler (not the array
# firmware version).
DRIVER_VERSION = "4.0.1"
# Cipher names used for volume encryption settings.
AES_256_XTS_CIPHER = 'aes_256_xts'
DEFAULT_CIPHER = 'none'
# Volume-type extra-spec keys recognized by this driver.
EXTRA_SPEC_ENCRYPTION = 'nimble:encryption'
EXTRA_SPEC_PERF_POLICY = 'nimble:perfpol-name'
EXTRA_SPEC_MULTI_INITIATOR = 'nimble:multi-initiator'
EXTRA_SPEC_DEDUPE = 'nimble:dedupe'
EXTRA_SPEC_IOPS_LIMIT = 'nimble:iops-limit'
EXTRA_SPEC_FOLDER = 'nimble:folder'
# Defaults applied when the corresponding extra spec is absent.
DEFAULT_PERF_POLICY_SETTING = 'default'
DEFAULT_ENCRYPTION_SETTING = 'no'
DEFAULT_DEDUPE_SETTING = 'false'
DEFAULT_IOPS_LIMIT_SETTING = None
DEFAULT_MULTI_INITIATOR_SETTING = 'false'
DEFAULT_FOLDER_SETTING = None
# Effectively "unlimited" snapshot quota.
DEFAULT_SNAP_QUOTA = sys.maxsize
# Prefix used by the force-backup workflow when naming clone volumes.
BACKUP_VOL_PREFIX = 'backup-vol-'
# agent_type values stamped on array volumes to mark OpenStack ownership
# (the "v2" flavor marks group-scoped-target managed volumes).
AGENT_TYPE_OPENSTACK = 'openstack'
AGENT_TYPE_OPENSTACK_GST = 'openstackv2'
AGENT_TYPE_NONE = 'none'
SM_SUBNET_DATA = 'data'
SM_SUBNET_MGMT_PLUS_DATA = 'mgmt-data'
# Substrings matched against array error messages to classify failures.
SM_STATE_MSG = "is already in requested state"
SM_OBJ_EXIST_MSG = "Object exists"
SM_OBJ_ENOENT_MSG = "No such object"
SM_OBJ_HAS_CLONE = "has a clone"
IOPS_ERR_MSG = "Please set valid IOPS limit in the range"
# Default LUN id used for volume-scoped targets.
LUN_ID = '0'
# Volume usage warn threshold, in percent.
WARN_LEVEL = 80
DEFAULT_SLEEP = 5
# Valid IOPS-limit range accepted by the array.
MIN_IOPS = 256
MAX_IOPS = 4294967294
NimbleDefaultVersion = 1
LOG = logging.getLogger(__name__)
# Driver-specific configuration options, registered under the shared
# backend configuration group below.
nimble_opts = [
    cfg.StrOpt('nimble_pool_name',
               default='default',
               help='Nimble Controller pool name'),
    cfg.StrOpt('nimble_subnet_label',
               default='*',
               help='Nimble Subnet Label'),
    cfg.BoolOpt('nimble_verify_certificate',
                default=False,
                help='Whether to verify Nimble SSL Certificate'),
    cfg.StrOpt('nimble_verify_cert_path',
               help='Path to Nimble Array SSL certificate'), ]
CONF = cfg.CONF
CONF.register_opts(nimble_opts, group=configuration.SHARED_CONF_GROUP)
class NimbleDriverException(exception.VolumeDriverException):
    """Generic Nimble driver failure."""
    message = _("Nimble Cinder Driver exception")


class NimbleAPIException(exception.VolumeBackendAPIException):
    """Raised when the Nimble REST API returns an unexpected response."""
    message = _("Unexpected response from Nimble API")


class NimbleVolumeBusyException(exception.VolumeIsBusy):
    """Raised when an operation cannot proceed because the volume is busy."""
    message = _("Nimble Cinder Driver: Volume Busy")
class NimbleBaseVolumeDriver(san.SanDriver):
    """OpenStack driver to enable Nimble Controller.

    Version history:

    .. code-block:: none

        1.0 - Initial driver
        1.1.1 - Updated VERSION to Nimble driver version
        1.1.2 - Update snap-quota to unlimited
        2.0.0 - Added Extra Spec Capability
                Correct capacity reporting
                Added Manage/Unmanage volume support
        2.0.1 - Added multi-initiator support through extra-specs
        2.0.2 - Fixed supporting extra specs while cloning vols
        3.0.0 - Newton Support for Force Backup
        3.1.0 - Fibre Channel Support
        4.0.0 - Migrate from SOAP to REST API
                Add support for Group Scoped Target
        4.0.1 - Add QoS and dedupe support
    """
    VERSION = DRIVER_VERSION
    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Nimble_Storage_CI"

    def __init__(self, *args, **kwargs):
        super(NimbleBaseVolumeDriver, self).__init__(*args, **kwargs)
        # REST API executor; created later in do_setup().
        self.APIExecutor = None
        # Cached backend stats returned by get_volume_stats().
        self.group_stats = {}
        self.api_protocol = None
        # Set by protocol subclasses ("iSCSI" or "FC").
        self._storage_protocol = None
        # True once group scoped target is confirmed/enabled on the array.
        self._group_target_enabled = False
        self.configuration.append_config_values(nimble_opts)
        # ``verify`` is passed to requests: False, True, or a CA bundle path.
        self.verify = False
        if self.configuration.nimble_verify_certificate is True:
            self.verify = self.configuration.nimble_verify_cert_path or True
  128. def _check_config(self):
  129. """Ensure that the flags we care about are set."""
  130. required_config = ['san_ip', 'san_login', 'san_password']
  131. for attr in required_config:
  132. if not getattr(self.configuration, attr, None):
  133. raise exception.InvalidInput(reason=_('%s is not set.') %
  134. attr)
    def create_volume(self, volume):
        """Create a new volume.

        :param volume: cinder volume object to create on the array
        :return: model update dict with provider_location/provider_auth
        """
        # Thick-provision (reserve space) unless san_thin_provision is set.
        reserve = not self.configuration.san_thin_provision
        self.APIExecutor.create_vol(
            volume,
            self.configuration.nimble_pool_name, reserve,
            self._storage_protocol,
            self._group_target_enabled)
        return self._get_model_info(volume['name'])
    def is_volume_backup_clone(self, volume):
        """check if the volume is created through cinder-backup workflow.

        :param volume: cinder volume object to inspect
        :return: tuple of (backup snapshot name, snapshot's parent volume
            name) when the volume is a backup clone, otherwise ("", "")
        """
        vol_info = self.APIExecutor.get_vol_info(volume['name'])
        LOG.debug("is_clone: %(is_clone)s base_snap_id: %(snap)s, "
                  "parent_vol_id: %(vol)s",
                  {'is_clone': vol_info['clone'],
                   'snap': vol_info['base_snap_id'],
                   'vol': vol_info['parent_vol_id']})
        if vol_info['base_snap_id'] and (
                vol_info['parent_vol_id'] is not None):
            LOG.debug("Nimble base-snap exists for volume %(vol)s",
                      {'vol': volume['name']})
            # Strip the volume id to recover the deployment's configured
            # volume-name prefix (e.g. "volume-").
            volume_name_prefix = volume['name'].replace(volume['id'], "")
            LOG.debug("volume_name_prefix : %(prefix)s",
                      {'prefix': volume_name_prefix})
            snap_id = self.APIExecutor.get_snap_info_by_id(
                vol_info['base_snap_id'],
                vol_info['parent_vol_id'])
            snap_info = self.APIExecutor.get_snap_info_detail(snap_id['id'])
            LOG.debug("snap_info description %(snap_info)s",
                      {'snap_info': snap_info['description']})
            if snap_info['description'] and BACKUP_VOL_PREFIX in (
                    snap_info['description']):
                # TODO(rkumar): get parent vol id from parent volume name
                parent_vol_name = self.APIExecutor.get_volume_name(
                    vol_info['parent_vol_id'])
                parent_vol_id = parent_vol_name. replace(
                    volume_name_prefix, "")
                # The backup snapshot's description embeds the backup volume
                # id; match it to confirm this really is a backup clone.
                if BACKUP_VOL_PREFIX + parent_vol_id in snap_info[
                        'description']:
                    LOG.info('Nimble backup-snapshot exists name=%('
                             'name)s', {'name': snap_info['name']})
                    snap_vol_name = self.APIExecutor.get_volume_name(
                        snap_info['vol_id'])
                    LOG.debug("snap_vol_name %(snap)s",
                              {'snap': snap_vol_name})
                    return snap_info['name'], snap_vol_name
        return "", ""
    def delete_volume(self, volume):
        """Delete the specified volume.

        :param volume: cinder volume object to delete
        :raises VolumeIsBusy: if the array reports a dependent clone
        """
        # Look up the force-backup snapshot/volume pair (if any) BEFORE the
        # volume is deleted, so it can be cleaned up afterwards.
        backup_snap_name, backup_vol_name = self.is_volume_backup_clone(volume)
        eventlet.sleep(DEFAULT_SLEEP)
        # The array requires a volume to be offline before deletion.
        self.APIExecutor.online_vol(volume['name'], False)
        LOG.debug("Deleting volume %(vol)s", {'vol': volume['name']})
        try:
            self.APIExecutor.delete_vol(volume['name'])
        except NimbleAPIException as ex:
            LOG.debug("delete volume exception: %s", ex)
            if SM_OBJ_HAS_CLONE in six.text_type(ex):
                LOG.warning('Volume %(vol)s : %(state)s',
                            {'vol': volume['name'],
                             'state': SM_OBJ_HAS_CLONE})
                # set the volume back to be online and raise busy exception
                self.APIExecutor.online_vol(volume['name'], True)
                raise exception.VolumeIsBusy(volume_name=volume['name'])
            raise
        # Nimble backend does not delete the snapshot from the parent volume
        # if there is a dependent clone. So the deletes need to be in reverse
        # order i.e.
        # 1. First delete the clone volume used for backup
        # 2. Delete the base snapshot used for clone from the parent volume.
        # This is only done for the force backup clone operation as it is
        # a temporary operation in which we are certain that the snapshot does
        # not need to be preserved after the backup is completed.
        if (backup_snap_name != "" and backup_vol_name != "") and (
                backup_snap_name is not None):
            LOG.debug("Delete volume backup vol: %(vol)s snap: %(snap)s",
                      {'vol': backup_vol_name,
                       'snap': backup_snap_name})
            self.APIExecutor.online_snap(backup_vol_name,
                                         False,
                                         backup_snap_name)
            self.APIExecutor.delete_snap(backup_vol_name,
                                         backup_snap_name)
  220. def _generate_random_string(self, length):
  221. """Generates random_string."""
  222. char_set = string.ascii_lowercase
  223. return ''.join(random.sample(char_set, length))
  224. def _clone_volume_from_snapshot(self, volume, snapshot):
  225. """Clone volume from snapshot.
  226. Extend the volume if the size of the volume is more than the snapshot.
  227. """
  228. reserve = not self.configuration.san_thin_provision
  229. pool_name = self.configuration.nimble_pool_name
  230. self.APIExecutor.clone_vol(volume, snapshot, reserve,
  231. self._group_target_enabled,
  232. self._storage_protocol,
  233. pool_name)
  234. if(volume['size'] > snapshot['volume_size']):
  235. vol_size = volume['size'] * units.Ki
  236. reserve_size = 100 if reserve else 0
  237. data = {"data": {'size': vol_size,
  238. 'reserve': reserve_size,
  239. 'warn_level': int(WARN_LEVEL),
  240. 'limit': 100,
  241. 'snap_limit': DEFAULT_SNAP_QUOTA}}
  242. LOG.debug("Edit Vol request %(data)s", {'data': data})
  243. self.APIExecutor.edit_vol(volume['name'], data)
  244. return self._get_model_info(volume['name'])
    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume.

        A transient snapshot of the source volume is taken and the new
        volume is cloned from it.

        :param volume: new cinder volume object
        :param src_vref: source volume to clone from
        :return: model update dict for the new volume
        """
        # Unique snapshot name to avoid collisions across concurrent clones.
        snapshot_name = ('openstack-clone-' +
                         volume['name'] + '-' +
                         self._generate_random_string(12))
        snapshot = {'volume_name': src_vref['name'],
                    'name': snapshot_name,
                    'volume_size': src_vref['size'],
                    'display_name': volume.display_name,
                    'display_description': ''}
        self.APIExecutor.snap_vol(snapshot)
        self._clone_volume_from_snapshot(volume, snapshot)
        return self._get_model_info(volume['name'])
    def create_export(self, context, volume, connector):
        """Driver entry point to get the export info for a new volume."""
        return self._get_model_info(volume['name'])

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        return self._get_model_info(volume['name'])

    def create_snapshot(self, snapshot):
        """Create a snapshot of the snapshot's volume on the array."""
        self.APIExecutor.snap_vol(snapshot)
    def delete_snapshot(self, snapshot):
        """Delete a snapshot.

        The snapshot must be taken offline on the array before deletion.
        """
        self.APIExecutor.online_snap(
            snapshot['volume_name'],
            False,
            snapshot['name'])
        self.APIExecutor.delete_snap(snapshot['volume_name'],
                                     snapshot['name'])

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot.

        :return: model update dict for the new volume
        """
        self._clone_volume_from_snapshot(volume, snapshot)
        return self._get_model_info(volume['name'])
  279. def _enable_group_scoped_target(self, group_info):
  280. if 'version_current' in group_info:
  281. current_version = group_info['version_current']
  282. major_minor = current_version.split(".")
  283. if len(major_minor) >= 3:
  284. major = major_minor[0]
  285. minor = major_minor[1]
  286. # TODO(rkumar): Fix the major version
  287. if int(major) >= 4 and int(minor) >= 0:
  288. # Enforce group scoped target
  289. if 'group_target_enabled' in group_info:
  290. if group_info['group_target_enabled'] is False:
  291. try:
  292. self.APIExecutor.enable_group_scoped_target()
  293. except Exception:
  294. raise NimbleAPIException(_("Unable to enable"
  295. " GST"))
  296. self._group_target_enabled = True
  297. LOG.info("Group Scoped Target enabled for "
  298. "group %(group)s: %(ip)s",
  299. {'group': group_info['name'],
  300. 'ip': self.configuration.san_ip})
  301. elif 'group_target_enabled' not in group_info:
  302. LOG.info("Group Scoped Target NOT "
  303. "present for group %(group)s: "
  304. "%(ip)s",
  305. {'group': group_info['name'],
  306. 'ip': self.configuration.san_ip})
  307. else:
  308. raise NimbleAPIException(_("Unable to get current software "
  309. "version for %s"),
  310. self.configuration.san_ip)
  311. def get_volume_stats(self, refresh=False):
  312. """Get volume stats. This is more of getting group stats."""
  313. if refresh:
  314. group_info = self.APIExecutor.get_group_info()
  315. if 'usage_valid' not in group_info:
  316. raise NimbleDriverException(_('SpaceInfo returned by'
  317. 'array is invalid'))
  318. total_capacity = (group_info['usable_capacity_bytes'] /
  319. float(units.Gi))
  320. used_space = ((group_info['compressed_vol_usage_bytes'] +
  321. group_info['compressed_snap_usage_bytes'] +
  322. group_info['unused_reserve_bytes']) /
  323. float(units.Gi))
  324. free_space = total_capacity - used_space
  325. LOG.debug('total_capacity=%(capacity)f '
  326. 'used_space=%(used)f free_space=%(free)f',
  327. {'capacity': total_capacity,
  328. 'used': used_space,
  329. 'free': free_space})
  330. backend_name = self.configuration.safe_get(
  331. 'volume_backend_name') or self.__class__.__name__
  332. self.group_stats = {'volume_backend_name': backend_name,
  333. 'vendor_name': 'Nimble',
  334. 'driver_version': DRIVER_VERSION,
  335. 'storage_protocol': self._storage_protocol}
  336. # Just use a single pool for now, FIXME to support multiple
  337. # pools
  338. single_pool = dict(
  339. pool_name=backend_name,
  340. total_capacity_gb=total_capacity,
  341. free_capacity_gb=free_space,
  342. reserved_percentage=0,
  343. QoS_support=False)
  344. self.group_stats['pools'] = [single_pool]
  345. return self.group_stats
  346. def extend_volume(self, volume, new_size):
  347. """Extend an existing volume."""
  348. volume_name = volume['name']
  349. LOG.info('Entering extend_volume volume=%(vol)s '
  350. 'new_size=%(size)s',
  351. {'vol': volume_name, 'size': new_size})
  352. vol_size = int(new_size) * units.Ki
  353. reserve = not self.configuration.san_thin_provision
  354. reserve_size = 100 if reserve else 0
  355. LOG.debug("new volume size in MB (size)s", {'size': vol_size})
  356. data = {"data": {'size': vol_size,
  357. 'reserve': reserve_size,
  358. 'warn_level': int(WARN_LEVEL),
  359. 'limit': 100,
  360. 'snap_limit': DEFAULT_SNAP_QUOTA}}
  361. self.APIExecutor.edit_vol(volume_name, data)
  362. def _get_existing_volume_ref_name(self, existing_ref):
  363. """Returns the volume name of an existing ref"""
  364. vol_name = None
  365. if 'source-name' in existing_ref:
  366. vol_name = existing_ref['source-name']
  367. else:
  368. reason = _("Reference must contain source-name.")
  369. raise exception.ManageExistingInvalidReference(
  370. existing_ref=existing_ref,
  371. reason=reason)
  372. return vol_name
  373. def _get_volumetype_extraspecs_with_type(self, type_id):
  374. specs = {}
  375. if type_id is not None:
  376. specs = volume_types.get_volume_type_extra_specs(type_id)
  377. return specs
    def retype(self, context, volume, new_type, diff, host):
        """Retype from one volume type to another.

        At this point HPE Nimble Storage does not differentiate between
        volume types on the same array. This is a no-op for us if there are
        no extra specs else honor the extra-specs.

        :return: tuple (retype handled, model update); always (True, None)
            on success
        :raises NimbleAPIException: if the type id is missing or the edit
            fails on the array
        """
        if new_type is None:
            return True, None
        LOG.debug("retype called with volume_type %s", new_type)
        volume_type_id = new_type['id']
        if volume_type_id is None:
            raise NimbleAPIException(_("No volume_type_id present in"
                                       " %(type)s") % {'type': new_type})
        LOG.debug("volume_type id is %s", volume_type_id)
        specs_map = self._get_volumetype_extraspecs_with_type(
            volume_type_id)
        if specs_map is None:
            # no extra specs to retype
            LOG.debug("volume_type %s has no extra specs", volume_type_id)
            return True, None
        vol_info = self.APIExecutor.get_vol_info(volume['name'])
        LOG.debug("new extra specs %s", specs_map)
        # Translate cinder extra specs into an array edit payload.
        data = self.APIExecutor.get_valid_nimble_extraspecs(specs_map,
                                                            vol_info)
        if data is None:
            # return if there is no update
            LOG.debug("no data to update for %s", new_type)
            return True, None
        try:
            # offline the volume before edit
            self.APIExecutor.online_vol(volume['name'], False)
            # modify the volume
            LOG.debug("updated volume %s", data)
            self.APIExecutor.edit_vol(volume['name'], data)
            # make the volume online after changing the specs
            self.APIExecutor.online_vol(volume['name'], True)
        except NimbleAPIException as ex:
            # NOTE(review): relies on the exception exposing ``message`` —
            # confirm against the cinder exception base class.
            raise NimbleAPIException(_("Unable to retype %(vol)s to "
                                       "%(type)s: %(err)s") %
                                     {'vol': volume['name'],
                                      'type': new_type,
                                      'err': ex.message})
        return True, None
    def manage_existing(self, volume, external_ref):
        """Manage an existing nimble volume (import to cinder)

        :param volume: cinder volume object to map the array volume onto
        :param external_ref: dict with 'source-name' naming the array volume
        :return: model update dict for the managed volume
        :raises ManageExistingAlreadyManaged: if already owned by OpenStack
        :raises InvalidVolume: if agent-type, online state or ACLs prevent
            managing
        """
        # Get the volume name from the external reference
        target_vol_name = self._get_existing_volume_ref_name(external_ref)
        LOG.debug('Entering manage_existing. '
                  'Target_volume_name =%s', target_vol_name)
        # Get vol info from the volume name obtained from the reference
        vol_info = self.APIExecutor.get_vol_info(target_vol_name)
        # Check if volume is already managed by OpenStack
        if vol_info['agent_type'] == AGENT_TYPE_OPENSTACK or (
                vol_info['agent_type'] == AGENT_TYPE_OPENSTACK_GST):
            raise exception.ManageExistingAlreadyManaged(
                volume_ref=volume['id'])
        # If agent-type is not None then raise exception
        if vol_info['agent_type'] != AGENT_TYPE_NONE:
            msg = (_('Volume should have agent-type set as None.'))
            raise exception.InvalidVolume(reason=msg)
        new_vol_name = volume['name']
        LOG.info("Volume status before managing it : %(status)s",
                 {'status': vol_info['online']})
        if vol_info['online'] is True:
            msg = (_('Volume %s is online. Set volume to offline for '
                     'managing using OpenStack.') % target_vol_name)
            raise exception.InvalidVolume(reason=msg)
        # edit the volume: rename to the cinder name and stamp agent_type
        data = {'data': {'name': new_vol_name}}
        if self._group_target_enabled is True:
            # check if any ACL's are attached to this volume
            if 'access_control_records' in vol_info and (
                    vol_info['access_control_records'] is not None):
                msg = (_('Volume %s has ACL associated with it. Remove ACL '
                         'for managing using OpenStack') % target_vol_name)
                raise exception.InvalidVolume(reason=msg)
            data['data']['agent_type'] = AGENT_TYPE_OPENSTACK_GST
        else:
            data['data']['agent_type'] = AGENT_TYPE_OPENSTACK
        LOG.debug("Data for edit %(data)s", {'data': data})
        self.APIExecutor.edit_vol(target_vol_name, data)
        # make the volume online after rename
        self.APIExecutor.online_vol(new_vol_name, True)
        return self._get_model_info(new_vol_name)
  462. def manage_existing_get_size(self, volume, external_ref):
  463. """Return size of an existing volume"""
  464. LOG.debug('Volume name : %(name)s External ref : %(ref)s',
  465. {'name': volume['name'], 'ref': external_ref})
  466. target_vol_name = self._get_existing_volume_ref_name(external_ref)
  467. # get vol info
  468. vol_info = self.APIExecutor.get_vol_info(target_vol_name)
  469. LOG.debug('Volume size : %(size)s Volume-name : %(name)s',
  470. {'size': vol_info['size'], 'name': vol_info['name']})
  471. return int(vol_info['size'] / units.Ki)
    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        :param volume: cinder volume object to release
        :raises InvalidVolume: if the volume is not OpenStack-managed
        """
        vol_name = volume['name']
        LOG.debug("Entering unmanage_volume volume =%s", vol_name)
        # check agent type
        vol_info = self.APIExecutor.get_vol_info(vol_name)
        if vol_info['agent_type'] != AGENT_TYPE_OPENSTACK and (
                vol_info['agent_type'] != AGENT_TYPE_OPENSTACK_GST):
            msg = (_('Only volumes managed by OpenStack can be unmanaged.'))
            raise exception.InvalidVolume(reason=msg)
        data = {'data': {'agent_type': AGENT_TYPE_NONE}}
        # update the agent-type to None
        self.APIExecutor.edit_vol(vol_name, data)
        # offline the volume
        self.APIExecutor.online_vol(vol_name, False)
    def do_setup(self, context):
        """Setup the Nimble Cinder volume driver.

        Validates configuration, creates the REST API executor, enables
        group scoped target for iSCSI backends, and reconciles the
        agent_type on pre-existing volumes.
        """
        self._check_config()
        # Setup API Executor
        try:
            self.APIExecutor = NimbleRestAPIExecutor(
                username=self.configuration.san_login,
                password=self.configuration.san_password,
                ip=self.configuration.san_ip,
                verify=self.verify)
            # GST negotiation only applies to iSCSI backends.
            if self._storage_protocol == "iSCSI":
                group_info = self.APIExecutor.get_group_info()
                self._enable_group_scoped_target(group_info)
        except Exception:
            LOG.error('Failed to create REST client. '
                      'Check san_ip, username, password'
                      ' and make sure the array version is compatible')
            raise
        self._update_existing_vols_agent_type(context)
    def _update_existing_vols_agent_type(self, context):
        """Reconcile agent_type on available volumes of this backend.

        Handles upgrade/downgrade between volume scoped ('openstack') and
        group scoped ('openstackv2') agent types; errors are logged but do
        not fail driver initialization.
        """
        backend_name = self.configuration.safe_get('volume_backend_name')
        all_vols = volume.VolumeList.get_all(
            context, None, None, None, None, {'status': 'available'})
        for vol in all_vols:
            if backend_name in vol.host:
                try:
                    vol_info = self.APIExecutor.get_vol_info(vol.name)
                    # update agent_type only if no ACL's are present
                    if 'access_control_records' in vol_info and (
                            vol_info['access_control_records'] is None):
                        if self._group_target_enabled:
                            LOG.debug("Updating %(vol)s to have agent_type :"
                                      "%(agent)s",
                                      {'vol': vol.name,
                                       'agent': AGENT_TYPE_OPENSTACK_GST})
                            # check if this is an upgrade case from
                            # openstack to openstackv2
                            if vol_info['agent_type'] == AGENT_TYPE_NONE:
                                data = {'data': {'agent_type':
                                        AGENT_TYPE_OPENSTACK_GST}}
                                self.APIExecutor.edit_vol(vol.name, data)
                            elif vol_info['agent_type'] == (
                                    AGENT_TYPE_OPENSTACK):
                                # 1. update the agent type to None
                                data = {'data': {'agent_type':
                                        AGENT_TYPE_NONE}}
                                self.APIExecutor.edit_vol(vol.name, data)
                                # 2. update the agent type to openstack_gst
                                data = {'data': {'agent_type':
                                        AGENT_TYPE_OPENSTACK_GST}}
                                self.APIExecutor.edit_vol(vol.name, data)
                        else:
                            LOG.debug("Updating %(vol)s to have agent_type :"
                                      "%(agent)s",
                                      {'vol': vol.name,
                                       'agent': AGENT_TYPE_OPENSTACK_GST})
                            if vol_info['agent_type'] == AGENT_TYPE_NONE:
                                data = {'data': {'agent_type':
                                        AGENT_TYPE_OPENSTACK}}
                                self.APIExecutor.edit_vol(vol.name, data)
                            elif vol_info['agent_type'] == (
                                    AGENT_TYPE_OPENSTACK_GST):
                                # 1. update the agent type to None
                                data = {'data': {'agent_type':
                                        AGENT_TYPE_NONE}}
                                self.APIExecutor.edit_vol(vol.name, data)
                                # 2. update the agent type to openstack
                                data = {'data': {'agent_type':
                                        AGENT_TYPE_OPENSTACK}}
                                self.APIExecutor.edit_vol(vol.name, data)
                except NimbleAPIException:
                    # just log the error but don't fail driver initialization
                    LOG.warning('Error updating agent-type for '
                                'volume %s.', vol.name)
    def _get_model_info(self, volume_name):
        """Get model info for the volume.

        :return: dict with protocol-specific provider_location and a None
            provider_auth
        """
        return (
            {'provider_location': self._get_provider_location(volume_name),
             'provider_auth': None})

    @abc.abstractmethod
    def _get_provider_location(self, volume_name):
        """Volume info for iSCSI and FC.

        Implemented by the protocol-specific subclasses.
        """
        pass
    def _create_igroup_for_initiator(self, initiator_name, wwpns):
        """Creates igroup for an initiator and returns the igroup name.

        :param initiator_name: iSCSI IQN (used for iSCSI backends)
        :param wwpns: iterable of FC WWPNs (used for FC backends)
        """
        igrp_name = 'openstack-' + self._generate_random_string(12)
        LOG.info('Creating initiator group %(grp)s '
                 'with initiator %(iname)s',
                 {'grp': igrp_name, 'iname': initiator_name})
        if self._storage_protocol == "iSCSI":
            self.APIExecutor.create_initiator_group(igrp_name)
            self.APIExecutor.add_initiator_to_igroup(igrp_name, initiator_name)
        elif self._storage_protocol == "FC":
            self.APIExecutor.create_initiator_group_fc(igrp_name)
            for wwpn in wwpns:
                self.APIExecutor.add_initiator_to_igroup_fc(igrp_name, wwpn)
        return igrp_name
  584. def _get_igroupname_for_initiator_fc(self, initiator_wwpns):
  585. initiator_groups = self.APIExecutor.get_initiator_grp_list()
  586. for initiator_group in initiator_groups:
  587. if 'fc_initiators' in initiator_group and initiator_group[
  588. 'fc_initiators'] is not None:
  589. wwpns_list = []
  590. for initiator in initiator_group['fc_initiators']:
  591. wwpn = str(initiator['wwpn']).replace(":", "")
  592. wwpns_list.append(wwpn)
  593. LOG.debug("initiator_wwpns=%(initiator)s "
  594. "wwpns_list_from_array=%(wwpns)s",
  595. {'initiator': initiator_wwpns,
  596. 'wwpns': wwpns_list})
  597. if set(initiator_wwpns) == set(wwpns_list):
  598. LOG.info('igroup %(grp)s found for '
  599. 'initiator %(wwpns_list)s',
  600. {'grp': initiator_group['name'],
  601. 'wwpns_list': wwpns_list})
  602. return initiator_group['name']
  603. LOG.info('No igroup found for initiators %s', initiator_wwpns)
  604. return ''
    def _get_igroupname_for_initiator(self, initiator_name):
        """Return the name of an igroup holding exactly this iSCSI IQN.

        Only single-initiator groups are matched; returns '' when no group
        matches.
        """
        initiator_groups = self.APIExecutor.get_initiator_grp_list()
        for initiator_group in initiator_groups:
            if initiator_group['iscsi_initiators'] is not None:
                if (len(initiator_group['iscsi_initiators']) == 1 and
                        initiator_group['iscsi_initiators'][0]['iqn'] ==
                        initiator_name):
                    LOG.info('igroup %(grp)s found for '
                             'initiator %(iname)s',
                             {'grp': initiator_group['name'],
                              'iname': initiator_name})
                    return initiator_group['name']
        LOG.info('No igroup found for initiator %s', initiator_name)
        return ''
  619. def get_lun_number(self, volume, initiator_group_name):
  620. vol_info = self.APIExecutor.get_vol_info(volume['name'])
  621. for acl in vol_info['access_control_records']:
  622. if (initiator_group_name == acl['initiator_group_name']):
  623. LOG.info("access_control_record =%(acl)s",
  624. {'acl': acl})
  625. lun = acl['lun']
  626. LOG.info("LUN : %(lun)s", {"lun": lun})
  627. return lun
  628. raise NimbleAPIException(_("Lun number not found for volume %(vol)s "
  629. "with initiator_group: %(igroup)s") %
  630. {'vol': volume['name'],
  631. 'igroup': initiator_group_name})
@interface.volumedriver
class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver):
    """OpenStack driver to enable Nimble ISCSI Controller."""

    def __init__(self, *args, **kwargs):
        super(NimbleISCSIDriver, self).__init__(*args, **kwargs)
        self._storage_protocol = "iSCSI"
        # Group scoped target IQN; populated by _set_gst_for_group() when
        # the array has GST enabled.
        self._group_target_name = None

    def _set_gst_for_group(self):
        # Cache the group target name when the backend reports GST enabled
        # together with a usable target name.
        group_info = self.APIExecutor.get_group_info()
        if 'group_target_enabled' in group_info and (
                group_info['group_target_enabled']) is True and (
                'group_target_name' in group_info) and (
                group_info['group_target_name'] is not None):
            self._group_target_name = group_info['group_target_name']

    def _get_gst_for_group(self):
        # None means the array uses volume scoped targets.
        return self._group_target_name
    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance.

        :param volume: cinder volume object being attached
        :param connector: dict with the host's 'initiator' IQN
        :return: iSCSI connection properties dict
        """
        LOG.info('Entering initialize_connection volume=%(vol)s'
                 ' connector=%(conn)s location=%(loc)s',
                 {'vol': volume,
                  'conn': connector,
                  'loc': volume['provider_location']})
        initiator_name = connector['initiator']
        # Reuse an existing single-initiator igroup or create a new one.
        initiator_group_name = self._get_igroupname_for_initiator(
            initiator_name)
        if not initiator_group_name:
            initiator_group_name = self._create_igroup_for_initiator(
                initiator_name, None)
        LOG.info('Initiator group name is %(grp)s for initiator '
                 '%(iname)s',
                 {'grp': initiator_group_name, 'iname': initiator_name})
        self.APIExecutor.add_acl(volume, initiator_group_name)
        properties = {"driver_volume_type": "iscsi",
                      "data": {"target_discovered": False},
                      }
        properties['data']['volume_id'] = volume['id']  # used by xen currently
        # provider_location is "<portal> <iqn>" (set at create time).
        (iscsi_portal, iqn) = volume['provider_location'].split()
        if self._get_gst_for_group() is not None:
            # Group scoped target: multipath-style properties with one
            # portal per data IP, all sharing the group IQN and LUN.
            lun_num = self.get_lun_number(volume, initiator_group_name)
            netconfig = self.APIExecutor.get_netconfig('active')
            target_portals = self._get_data_ips(netconfig)
            LOG.info("target portals %(portals)s", {'portals': target_portals})
            target_luns = [int(lun_num)] * len(target_portals)
            target_iqns = [iqn] * len(target_portals)
            LOG.debug("target iqns %(iqns)s target luns %(luns)s",
                      {'iqns': target_iqns, 'luns': target_luns})
            if target_luns and target_iqns and target_portals:
                properties["data"]["target_luns"] = target_luns
                properties["data"]["target_iqns"] = target_iqns
                properties["data"]["target_portals"] = target_portals
        else:
            # handling volume scoped target
            lun_num = LUN_ID
            properties['data']['target_portal'] = iscsi_portal
            properties['data']['target_iqn'] = iqn
            properties['data']['target_lun'] = int(lun_num)
        return properties
    def terminate_connection(self, volume, connector, **kwargs):
        """Driver entry point to unattach a volume from an instance.

        When ``connector`` is None (force detach) all host ACLs on the
        volume are removed.

        :raises NimbleDriverException: if no igroup exists for the initiator
        """
        LOG.info('Entering terminate_connection volume=%(vol)s'
                 ' connector=%(conn)s location=%(loc)s.',
                 {'vol': volume['name'],
                  'conn': connector,
                  'loc': volume['provider_location']})
        if connector is None:
            LOG.warning("Removing ALL host connections for volume %s",
                        volume)
            self.APIExecutor.remove_all_acls(volume)
            return
        initiator_name = connector['initiator']
        initiator_group_name = self._get_igroupname_for_initiator(
            initiator_name)
        if not initiator_group_name:
            raise NimbleDriverException(_('No initiator group found for '
                                          'initiator %s') % initiator_name)
        self.APIExecutor.remove_acl(volume, initiator_group_name)
        # Give the array time to settle before the caller proceeds.
        eventlet.sleep(DEFAULT_SLEEP)
    def _get_provider_location(self, volume_name):
        """Get volume iqn for initiator access.

        :return: string "<discovery-ip>:3260 <iqn>"; the IQN is the group
            target when GST is enabled, otherwise the volume's own target.
        """
        vol_info = self.APIExecutor.get_vol_info(volume_name)
        netconfig = self.APIExecutor.get_netconfig('active')
        self._set_gst_for_group()
        if self._get_gst_for_group() is not None:
            iqn = self._get_gst_for_group()
        else:
            iqn = vol_info['target_name']
        target_ipaddr = self._get_discovery_ip(netconfig)
        iscsi_portal = target_ipaddr + ':3260'
        provider_location = '%s %s' % (iscsi_portal, iqn)
        LOG.info('vol_name=%(name)s provider_location=%(loc)s',
                 {'name': volume_name, 'loc': provider_location})
        return provider_location
  725. def _get_data_ips(self, netconfig):
  726. """Get data ips."""
  727. subnet_label = self.configuration.nimble_subnet_label
  728. LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
  729. {'netlabel': subnet_label, 'netconf': netconfig})
  730. ret_data_ips = []
  731. for subnet in netconfig['array_list'][0]['nic_list']:
  732. LOG.info('Exploring array subnet label %s', subnet[
  733. 'subnet_label'])
  734. if subnet['data_ip']:
  735. if subnet_label == '*':
  736. # if all subnets are mentioned then return all portals
  737. # else just return specific subnet
  738. LOG.info('Data ip %(data_ip)s is used '
  739. 'on data subnet %(net_label)s',
  740. {'data_ip': subnet['data_ip'],
  741. 'net_label': subnet['subnet_label']})
  742. ret_data_ips.append(str(subnet['data_ip']) + ':3260')
  743. elif subnet_label == subnet['subnet_label']:
  744. LOG.info('Data ip %(data_ip)s is used'
  745. ' on subnet %(net_label)s',
  746. {'data_ip': subnet['data_ip'],
  747. 'net_label': subnet['subnet_label']})
  748. data_ips_single_subnet = []
  749. data_ips_single_subnet.append(str(subnet['data_ip']) +
  750. ':3260')
  751. return data_ips_single_subnet
  752. if ret_data_ips:
  753. LOG.info('Data ips %s', ret_data_ips)
  754. return ret_data_ips
  755. else:
  756. raise NimbleDriverException(_('No suitable data ip found'))
  757. def _get_discovery_ip(self, netconfig):
  758. """Get discovery ip."""
  759. subnet_label = self.configuration.nimble_subnet_label
  760. LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
  761. {'netlabel': subnet_label, 'netconf': netconfig})
  762. ret_discovery_ip = ''
  763. for subnet in netconfig['subnet_list']:
  764. LOG.info('Exploring array subnet label %s', subnet['label'])
  765. if subnet_label == '*':
  766. # Use the first data subnet, save mgmt+data for later
  767. if subnet['type'] == SM_SUBNET_DATA:
  768. LOG.info('Discovery ip %(disc_ip)s is used '
  769. 'on data subnet %(net_label)s',
  770. {'disc_ip': subnet['discovery_ip'],
  771. 'net_label': subnet['label']})
  772. return subnet['discovery_ip']
  773. elif (subnet['type'] == SM_SUBNET_MGMT_PLUS_DATA):
  774. LOG.info('Discovery ip %(disc_ip)s is found'
  775. ' on mgmt+data subnet %(net_label)s',
  776. {'disc_ip': subnet['discovery_ip'],
  777. 'net_label': subnet['label']})
  778. ret_discovery_ip = subnet['discovery_ip']
  779. # If subnet is specified and found, use the subnet
  780. elif subnet_label == subnet['label']:
  781. LOG.info('Discovery ip %(disc_ip)s is used'
  782. ' on subnet %(net_label)s',
  783. {'disc_ip': subnet['discovery_ip'],
  784. 'net_label': subnet['label']})
  785. return subnet['discovery_ip']
  786. if ret_discovery_ip:
  787. LOG.info('Discovery ip %s is used on mgmt+data subnet',
  788. ret_discovery_ip)
  789. return ret_discovery_ip
  790. else:
  791. raise NimbleDriverException(_('No suitable discovery ip found'))
  792. @interface.volumedriver
  793. class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
  794. """OpenStack driver to enable Nimble FC Driver Controller."""
  795. def __init__(self, *args, **kwargs):
  796. super(NimbleFCDriver, self).__init__(*args, **kwargs)
  797. self._storage_protocol = "FC"
  798. self._lookup_service = fczm_utils.create_lookup_service()
  799. def _get_provider_location(self, volume_name):
  800. """Get array info wwn details."""
  801. netconfig = self.APIExecutor.get_netconfig('active')
  802. array_name = netconfig['group_leader_array']
  803. provider_location = '%s' % (array_name)
  804. LOG.info('vol_name=%(name)s provider_location=%(loc)s',
  805. {'name': volume_name, 'loc': provider_location})
  806. return provider_location
  807. def _build_initiator_target_map(self, target_wwns, connector):
  808. """Build the target_wwns and the initiator target map."""
  809. LOG.debug("_build_initiator_target_map for %(wwns)s",
  810. {'wwns': target_wwns})
  811. init_targ_map = {}
  812. if self._lookup_service:
  813. # use FC san lookup to determine which wwpns to use
  814. # for the new VLUN.
  815. dev_map = self._lookup_service.get_device_mapping_from_network(
  816. connector['wwpns'],
  817. target_wwns)
  818. map_fabric = dev_map
  819. LOG.info("dev_map =%(fabric)s", {'fabric': map_fabric})
  820. for fabric_name in dev_map:
  821. fabric = dev_map[fabric_name]
  822. for initiator in fabric['initiator_port_wwn_list']:
  823. if initiator not in init_targ_map:
  824. init_targ_map[initiator] = []
  825. init_targ_map[initiator] += fabric['target_port_wwn_list']
  826. init_targ_map[initiator] = list(set(
  827. init_targ_map[initiator]))
  828. else:
  829. init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns)
  830. return init_targ_map
  831. @fczm_utils.add_fc_zone
  832. def initialize_connection(self, volume, connector):
  833. """Driver entry point to attach a volume to an instance."""
  834. LOG.info('Entering initialize_connection volume=%(vol)s'
  835. ' connector=%(conn)s location=%(loc)s',
  836. {'vol': volume,
  837. 'conn': connector,
  838. 'loc': volume['provider_location']})
  839. wwpns = []
  840. initiator_name = connector['initiator']
  841. for wwpn in connector['wwpns']:
  842. wwpns.append(wwpn)
  843. initiator_group_name = self._get_igroupname_for_initiator_fc(wwpns)
  844. if not initiator_group_name:
  845. initiator_group_name = self._create_igroup_for_initiator(
  846. initiator_name, wwpns)
  847. LOG.info('Initiator group name is %(grp)s for initiator '
  848. '%(iname)s',
  849. {'grp': initiator_group_name, 'iname': initiator_name})
  850. self.APIExecutor.add_acl(volume, initiator_group_name)
  851. lun = self.get_lun_number(volume, initiator_group_name)
  852. init_targ_map = {}
  853. (array_name) = volume['provider_location'].split()
  854. target_wwns = self.get_wwpns_from_array(array_name)
  855. init_targ_map = self._build_initiator_target_map(target_wwns,
  856. connector)
  857. data = {'driver_volume_type': 'fibre_channel',
  858. 'data': {'target_lun': lun,
  859. 'target_discovered': True,
  860. 'target_wwn': target_wwns,
  861. 'initiator_target_map': init_targ_map}}
  862. LOG.info("Return FC data for zone addition: %(data)s.",
  863. {'data': data})
  864. return data
  865. @fczm_utils.remove_fc_zone
  866. def terminate_connection(self, volume, connector, **kwargs):
  867. """Driver entry point to unattach a volume from an instance."""
  868. LOG.info('Entering terminate_connection volume=%(vol)s'
  869. ' connector=%(conn)s location=%(loc)s.',
  870. {'vol': volume,
  871. 'conn': connector,
  872. 'loc': volume['provider_location']})
  873. wwpns = []
  874. if connector is None:
  875. LOG.warning("Removing ALL host connections for volume %s",
  876. volume)
  877. self.APIExecutor.remove_all_acls(volume)
  878. return
  879. initiator_name = connector['initiator']
  880. for wwpn in connector['wwpns']:
  881. wwpns.append(wwpn)
  882. (array_name) = volume['provider_location'].split()
  883. target_wwns = self.get_wwpns_from_array(array_name)
  884. init_targ_map = self._build_initiator_target_map(target_wwns,
  885. connector)
  886. initiator_group_name = self._get_igroupname_for_initiator_fc(wwpns)
  887. if not initiator_group_name:
  888. raise NimbleDriverException(
  889. _('No initiator group found for initiator %s') %
  890. initiator_name)
  891. LOG.debug("initiator_target_map %s", init_targ_map)
  892. self.APIExecutor.remove_acl(volume, initiator_group_name)
  893. eventlet.sleep(DEFAULT_SLEEP)
  894. # FIXME to check for other volumes attached to the host and then
  895. # return the data. Bug https://bugs.launchpad.net/cinder/+bug/1617472
  896. data = {'driver_volume_type': 'fibre_channel',
  897. 'data': {'target_wwn': target_wwns}}
  898. return data
  899. def get_wwpns_from_array(self, array_name):
  900. """Retrieve the wwpns from the array"""
  901. LOG.debug("get_wwpns_from_array %s", array_name)
  902. target_wwpns = []
  903. interface_info = self.APIExecutor.get_fc_interface_list(array_name)
  904. LOG.info("interface_info %(interface_info)s",
  905. {"interface_info": interface_info})
  906. for wwpn_list in interface_info:
  907. wwpn = wwpn_list['wwpn']
  908. wwpn = wwpn.replace(":", "")
  909. target_wwpns.append(wwpn)
  910. return target_wwpns
  911. def _convert_string_to_colon_separated_wwnn(self, wwnn):
  912. return ':'.join(a + b for a, b in zip(wwnn[::2], wwnn[1::2]))
  913. def _connection_checker(func):
  914. """Decorator to re-establish and re-run the api if session has expired."""
  915. @functools.wraps(func)
  916. def inner_connection_checker(self, *args, **kwargs):
  917. for attempts in range(2):
  918. try:
  919. return func(self, *args, **kwargs)
  920. except Exception as e:
  921. if attempts < 1 and (re.search("Failed to execute",
  922. six.text_type(e))):
  923. LOG.info('Session might have expired.'
  924. ' Trying to relogin')
  925. self.login()
  926. continue
  927. else:
  928. LOG.error('Re-throwing Exception %s', e)
  929. raise
  930. return inner_connection_checker
  931. class NimbleRestAPIExecutor(object):
  932. """Makes Nimble REST API calls."""
  933. def __init__(self, api_version=NimbleDefaultVersion, *args, **kwargs):
  934. self.token_id = None
  935. self.ip = kwargs['ip']
  936. self.username = kwargs['username']
  937. self.password = kwargs['password']
  938. self.verify = kwargs['verify']
  939. self.api_version = api_version
  940. self.uri = "https://%(ip)s:5392/v%(version)s/" % {
  941. 'ip': self.ip,
  942. 'version': self.api_version}
  943. self.login()
  944. def login(self):
  945. data = {'data': {"username": self.username,
  946. "password": self.password,
  947. "app_name": "NimbleCinderDriver"}}
  948. r = requests.post(self.uri + "tokens",
  949. data=json.dumps(data),
  950. verify=self.verify)
  951. if r.status_code != 201 and r.status_code != 200:
  952. msg = _("Failed to login for user %s"), self.username
  953. raise NimbleAPIException(msg)
  954. self.token_id = r.json()['data']['session_token']
  955. self.headers = {'X-Auth-Token': self.token_id}
  956. def get_group_id(self):
  957. api = 'groups'
  958. r = self.get(api)
  959. if not r.json()['data']:
  960. raise NimbleAPIException(_("Unable to retrieve Group Object for : "
  961. "%s") % self.ip)
  962. return r.json()['data'][0]['id']
  963. def get_group_info(self):
  964. group_id = self.get_group_id()
  965. api = 'groups/' + six.text_type(group_id)
  966. r = self.get(api)
  967. if not r.json()['data']:
  968. raise NimbleAPIException(_("Unable to retrieve Group info for: %s")
  969. % group_id)
  970. return r.json()['data']
  971. def get_folder_id(self, folder_name):
  972. api = 'folders'
  973. filter = {"name": folder_name}
  974. r = self.get_query(api, filter)
  975. if not r.json()['data']:
  976. raise NimbleAPIException(_("Unable to retrieve information for "
  977. "Folder: %s") % folder_name)
  978. return r.json()['data'][0]['id']
  979. def get_folder_info(self, folder_name):
  980. folder_id = self.get_folder_id(folder_name)
  981. api = "folders/" + six.text_type(folder_id)
  982. r = self.get(api)
  983. if not r.json()['data']:
  984. raise NimbleAPIException(_("Unable to retrieve Folder info for: "
  985. "%s") % folder_id)
  986. return r.json()['data']
  987. def get_performance_policy_id(self, perf_policy_name):
  988. api = 'performance_policies/'
  989. filter = {'name': perf_policy_name}
  990. LOG.debug("Performance policy Name %s", perf_policy_name)
  991. r = self.get_query(api, filter)
  992. if not r.json()['data']:
  993. raise NimbleAPIException(_("No performance policy found for:"
  994. "%(perf)s") % {'perf': perf_policy_name})
  995. LOG.debug("Performance policy ID :%(perf)s",
  996. {'perf': r.json()['data'][0]['id']})
  997. return r.json()['data'][0]['id']
  998. def get_netconfig(self, role):
  999. api = "network_configs/detail"
  1000. filter = {'role': role}
  1001. r = self.get_query(api, filter)
  1002. if not r.json()['data']:
  1003. raise NimbleAPIException(_("No %s network config exists") % role)
  1004. return r.json()['data'][0]
  1005. def _get_volumetype_extraspecs(self, volume):
  1006. specs = {}
  1007. type_id = volume['volume_type_id']
  1008. if type_id is not None:
  1009. specs = volume_types.get_volume_type_extra_specs(type_id)
  1010. return specs
  1011. def _get_extra_spec_values(self, extra_specs):
  1012. """Nimble specific extra specs."""
  1013. perf_policy_name = extra_specs.get(EXTRA_SPEC_PERF_POLICY,
  1014. DEFAULT_PERF_POLICY_SETTING)
  1015. encryption = extra_specs.get(EXTRA_SPEC_ENCRYPTION,
  1016. DEFAULT_ENCRYPTION_SETTING)
  1017. multi_initiator = extra_specs.get(EXTRA_SPEC_MULTI_INITIATOR,
  1018. DEFAULT_MULTI_INITIATOR_SETTING)
  1019. iops_limit = extra_specs.get(EXTRA_SPEC_IOPS_LIMIT,
  1020. DEFAULT_IOPS_LIMIT_SETTING)
  1021. folder_name = extra_specs.get(EXTRA_SPEC_FOLDER,
  1022. DEFAULT_FOLDER_SETTING)
  1023. dedupe = extra_specs.get(EXTRA_SPEC_DEDUPE,
  1024. DEFAULT_DEDUPE_SETTING)
  1025. extra_specs_map = {}
  1026. extra_specs_map[EXTRA_SPEC_PERF_POLICY] = perf_policy_name
  1027. extra_specs_map[EXTRA_SPEC_ENCRYPTION] = encryption
  1028. extra_specs_map[EXTRA_SPEC_MULTI_INITIATOR] = multi_initiator
  1029. extra_specs_map[EXTRA_SPEC_IOPS_LIMIT] = iops_limit
  1030. extra_specs_map[EXTRA_SPEC_DEDUPE] = dedupe
  1031. extra_specs_map[EXTRA_SPEC_FOLDER] = folder_name
  1032. return extra_specs_map
  1033. def get_valid_nimble_extraspecs(self, extra_specs_map, vol_info):
  1034. extra_specs_map_updated = self._get_extra_spec_values(extra_specs_map)
  1035. data = {"data": {}}
  1036. perf_policy_name = extra_specs_map_updated[EXTRA_SPEC_PERF_POLICY]
  1037. perf_policy_id = self.get_performance_policy_id(perf_policy_name)
  1038. data['perfpolicy_id'] = perf_policy_id
  1039. encrypt = extra_specs_map_updated[EXTRA_SPEC_ENCRYPTION]
  1040. cipher = DEFAULT_CIPHER
  1041. if encrypt.lower() == 'yes':
  1042. cipher = AES_256_XTS_CIPHER
  1043. data['cipher'] = cipher
  1044. multi_initiator = extra_specs_map_updated[EXTRA_SPEC_MULTI_INITIATOR]
  1045. data['multi_initiator'] = multi_initiator
  1046. folder_name = extra_specs_map_updated[EXTRA_SPEC_FOLDER]
  1047. folder_id = None
  1048. pool_id = vol_info['pool_id']
  1049. pool_name = vol_info['pool_name']
  1050. if folder_name is not None:
  1051. # validate if folder exists in pool_name
  1052. pool_info = self.get_pool_info(pool_id)
  1053. if 'folder_list' in pool_info and (pool_info['folder_list'] is
  1054. not None):
  1055. for folder_list in pool_info['folder_list']:
  1056. LOG.debug("folder_list : %s", folder_list)
  1057. if folder_list['fqn'] == "/" + folder_name:
  1058. LOG.debug("Folder %(folder)s present in pool "
  1059. "%(pool)s",
  1060. {'folder': folder_name,
  1061. 'pool': pool_name})
  1062. folder_id = self.get_folder_id(folder_name)
  1063. if folder_id is not None:
  1064. data['data']["folder_id"] = folder_id
  1065. if folder_id is None:
  1066. raise NimbleAPIException(_("Folder '%(folder)s' not "
  1067. "present in pool '%("
  1068. "pool)s'") %
  1069. {'folder': folder_name,
  1070. 'pool': pool_name})
  1071. else:
  1072. raise NimbleAPIException(_(
  1073. "Folder '%(folder)s' not present in pool '%(pool)s'")
  1074. % {'folder': folder_name,
  1075. 'pool': pool_name})
  1076. iops_limit = extra_specs_map_updated[EXTRA_SPEC_IOPS_LIMIT]
  1077. if iops_limit is not None:
  1078. if not iops_limit.isdigit() or (
  1079. int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS):
  1080. raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]")
  1081. % {'err': IOPS_ERR_MSG,
  1082. 'min': MIN_IOPS,
  1083. 'max': MAX_IOPS})
  1084. data['data']['limit_iops'] = iops_limit
  1085. dedupe = extra_specs_map_updated[EXTRA_SPEC_DEDUPE]
  1086. if dedupe.lower() == 'true':
  1087. data['data']['dedupe_enabled'] = True
  1088. return data
  1089. def create_vol(self, volume, pool_name, reserve, protocol, is_gst_enabled):
  1090. response = self._execute_create_vol(volume, pool_name, reserve,
  1091. protocol, is_gst_enabled)
  1092. LOG.info('Successfully created volume %(name)s',
  1093. {'name': response['name']})
  1094. return response['name']
  1095. def _is_ascii(self, value):
  1096. try:
  1097. return all(ord(c) < 128 for c in value)
  1098. except TypeError:
  1099. return False
  1100. def _execute_create_vol(self, volume, pool_name, reserve, protocol,
  1101. is_gst_enabled):
  1102. """Create volume
  1103. :return: r['data']
  1104. """
  1105. # Set volume size, display name and description
  1106. volume_size = volume['size'] * units.Ki
  1107. reserve_size = 100 if reserve else 0
  1108. # Set volume description
  1109. display_name = getattr(volume, 'display_name', '')
  1110. display_description = getattr(volume, 'display_description', '')
  1111. if self._is_ascii(display_name) and self._is_ascii(
  1112. display_description):
  1113. display_list = [getattr(volume, 'display_name', ''),
  1114. getattr(volume, 'display_description', '')]
  1115. description = ':'.join(filter(None, display_list))
  1116. elif self._is_ascii(display_name):
  1117. description = display_name
  1118. elif self._is_ascii(display_description):
  1119. description = display_description
  1120. else:
  1121. description = ""
  1122. # Limit description size to 254 characters
  1123. description = description[:254]
  1124. pool_id = self.get_pool_id(pool_name)
  1125. specs = self._get_volumetype_extraspecs(volume)
  1126. extra_specs_map = self._get_extra_spec_values(specs)
  1127. perf_policy_name = extra_specs_map[EXTRA_SPEC_PERF_POLICY]
  1128. perf_policy_id = self.get_performance_policy_id(perf_policy_name)
  1129. encrypt = extra_specs_map[EXTRA_SPEC_ENCRYPTION]
  1130. multi_initiator = extra_specs_map[EXTRA_SPEC_MULTI_INITIATOR]
  1131. folder_name = extra_specs_map[EXTRA_SPEC_FOLDER]
  1132. iops_limit = extra_specs_map[EXTRA_SPEC_IOPS_LIMIT]
  1133. dedupe = extra_specs_map[EXTRA_SPEC_DEDUPE]
  1134. cipher = DEFAULT_CIPHER
  1135. if encrypt.lower() == 'yes':
  1136. cipher = AES_256_XTS_CIPHER
  1137. if is_gst_enabled is True:
  1138. agent_type = AGENT_TYPE_OPENSTACK_GST
  1139. else:
  1140. agent_type = AGENT_TYPE_OPENSTACK
  1141. LOG.debug('Creating a new volume=%(vol)s size=%(size)s'
  1142. ' reserve=%(reserve)s in pool=%(pool)s'
  1143. ' description=%(description)s with Extra Specs'
  1144. ' perfpol-name=%(perfpol-name)s'
  1145. ' encryption=%(encryption)s cipher=%(cipher)s'
  1146. ' agent-type=%(agent-type)s'
  1147. ' multi-initiator=%(multi-initiator)s',
  1148. {'vol': volume['name'],
  1149. 'size': volume_size,
  1150. 'reserve': reserve_size,
  1151. 'pool': pool_name,
  1152. 'description': description,
  1153. 'perfpol-name': perf_policy_name,
  1154. 'encryption': encrypt,
  1155. 'cipher': cipher,
  1156. 'agent-type': agent_type,
  1157. 'multi-initiator': multi_initiator})
  1158. data = {"data":
  1159. {'name': volume['name'],
  1160. 'description': description,
  1161. 'size': volume_size,
  1162. 'reserve': reserve_size,
  1163. 'warn_level': int(WARN_LEVEL),
  1164. 'limit': 100,
  1165. 'snap_limit': DEFAULT_SNAP_QUOTA,
  1166. 'online': True,
  1167. 'pool_id': pool_id,
  1168. 'agent_type': agent_type,
  1169. 'perfpolicy_id': perf_policy_id,
  1170. 'encryption_cipher': cipher}}
  1171. if protocol == "iSCSI":
  1172. data['data']['multi_initiator'] = multi_initiator
  1173. if dedupe.lower() == 'true':
  1174. data['data']['dedupe_enabled'] = True
  1175. folder_id = None
  1176. if folder_name is not None:
  1177. # validate if folder exists in pool_name
  1178. pool_info = self.get_pool_info(pool_id)
  1179. if 'folder_list' in pool_info and (pool_info['folder_list'] is
  1180. not None):
  1181. for folder_list in pool_info['folder_list']:
  1182. LOG.debug("folder_list : %s", folder_list)
  1183. if folder_list['fqn'] == "/" + folder_name:
  1184. LOG.debug("Folder %(folder)s present in pool "
  1185. "%(pool)s",
  1186. {'folder': folder_name,
  1187. 'pool': pool_name})
  1188. folder_id = self.get_folder_id(folder_name)
  1189. if folder_id is not None:
  1190. data['data']["folder_id"] = folder_id
  1191. if folder_id is None:
  1192. raise NimbleAPIException(_("Folder '%(folder)s' not "
  1193. "present in pool '%(pool)s'") %
  1194. {'folder': folder_name,
  1195. 'pool': pool_name})
  1196. else:
  1197. raise NimbleAPIException(_("Folder '%(folder)s' not present in"
  1198. " pool '%(pool)s'") %
  1199. {'folder': folder_name,
  1200. 'pool': pool_name})
  1201. if iops_limit is not None:
  1202. if not iops_limit.isdigit() or (
  1203. int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS):
  1204. raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") %
  1205. {'err': IOPS_ERR_MSG,
  1206. 'min': MIN_IOPS,
  1207. 'max': MAX_IOPS})
  1208. data['data']['limit_iops'] = iops_limit
  1209. LOG.debug("Volume metadata :%s", volume.metadata)
  1210. for key, value in volume.metadata.items():
  1211. LOG.debug("Key %(key)s Value %(value)s",
  1212. {'key': key, 'value': value})
  1213. if key == EXTRA_SPEC_IOPS_LIMIT and value.isdigit():
  1214. if type(value) == int or int(value) < MIN_IOPS or (
  1215. int(value) > MAX_IOPS):
  1216. raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") %
  1217. {'err': IOPS_ERR_MSG,
  1218. 'min': MIN_IOPS,
  1219. 'max': MAX_IOPS})
  1220. LOG.debug("IOPS Limit %s", value)
  1221. data['data']['limit_iops'] = value
  1222. LOG.debug("Data : %s", data)
  1223. api = 'volumes'
  1224. r = self.post(api, data)
  1225. return r['data']
  1226. def create_initiator_group(self, initiator_grp_name):
  1227. api = "initiator_groups"
  1228. data = {"data": {"name": initiator_grp_name,
  1229. "access_protocol": "iscsi",
  1230. }}
  1231. r = self.post(api, data)
  1232. return r['data']
  1233. def create_initiator_group_fc(self, initiator_grp_name):
  1234. api = "initiator_groups"
  1235. data = {}
  1236. data["data"] = {}
  1237. data["data"]["name"] = initiator_grp_name
  1238. data["data"]["access_protocol"] = "fc"
  1239. r = self.post(api, data)
  1240. return r['data']
  1241. def get_initiator_grp_id(self, initiator_grp_name):
  1242. api = "initiator_groups"
  1243. filter = {'name': initiator_grp_name}
  1244. r = self.get_query(api, filter)
  1245. return r.json()['data'][0]['id']
  1246. def add_initiator_to_igroup(self, initiator_grp_name, initiator_name):
  1247. initiator_group_id = self.get_initiator_grp_id(initiator_grp_name)
  1248. api = "initiators"
  1249. data = {"data": {
  1250. "access_protocol": "iscsi",
  1251. "initiator_group_id": initiator_group_id,
  1252. "label": initiator_name,
  1253. "iqn": initiator_name
  1254. }}
  1255. r = self.post(api, data)
  1256. return r['data']
  1257. def add_initiator_to_igroup_fc(self, initiator_grp_name, wwpn):
  1258. initiator_group_id = self.get_initiator_grp_id(initiator_grp_name)
  1259. api = "initiators"
  1260. data = {"data": {
  1261. "access_protocol": "fc",
  1262. "initiator_group_id": initiator_group_id,
  1263. "wwpn": self._format_to_wwpn(wwpn)
  1264. }}
  1265. r = self.post(api, data)
  1266. return r['data']
  1267. def get_pool_id(self, pool_name):
  1268. api = "pools/"
  1269. filter = {'name': pool_name}
  1270. r = self.get_query(api, filter)
  1271. if not r.json()['data']:
  1272. raise NimbleAPIException(_("Unable to retrieve information for "
  1273. "pool : %(pool)s") %
  1274. {'pool': pool_name})
  1275. return r.json()['data'][0]['id']
  1276. def get_pool_info(self, pool_id):
  1277. api = 'pools/' + six.text_type(pool_id)
  1278. r = self.get(api)
  1279. return r.json()['data']
  1280. def get_initiator_grp_list(self):
  1281. api = "initiator_groups/detail"
  1282. r = self.get(api)
  1283. if 'data' not in r.json():
  1284. raise NimbleAPIException(_("Unable to retrieve initiator group "
  1285. "list"))
  1286. LOG.info('Successfully retrieved InitiatorGrpList')
  1287. return r.json()['data']
  1288. def get_initiator_grp_id_by_name(self, initiator_group_name):
  1289. api = 'initiator_groups'
  1290. filter = {"name": initiator_group_name}
  1291. r = self.get_query(api, filter)
  1292. if not r.json()['data']:
  1293. raise NimbleAPIException(_("Unable to retrieve information for"
  1294. "initiator group : %s") %
  1295. initiator_group_name)
  1296. return r.json()['data'][0]['id']
  1297. def get_volume_id_by_name(self, name):
  1298. api = "volumes"
  1299. filter = {"name": name}
  1300. r = self.get_query(api, filter)
  1301. if not r.json()['data']:
  1302. raise NimbleAPIException(_("Unable to retrieve information for "
  1303. "volume: %s") % name)
  1304. return r.json()['data'][0]['id']
  1305. def get_volume_name(self, volume_id):
  1306. api = "volumes/" + six.text_type(volume_id)
  1307. r = self.get(api)
  1308. if not r.json()['data']:
  1309. raise NimbleAPIException(_("Unable to retrieve information for "
  1310. "volume: %s") % volume_id)
  1311. return r.json()['data']['name']
  1312. def add_acl(self, volume, initiator_group_name):
  1313. initiator_group_id = self.get_initiator_grp_id_by_name(
  1314. initiator_group_name)
  1315. volume_id = self.get_volume_id_by_name(volume['name'])
  1316. data = {'data': {"apply_to": 'both',
  1317. "initiator_group_id": initiator_group_id,
  1318. "vol_id": volume_id
  1319. }}
  1320. api = 'access_control_records'
  1321. try:
  1322. self.post(api, data)
  1323. except NimbleAPIException as ex:
  1324. LOG.debug("add_acl_exception: %s", ex)
  1325. if SM_OBJ_EXIST_MSG in six.text_type(ex):
  1326. LOG.warning('Volume %(vol)s : %(state)s',
  1327. {'vol': volume['name'],
  1328. 'state': SM_OBJ_EXIST_MSG})
  1329. else:
  1330. msg = (_("Add access control failed with error: %s") %
  1331. six.text_type(ex))
  1332. raise NimbleAPIException(msg)
  1333. def get_acl_record(self, volume_id, initiator_group_id):
  1334. filter = {"vol_id": volume_id,
  1335. "initiator_group_id": initiator_group_id}
  1336. api = "access_control_records"
  1337. r = self.get_query(api, filter)
  1338. if not r.json()['data']:
  1339. raise NimbleAPIException(_("Unable to retrieve ACL for volume: "
  1340. "%(vol)s %(igroup)s ") %
  1341. {'vol': volume_id,
  1342. 'igroup': initiator_group_id})
  1343. return r.json()['data'][0]
  1344. def get_volume_acl_records(self, volume_id):
  1345. api = "volumes/" + six.text_type(volume_id)
  1346. r = self.get(api)
  1347. if not r.json()['data']:
  1348. raise NimbleAPIException(_("Unable to retrieve information for "
  1349. "volume: %s") % volume_id)
  1350. return r.json()['data']['access_control_records']
  1351. def remove_all_acls(self, volume):
  1352. LOG.info("removing all access control list from volume=%(vol)s",
  1353. {"vol": volume['name']})
  1354. volume_id = self.get_volume_id_by_name(volume['name'])
  1355. acl_records = self.get_volume_acl_records(volume_id)
  1356. if acl_records is not None:
  1357. for acl_record in acl_records:
  1358. LOG.info("removing acl=%(acl)s with igroup=%(igroup)s",
  1359. {"acl": acl_record['id'],
  1360. "igroup": acl_record['initiator_group_name']})
  1361. self.remove_acl(volume, acl_record['initiator_group_name'])
  1362. def remove_acl(self, volume, initiator_group_name):
  1363. LOG.info("removing ACL from volume=%(vol)s"
  1364. "and %(igroup)s",
  1365. {"vol": volume['name'],
  1366. "igroup": initiator_group_name})
  1367. initiator_group_id = self.get_initiator_grp_id_by_name(
  1368. initiator_group_name)
  1369. volume_id = self.get_volume_id_by_name(volume['name'])
  1370. try:
  1371. acl_record = self.get_acl_record(volume_id, initiator_group_id)
  1372. LOG.debug("ACL Record %(acl)s", {"acl": acl_record})
  1373. acl_id = acl_record['id']
  1374. api = 'access_control_records/' + six.text_type(acl_id)
  1375. self.delete(api)
  1376. except NimbleAPIException as ex:
  1377. LOG.debug("remove_acl_exception: %s", ex)
  1378. if SM_OBJ_ENOENT_MSG in six.text_type(ex):
  1379. LOG.warning('Volume %(vol)s : %(state)s',
  1380. {'vol': volume['name'],
  1381. 'state': SM_OBJ_ENOENT_MSG})
  1382. else:
  1383. msg = (_("Remove access control failed with error: %s") %
  1384. six.text_type(ex))
  1385. raise NimbleAPIException(msg)
  1386. def get_snap_info_by_id(self, snap_id, vol_id):
  1387. filter = {"id": snap_id, "vol_id": vol_id}
  1388. api = 'snapshots'
  1389. r = self.get_query(api, filter)
  1390. if not r.json()['data']:
  1391. raise NimbleAPIException(_("Unable to retrieve snapshot info for "
  1392. "snap_id: %(snap)s volume id: %(vol)s")
  1393. % {'snap': snap_id,
  1394. 'vol': vol_id})
  1395. LOG.debug("SnapInfo :%s", r.json()['data'][0])
  1396. return r.json()['data'][0]
  1397. def get_snap_info(self, snap_name, vol_name):
  1398. filter = {"name": snap_name, "vol_name": vol_name}
  1399. api = 'snapshots'
  1400. r = self.get_query(api, filter)
  1401. if not r.json()['data']:
  1402. raise NimbleAPIException(_("Snapshot: %(snap)s of Volume: %(vol)s "
  1403. "doesn't exist") %
  1404. {'snap': snap_name,
  1405. 'vol': vol_name})
  1406. return r.json()['data'][0]
  1407. def get_snap_info_detail(self, snap_id):
  1408. api = 'snapshots/detail'
  1409. filter = {'id': snap_id}
  1410. r = self.get_query(api, filter)
  1411. if not r.json()['data']:
  1412. raise NimbleAPIException(_("Snapshot: %s doesn't exist") % snap_id)
  1413. return r.json()['data'][0]
  1414. @utils.retry(NimbleAPIException, 2, 3)
  1415. def online_vol(self, volume_name, online_flag):
  1416. volume_id = self.get_volume_id_by_name(volume_name)
  1417. LOG.debug("volume_id %s", six.text_type(volume_id))
  1418. eventlet.sleep(DEFAULT_SLEEP)
  1419. api = "volumes/" + six.text_type(volume_id)
  1420. data = {'data': {"online": online_flag, 'force': True}}
  1421. try:
  1422. LOG.debug("data :%s", data)
  1423. self.put(api, data)
  1424. LOG.debug("Volume %(vol)s is in requested online state :%(flag)s",
  1425. {'vol': volume_name,
  1426. 'flag': online_flag})
  1427. except Exception as ex:
  1428. msg = (_("Error %s") % ex)
  1429. LOG.debug("online_vol_exception: %s", msg)
  1430. if msg.__contains__("Object is %s" % SM_STATE_MSG):
  1431. LOG.warning('Volume %(vol)s : %(state)s',
  1432. {'vol': volume_name,
  1433. 'state': SM_STATE_MSG})
  1434. # TODO(rkumar): Check if we need to ignore the connected
  1435. # initiator
  1436. elif msg.__contains__("Initiators are connected to"):
  1437. raise NimbleAPIException(msg)
  1438. else:
  1439. raise exception.InvalidVolume(reason=msg)
  1440. def online_snap(self, volume_name, online_flag, snap_name):
  1441. snap_info = self.get_snap_info(snap_name, volume_name)
  1442. api = "snapshots/" + six.text_type(snap_info['id'])
  1443. data = {'data': {"online": online_flag}}
  1444. try:
  1445. self.put(api, data)
  1446. LOG.debug("Snapshot %(snap)s is in requested online state "
  1447. ":%(flag)s",
  1448. {'snap': snap_name, 'flag': online_flag})
  1449. except Exception as ex:
  1450. LOG.debug("online_snap_exception: %s", ex)
  1451. if six.text_type(ex).__contains__("Object %s" % SM_STATE_MSG):
  1452. LOG.warning('Snapshot %(snap)s :%(state)s',
  1453. {'snap': snap_name,
  1454. 'state': SM_STATE_MSG})
  1455. else:
  1456. raise
  1457. @utils.retry(NimbleAPIException, 2, 3)
  1458. def get_vol_info(self, volume_name):
  1459. volume_id = self.get_volume_id_by_name(volume_name)
  1460. api = 'volumes/' + six.text_type(volume_id)
  1461. r = self.get(api)
  1462. if not r.json()['data']:
  1463. raise exception.VolumeNotFound(_("Volume: %s not found") %
  1464. volume_name)
  1465. return r.json()['data']
  1466. def delete_vol(self, volume_name):
  1467. volume_id = self.get_volume_id_by_name(volume_name)
  1468. api = "volumes/" + six.text_type(volume_id)
  1469. self.delete(api)
  1470. def snap_vol(self, snapshot):
  1471. api = "snapshots"
  1472. volume_name = snapshot['volume_name']
  1473. vol_id = self.get_volume_id_by_name(volume_name)
  1474. snap_name = snapshot['name']
  1475. # Set snapshot description
  1476. display_list = [
  1477. getattr(snapshot, 'display_name', snapshot['display_name']),
  1478. getattr(snapshot, 'display_description', '')]
  1479. snap_description = ':'.join(filter(None, display_list))
  1480. # Limit to 254 characters
  1481. snap_description = snap_description[:254]
  1482. data = {"data": {"name": snap_name,
  1483. "description": snap_description,
  1484. "vol_id": vol_id
  1485. }
  1486. }
  1487. r = self.post(api, data)
  1488. return r['data']
  1489. def clone_vol(self, volume, snapshot, reserve, is_gst_enabled,
  1490. protocol, pool_name):
  1491. api = "volumes"
  1492. volume_name = snapshot['volume_name']
  1493. snap_name = snapshot['name']
  1494. snap_info = self.get_snap_info(snap_name, volume_name)
  1495. clone_name = volume['name']
  1496. snap_size = snapshot['volume_size']
  1497. reserve_size = 100 if reserve else 0
  1498. specs = self._get_volumetype_extraspecs(volume)
  1499. extra_specs_map = self._get_extra_spec_values(specs)
  1500. perf_policy_name = extra_specs_map.get(EXTRA_SPEC_PERF_POLICY)
  1501. perf_policy_id = self.get_performance_policy_id(perf_policy_name)
  1502. encrypt = extra_specs_map.get(EXTRA_SPEC_ENCRYPTION)
  1503. multi_initiator = extra_specs_map.get(EXTRA_SPEC_MULTI_INITIATOR)
  1504. iops_limit = extra_specs_map[EXTRA_SPEC_IOPS_LIMIT]
  1505. folder_name = extra_specs_map[EXTRA_SPEC_FOLDER]
  1506. pool_id = self.get_pool_id(pool_name)
  1507. # default value of cipher for encryption
  1508. cipher = DEFAULT_CIPHER
  1509. if encrypt.lower() == 'yes':
  1510. cipher = AES_256_XTS_CIPHER
  1511. if is_gst_enabled is True:
  1512. agent_type = AGENT_TYPE_OPENSTACK_GST
  1513. else:
  1514. agent_type = AGENT_TYPE_OPENSTACK
  1515. LOG.info('Cloning volume from snapshot volume=%(vol)s '
  1516. 'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s '
  1517. 'reserve=%(reserve)s' 'agent-type=%(agent-type)s '
  1518. 'perfpol-name=%(perfpol-name)s '
  1519. 'encryption=%(encryption)s cipher=%(cipher)s '
  1520. 'multi-initiator=%(multi-initiator)s',
  1521. {'vol': volume_name,
  1522. 'snap': snap_name,
  1523. 'clone': clone_name,
  1524. 'size': snap_size,
  1525. 'reserve': reserve_size,
  1526. 'agent-type': agent_type,
  1527. 'perfpol-name': perf_policy_name,
  1528. 'encryption': encrypt,
  1529. 'cipher': cipher,
  1530. 'multi-initiator': multi_initiator})
  1531. data = {"data": {"name": clone_name,
  1532. "clone": 'true',
  1533. "base_snap_id": snap_info['id'],
  1534. 'snap_limit': DEFAULT_SNAP_QUOTA,
  1535. 'warn_level': int(WARN_LEVEL),
  1536. 'limit': 100,
  1537. "online": 'true',
  1538. "reserve": reserve_size,
  1539. "agent_type": agent_type,
  1540. "perfpolicy_id": perf_policy_id,
  1541. "encryption_cipher": cipher
  1542. }
  1543. }
  1544. if protocol == "iSCSI":
  1545. data['data']['multi_initiator'] = multi_initiator
  1546. folder_id = None
  1547. if folder_name is not None:
  1548. # validate if folder exists in pool_name
  1549. pool_info = self.get_pool_info(pool_id)
  1550. if 'folder_list' in pool_info and (pool_info['folder_list'] is
  1551. not None):
  1552. for folder_list in pool_info['folder_list']:
  1553. LOG.debug("folder_list : %s", folder_list)
  1554. if folder_list['fqn'] == "/" + folder_name:
  1555. LOG.debug("Folder %(folder)s present in pool "
  1556. "%(pool)s",
  1557. {'folder': folder_name,
  1558. 'pool': pool_name})
  1559. folder_id = self.get_folder_id(folder_name)
  1560. if folder_id is not None:
  1561. data['data']["folder_id"] = folder_id
  1562. if folder_id is None:
  1563. raise NimbleAPIException(_("Folder '%(folder)s' not "
  1564. "present in pool '%(pool)s'") %
  1565. {'folder': folder_name,
  1566. 'pool': pool_name})
  1567. else:
  1568. raise NimbleAPIException(_("Folder '%(folder)s' not present in"
  1569. " pool '%(pool)s'") %
  1570. {'folder': folder_name,
  1571. 'pool': pool_name})
  1572. if iops_limit is not None:
  1573. if not iops_limit.isdigit() or (
  1574. int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS):
  1575. raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") %
  1576. {'err': IOPS_ERR_MSG,
  1577. 'min': MIN_IOPS,
  1578. 'max': MAX_IOPS})
  1579. data['data']['limit_iops'] = iops_limit
  1580. if iops_limit is not None:
  1581. if not iops_limit.isdigit() or (
  1582. int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS):
  1583. raise NimbleAPIException(_("Please set valid IOPS limit"
  1584. " in the range [%(min)s, %(max)s]") %
  1585. {'min': MIN_IOPS,
  1586. 'max': MAX_IOPS})
  1587. data['data']['limit_iops'] = iops_limit
  1588. LOG.debug("Volume metadata :%s", volume.metadata)
  1589. for key, value in volume.metadata.items():
  1590. LOG.debug("Key %(key)s Value %(value)s",
  1591. {'key': key, 'value': value})
  1592. if key == EXTRA_SPEC_IOPS_LIMIT and value.isdigit():
  1593. if type(value) == int or int(value) < MIN_IOPS or (
  1594. int(value) > MAX_IOPS):
  1595. raise NimbleAPIException(_("Please enter valid IOPS "
  1596. "limit in the range ["
  1597. "%(min)s, %(max)s]") %
  1598. {'min': MIN_IOPS,
  1599. 'max': MAX_IOPS})
  1600. LOG.debug("IOPS Limit %s", value)
  1601. data['data']['limit_iops'] = value
  1602. r = self.post(api, data)
  1603. return r['data']
  1604. def edit_vol(self, volume_name, data):
  1605. vol_id = self.get_volume_id_by_name(volume_name)
  1606. api = "volumes/" + six.text_type(vol_id)
  1607. self.put(api, data)
  1608. def delete_snap(self, volume_name, snap_name):
  1609. snap_info = self.get_snap_info(snap_name, volume_name)
  1610. api = "snapshots/" + six.text_type(snap_info['id'])
  1611. try:
  1612. self.delete(api)
  1613. except NimbleAPIException as ex:
  1614. LOG.debug("delete snapshot exception: %s", ex)
  1615. if SM_OBJ_HAS_CLONE in six.text_type(ex):
  1616. # if snap has a clone log the error and continue ahead
  1617. LOG.warning('Snapshot %(snap)s : %(state)s',
  1618. {'snap': snap_name,
  1619. 'state': SM_OBJ_HAS_CLONE})
  1620. else:
  1621. raise
  1622. @_connection_checker
  1623. def get(self, api):
  1624. return self.get_query(api, None)
  1625. @_connection_checker
  1626. def get_query(self, api, query):
  1627. url = self.uri + api
  1628. return requests.get(url, headers=self.headers,
  1629. params=query, verify=self.verify)
  1630. @_connection_checker
  1631. def put(self, api, payload):
  1632. url = self.uri + api
  1633. r = requests.put(url, data=json.dumps(payload),
  1634. headers=self.headers, verify=self.verify)
  1635. if r.status_code != 201 and r.status_code != 200:
  1636. base = "Failed to execute api %(api)s : Error Code :%(code)s" % {
  1637. 'api': api,
  1638. 'code': r.status_code}
  1639. LOG.debug("Base error : %(base)s", {'base': base})
  1640. try:
  1641. msg = _("%(base)s Message: %(msg)s") % {
  1642. 'base': base,
  1643. 'msg': r.json()['messages'][1]['text']}
  1644. except IndexError:
  1645. msg = _("%(base)s Message: %(msg)s") % {
  1646. 'base': base,
  1647. 'msg': six.text_type(r.json())}
  1648. raise NimbleAPIException(msg)
  1649. return r.json()
  1650. @_connection_checker
  1651. def post(self, api, payload):
  1652. url = self.uri + api
  1653. r = requests.post(url, data=json.dumps(payload),
  1654. headers=self.headers, verify=self.verify)
  1655. if r.status_code != 201 and r.status_code != 200:
  1656. msg = _("Failed to execute api %(api)s : %(msg)s : %(code)s") % {
  1657. 'api': api,
  1658. 'msg': r.json()['messages'][1]['text'],
  1659. 'code': r.status_code}
  1660. raise NimbleAPIException(msg)
  1661. return r.json()
  1662. @_connection_checker
  1663. def delete(self, api):
  1664. url = self.uri + api
  1665. r = requests.delete(url, headers=self.headers, verify=self.verify)
  1666. if r.status_code != 201 and r.status_code != 200:
  1667. base = "Failed to execute api %(api)s: Error Code: %(code)s" % {
  1668. 'api': api,
  1669. 'code': r.status_code}
  1670. LOG.debug("Base error : %(base)s", {'base': base})
  1671. try:
  1672. msg = _("%(base)s Message: %(msg)s") % {
  1673. 'base': base,
  1674. 'msg': r.json()['messages'][1]['text']}
  1675. except IndexError:
  1676. msg = _("%(base)s Message: %(msg)s") % {
  1677. 'base': base,
  1678. 'msg': six.text_type(r.json())}
  1679. raise NimbleAPIException(msg)
  1680. return r.json()
  1681. def _format_to_wwpn(self, string_wwpn):
  1682. return ':'.join(a + b for a, b in zip(* [iter(string_wwpn)] * 2))
  1683. def get_fc_interface_list(self, array_name):
  1684. """getFibreChannelInterfaceList API to get FC interfaces on array."""
  1685. api = 'fibre_channel_interfaces/detail'
  1686. filter = {'array_name_or_serial': array_name}
  1687. r = self.get_query(api, filter)
  1688. if not r.json()['data']:
  1689. raise NimbleAPIException(_("No fc interfaces for array %s") %
  1690. array_name)
  1691. return r.json()['data']
  1692. def enable_group_scoped_target(self):
  1693. group_id = self.get_group_id()
  1694. api = "groups/" + six.text_type(group_id)
  1695. data = {'data': {'group_target_enabled': True}}
  1696. self.put(api, data)