OpenStack Block Storage (Cinder)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 

2934 lines
122 KiB

  1. # Copyright 2010 United States Government as represented by the
  2. # Administrator of the National Aeronautics and Space Administration.
  3. # All Rights Reserved.
  4. #
  5. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  6. # not use this file except in compliance with the License. You may obtain
  7. # a copy of the License at
  8. #
  9. # http://www.apache.org/licenses/LICENSE-2.0
  10. #
  11. # Unless required by applicable law or agreed to in writing, software
  12. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  13. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  14. # License for the specific language governing permissions and limitations
  15. # under the License.
  16. """Drivers for volumes."""
  17. import abc
  18. import time
  19. from os_brick import exception as brick_exception
  20. from oslo_concurrency import processutils
  21. from oslo_config import cfg
  22. from oslo_config import types
  23. from oslo_log import log as logging
  24. from oslo_utils import excutils
  25. import six
  26. from cinder import exception
  27. from cinder.i18n import _
  28. from cinder.image import image_utils
  29. from cinder import objects
  30. from cinder.objects import fields
  31. from cinder import utils
  32. from cinder.volume import configuration
  33. from cinder.volume import driver_utils
  34. from cinder.volume import rpcapi as volume_rpcapi
  35. from cinder.volume import throttling
  36. LOG = logging.getLogger(__name__)
  37. volume_opts = [
  38. cfg.IntOpt('num_shell_tries',
  39. default=3,
  40. help='Number of times to attempt to run flakey shell commands'),
  41. cfg.IntOpt('reserved_percentage',
  42. default=0,
  43. min=0, max=100,
  44. help='The percentage of backend capacity is reserved'),
  45. cfg.StrOpt('target_prefix',
  46. default='iqn.2010-10.org.openstack:',
  47. help='Prefix for iSCSI volumes'),
  48. cfg.StrOpt('target_ip_address',
  49. default='$my_ip',
  50. help='The IP address that the iSCSI daemon is listening on'),
  51. cfg.ListOpt('iscsi_secondary_ip_addresses',
  52. default=[],
  53. help='The list of secondary IP addresses of the iSCSI daemon'),
  54. cfg.PortOpt('target_port',
  55. default=3260,
  56. help='The port that the iSCSI daemon is listening on'),
  57. cfg.IntOpt('num_volume_device_scan_tries',
  58. default=3,
  59. help='The maximum number of times to rescan targets'
  60. ' to find volume'),
  61. cfg.StrOpt('volume_backend_name',
  62. help='The backend name for a given driver implementation'),
  63. cfg.StrOpt('volume_clear',
  64. default='zero',
  65. choices=['none', 'zero'],
  66. help='Method used to wipe old volumes'),
  67. cfg.IntOpt('volume_clear_size',
  68. default=0,
  69. max=1024,
  70. help='Size in MiB to wipe at start of old volumes. 1024 MiB '
  71. 'at max. 0 => all'),
  72. cfg.StrOpt('volume_clear_ionice',
  73. help='The flag to pass to ionice to alter the i/o priority '
  74. 'of the process used to zero a volume after deletion, '
  75. 'for example "-c3" for idle only priority.'),
  76. cfg.StrOpt('target_helper',
  77. default='tgtadm',
  78. choices=['tgtadm', 'lioadm', 'scstadmin', 'iscsictl',
  79. 'ietadm', 'nvmet', 'spdk-nvmeof', 'fake'],
  80. help='Target user-land tool to use. tgtadm is default, '
  81. 'use lioadm for LIO iSCSI support, scstadmin for SCST '
  82. 'target support, ietadm for iSCSI Enterprise Target, '
  83. 'iscsictl for Chelsio iSCSI Target, nvmet for NVMEoF '
  84. 'support, spdk-nvmeof for SPDK NVMe-oF, '
  85. 'or fake for testing.'),
  86. cfg.StrOpt('volumes_dir',
  87. default='$state_path/volumes',
  88. help='Volume configuration file storage '
  89. 'directory'),
  90. cfg.StrOpt('iet_conf',
  91. default='/etc/iet/ietd.conf',
  92. help='IET configuration file'),
  93. cfg.StrOpt('chiscsi_conf',
  94. default='/etc/chelsio-iscsi/chiscsi.conf',
  95. help='Chiscsi (CXT) global defaults configuration file'),
  96. cfg.StrOpt('iscsi_iotype',
  97. default='fileio',
  98. choices=['blockio', 'fileio', 'auto'],
  99. help=('Sets the behavior of the iSCSI target '
  100. 'to either perform blockio or fileio '
  101. 'optionally, auto can be set and Cinder '
  102. 'will autodetect type of backing device')),
  103. cfg.StrOpt('volume_dd_blocksize',
  104. default='1M',
  105. help='The default block size used when copying/clearing '
  106. 'volumes'),
  107. cfg.StrOpt('volume_copy_blkio_cgroup_name',
  108. default='cinder-volume-copy',
  109. help='The blkio cgroup name to be used to limit bandwidth '
  110. 'of volume copy'),
  111. cfg.IntOpt('volume_copy_bps_limit',
  112. default=0,
  113. help='The upper limit of bandwidth of volume copy. '
  114. '0 => unlimited'),
  115. cfg.StrOpt('iscsi_write_cache',
  116. default='on',
  117. choices=['on', 'off'],
  118. help='Sets the behavior of the iSCSI target to either '
  119. 'perform write-back(on) or write-through(off). '
  120. 'This parameter is valid if target_helper is set '
  121. 'to tgtadm.'),
  122. cfg.StrOpt('iscsi_target_flags',
  123. default='',
  124. help='Sets the target-specific flags for the iSCSI target. '
  125. 'Only used for tgtadm to specify backing device flags '
  126. 'using bsoflags option. The specified string is passed '
  127. 'as is to the underlying tool.'),
  128. cfg.StrOpt('target_protocol',
  129. default='iscsi',
  130. choices=['iscsi', 'iser', 'nvmet_rdma'],
  131. help='Determines the target protocol for new volumes, '
  132. 'created with tgtadm, lioadm and nvmet target helpers. '
  133. 'In order to enable RDMA, this parameter should be set '
  134. 'with the value "iser". The supported iSCSI protocol '
  135. 'values are "iscsi" and "iser", in case of nvmet target '
  136. 'set to "nvmet_rdma".'),
  137. cfg.StrOpt('driver_client_cert_key',
  138. help='The path to the client certificate key for verification, '
  139. 'if the driver supports it.'),
  140. cfg.StrOpt('driver_client_cert',
  141. help='The path to the client certificate for verification, '
  142. 'if the driver supports it.'),
  143. cfg.BoolOpt('driver_use_ssl',
  144. default=False,
  145. help='Tell driver to use SSL for connection to backend '
  146. 'storage if the driver supports it.'),
  147. cfg.StrOpt('max_over_subscription_ratio',
  148. default='20.0',
  149. regex=r'^(auto|\d*\.\d+|\d+)$',
  150. help='Representation of the over subscription ratio '
  151. 'when thin provisioning is enabled. Default ratio is '
  152. '20.0, meaning provisioned capacity can be 20 times of '
  153. 'the total physical capacity. If the ratio is 10.5, it '
  154. 'means provisioned capacity can be 10.5 times of the '
  155. 'total physical capacity. A ratio of 1.0 means '
  156. 'provisioned capacity cannot exceed the total physical '
  157. 'capacity. If ratio is \'auto\', Cinder will '
  158. 'automatically calculate the ratio based on the '
  159. 'provisioned capacity and the used space. If not set to '
  160. 'auto, the ratio has to be a minimum of 1.0.'),
  161. cfg.BoolOpt('use_chap_auth',
  162. default=False,
  163. help='Option to enable/disable CHAP authentication for '
  164. 'targets.'),
  165. cfg.StrOpt('chap_username',
  166. default='',
  167. help='CHAP user name.'),
  168. cfg.StrOpt('chap_password',
  169. default='',
  170. help='Password for specified CHAP account name.',
  171. secret=True),
  172. cfg.StrOpt('driver_data_namespace',
  173. help='Namespace for driver private data values to be '
  174. 'saved in.'),
  175. cfg.StrOpt('filter_function',
  176. help='String representation for an equation that will be '
  177. 'used to filter hosts. Only used when the driver '
  178. 'filter is set to be used by the Cinder scheduler.'),
  179. cfg.StrOpt('goodness_function',
  180. help='String representation for an equation that will be '
  181. 'used to determine the goodness of a host. Only used '
  182. 'when using the goodness weigher is set to be used by '
  183. 'the Cinder scheduler.'),
  184. cfg.BoolOpt('driver_ssl_cert_verify',
  185. default=False,
  186. help='If set to True the http client will validate the SSL '
  187. 'certificate of the backend endpoint.'),
  188. cfg.StrOpt('driver_ssl_cert_path',
  189. help='Can be used to specify a non default path to a '
  190. 'CA_BUNDLE file or directory with certificates of '
  191. 'trusted CAs, which will be used to validate the backend'),
  192. cfg.ListOpt('trace_flags',
  193. help='List of options that control which trace info '
  194. 'is written to the DEBUG log level to assist '
  195. 'developers. Valid values are method and api.'),
  196. cfg.MultiOpt('replication_device',
  197. item_type=types.Dict(),
  198. secret=True,
  199. help="Multi opt of dictionaries to represent a replication "
  200. "target device. This option may be specified multiple "
  201. "times in a single config section to specify multiple "
  202. "replication target devices. Each entry takes the "
  203. "standard dict config form: replication_device = "
  204. "target_device_id:<required>,"
  205. "key1:value1,key2:value2..."),
  206. cfg.BoolOpt('report_discard_supported',
  207. default=False,
  208. help='Report to clients of Cinder that the backend supports '
  209. 'discard (aka. trim/unmap). This will not actually '
  210. 'change the behavior of the backend or the client '
  211. 'directly, it will only notify that it can be used.'),
  212. cfg.StrOpt('storage_protocol',
  213. ignore_case=True,
  214. default='iscsi',
  215. choices=['iscsi', 'fc'],
  216. help='Protocol for transferring data between host and '
  217. 'storage back-end.'),
  218. cfg.BoolOpt('enable_unsupported_driver',
  219. default=False,
  220. help="Set this to True when you want to allow an unsupported "
  221. "driver to start. Drivers that haven't maintained a "
  222. "working CI system and testing are marked as unsupported "
  223. "until CI is working again. This also marks a driver as "
  224. "deprecated and may be removed in the next release."),
  225. cfg.StrOpt('backend_availability_zone',
  226. default=None,
  227. help='Availability zone for this volume backend. If not set, '
  228. 'the storage_availability_zone option value is used as '
  229. 'the default for all backends.'),
  230. ]
  231. # for backward compatibility
  232. iser_opts = [
  233. cfg.IntOpt('num_iser_scan_tries',
  234. default=3,
  235. help='The maximum number of times to rescan iSER target '
  236. 'to find volume'),
  237. cfg.StrOpt('iser_target_prefix',
  238. default='iqn.2010-10.org.openstack:',
  239. help='Prefix for iSER volumes'),
  240. cfg.StrOpt('iser_ip_address',
  241. default='$my_ip',
  242. help='The IP address that the iSER daemon is listening on'),
  243. cfg.PortOpt('iser_port',
  244. default=3260,
  245. help='The port that the iSER daemon is listening on'),
  246. cfg.StrOpt('iser_helper',
  247. default='tgtadm',
  248. help='The name of the iSER target user-land tool to use'),
  249. ]
  250. nvmet_opts = [
  251. cfg.PortOpt('nvmet_port_id',
  252. default=1,
  253. help='The port that the NVMe target is listening on.'),
  254. cfg.IntOpt('nvmet_ns_id',
  255. default=10,
  256. help='The namespace id associated with the subsystem '
  257. 'that will be created with the path for the LVM volume.'),
  258. ]
  259. scst_opts = [
  260. cfg.StrOpt('scst_target_iqn_name',
  261. help='Certain ISCSI targets have predefined target names, '
  262. 'SCST target driver uses this name.'),
  263. cfg.StrOpt('scst_target_driver',
  264. default='iscsi',
  265. help='SCST target implementation can choose from multiple '
  266. 'SCST target drivers.'),
  267. ]
  268. backup_opts = [
  269. cfg.BoolOpt('backup_use_temp_snapshot',
  270. default=False,
  271. help='If this is set to True, a temporary snapshot will '
  272. 'be created for performing non-disruptive backups. '
  273. 'Otherwise a temporary volume will be cloned '
  274. 'in order to perform a backup.'),
  275. ]
  276. image_opts = [
  277. cfg.BoolOpt('image_upload_use_cinder_backend',
  278. default=False,
  279. help='If set to True, upload-to-image in raw format will '
  280. 'create a cloned volume and register its location to '
  281. 'the image service, instead of uploading the volume '
  282. 'content. The cinder backend and locations support '
  283. 'must be enabled in the image service.'),
  284. cfg.BoolOpt('image_upload_use_internal_tenant',
  285. default=False,
  286. help='If set to True, the image volume created by '
  287. 'upload-to-image will be placed in the internal tenant. '
  288. 'Otherwise, the image volume is created in the current '
  289. 'context\'s tenant.'),
  290. cfg.BoolOpt('image_volume_cache_enabled',
  291. default=False,
  292. help='Enable the image volume cache for this backend.'),
  293. cfg.IntOpt('image_volume_cache_max_size_gb',
  294. default=0,
  295. help='Max size of the image volume cache for this backend in '
  296. 'GB. 0 => unlimited.'),
  297. cfg.IntOpt('image_volume_cache_max_count',
  298. default=0,
  299. help='Max number of entries allowed in the image volume cache. '
  300. '0 => unlimited.'),
  301. cfg.BoolOpt('use_multipath_for_image_xfer',
  302. default=False,
  303. help='Do we attach/detach volumes in cinder using multipath '
  304. 'for volume to image and image to volume transfers?'),
  305. cfg.BoolOpt('enforce_multipath_for_image_xfer',
  306. default=False,
  307. help='If this is set to True, attachment of volumes for '
  308. 'image transfer will be aborted when multipathd is not '
  309. 'running. Otherwise, it will fallback to single path.'),
  310. ]
  311. CONF = cfg.CONF
  312. CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
  313. CONF.register_opts(iser_opts, group=configuration.SHARED_CONF_GROUP)
  314. CONF.register_opts(nvmet_opts, group=configuration.SHARED_CONF_GROUP)
  315. CONF.register_opts(scst_opts, group=configuration.SHARED_CONF_GROUP)
  316. CONF.register_opts(image_opts, group=configuration.SHARED_CONF_GROUP)
  317. CONF.register_opts(volume_opts)
  318. CONF.register_opts(iser_opts)
  319. CONF.register_opts(nvmet_opts)
  320. CONF.register_opts(scst_opts)
  321. CONF.register_opts(backup_opts)
  322. CONF.register_opts(image_opts)
  323. CONF.import_opt('backup_use_same_host', 'cinder.backup.api')
  324. @six.add_metaclass(abc.ABCMeta)
  325. class BaseVD(object):
  326. """Executes commands relating to Volumes.
  327. Base Driver for Cinder Volume Control Path,
  328. This includes supported/required implementation
  329. for API calls. Also provides *generic* implementation
  330. of core features like cloning, copy_image_to_volume etc,
  331. this way drivers that inherit from this base class and
  332. don't offer their own impl can fall back on a general
  333. solution here.
  334. Key thing to keep in mind with this driver is that it's
  335. intended that these drivers ONLY implement Control Path
  336. details (create, delete, extend...), while transport or
  337. data path related implementation should be a *member object*
  338. that we call a connector. The point here is that for example
  339. don't allow the LVM driver to implement iSCSI methods, instead
  340. call whatever connector it has configured via conf file
  341. (iSCSI{LIO, TGT, IET}, FC, etc).
  342. In the base class and for example the LVM driver we do this via a has-a
  343. relationship and just provide an interface to the specific connector
  344. methods. How you do this in your own driver is of course up to you.
  345. """
  346. VERSION = "N/A"
  347. # NOTE(geguileo): By default we assume drivers don't support Active-Active
  348. # configurations. If driver supports it then they can set this class
  349. # attribute on the driver, and if support depends on configuration options
  350. # then they can set it at the instance level on the driver's __init__
  351. # method since the manager will do the check after that.
  352. SUPPORTS_ACTIVE_ACTIVE = False
  353. # If a driver hasn't maintained their CI system, this will get
  354. # set to False, which prevents the driver from starting.
  355. # Add enable_unsupported_driver = True in cinder.conf to get
  356. # the unsupported driver started.
  357. SUPPORTED = True
  358. # Methods checked to detect a driver implements a replication feature
  359. REPLICATION_FEATURE_CHECKERS = {'v2.1': 'failover_host',
  360. 'a/a': 'failover_completed'}
  361. def __init__(self, execute=utils.execute, *args, **kwargs):
  362. # NOTE(vish): db is set by Manager
  363. self.db = kwargs.get('db')
  364. self.host = kwargs.get('host')
  365. self.cluster_name = kwargs.get('cluster_name')
  366. self.configuration = kwargs.get('configuration', None)
  367. if self.configuration:
  368. self.configuration.append_config_values(volume_opts)
  369. self.configuration.append_config_values(iser_opts)
  370. self.configuration.append_config_values(nvmet_opts)
  371. self.configuration.append_config_values(scst_opts)
  372. self.configuration.append_config_values(backup_opts)
  373. self.configuration.append_config_values(image_opts)
  374. utils.setup_tracing(self.configuration.safe_get('trace_flags'))
  375. # NOTE(geguileo): Don't allow to start if we are enabling
  376. # replication on a cluster service with a backend that doesn't
  377. # support the required mechanism for Active-Active.
  378. replication_devices = self.configuration.safe_get(
  379. 'replication_device')
  380. if (self.cluster_name and replication_devices and
  381. not self.supports_replication_feature('a/a')):
  382. raise exception.Invalid(_("Driver doesn't support clustered "
  383. "replication."))
  384. self.driver_utils = driver_utils.VolumeDriverUtils(
  385. self._driver_data_namespace(), self.db)
  386. self._execute = execute
  387. self._stats = {}
  388. self._throttle = None
  389. self.pools = []
  390. self.capabilities = {}
  391. # We set these mappings up in the base driver so they
  392. # can be used by children
  393. # (intended for LVM and BlockDevice, but others could use as well)
  394. self.target_mapping = {
  395. 'fake': 'cinder.volume.targets.fake.FakeTarget',
  396. 'ietadm': 'cinder.volume.targets.iet.IetAdm',
  397. 'lioadm': 'cinder.volume.targets.lio.LioAdm',
  398. 'tgtadm': 'cinder.volume.targets.tgt.TgtAdm',
  399. 'scstadmin': 'cinder.volume.targets.scst.SCSTAdm',
  400. 'iscsictl': 'cinder.volume.targets.cxt.CxtAdm',
  401. 'nvmet': 'cinder.volume.targets.nvmet.NVMET',
  402. 'spdk-nvmeof': 'cinder.volume.targets.spdknvmf.SpdkNvmf'}
  403. # set True by manager after successful check_for_setup
  404. self._initialized = False
  405. def _driver_data_namespace(self):
  406. namespace = self.__class__.__name__
  407. if self.configuration:
  408. namespace = self.configuration.safe_get('driver_data_namespace')
  409. if not namespace:
  410. namespace = self.configuration.safe_get('volume_backend_name')
  411. return namespace
  412. def _is_non_recoverable(self, err, non_recoverable_list):
  413. for item in non_recoverable_list:
  414. if item in err:
  415. return True
  416. return False
  417. def _try_execute(self, *command, **kwargs):
  418. # NOTE(vish): Volume commands can partially fail due to timing, but
  419. # running them a second time on failure will usually
  420. # recover nicely.
  421. non_recoverable = kwargs.pop('no_retry_list', [])
  422. tries = 0
  423. while True:
  424. try:
  425. self._execute(*command, **kwargs)
  426. return True
  427. except processutils.ProcessExecutionError as ex:
  428. tries = tries + 1
  429. if tries >= self.configuration.num_shell_tries or\
  430. self._is_non_recoverable(ex.stderr, non_recoverable):
  431. raise
  432. LOG.exception("Recovering from a failed execute. "
  433. "Try number %s", tries)
  434. time.sleep(tries ** 2)
  435. def _detach_volume(self, context, attach_info, volume, properties,
  436. force=False, remote=False, ignore_errors=False):
  437. """Disconnect the volume from the host.
  438. With the force parameter we can indicate if we give more importance to
  439. cleaning up as much as possible or if data integrity has higher
  440. priority. This requires the latests OS-Brick code that adds this
  441. feature.
  442. We can also force errors to be ignored using ignore_errors.
  443. """
  444. # Use Brick's code to do attach/detach
  445. exc = brick_exception.ExceptionChainer()
  446. if attach_info:
  447. connector = attach_info['connector']
  448. with exc.context(force, 'Disconnect failed'):
  449. connector.disconnect_volume(attach_info['conn']['data'],
  450. attach_info['device'], force=force,
  451. ignore_errors=ignore_errors)
  452. if remote:
  453. # Call remote manager's terminate_connection which includes
  454. # driver's terminate_connection and remove export
  455. rpcapi = volume_rpcapi.VolumeAPI()
  456. with exc.context(force, 'Remote terminate connection failed'):
  457. rpcapi.terminate_connection(context, volume, properties,
  458. force=force)
  459. else:
  460. # Call local driver's terminate_connection and remove export.
  461. # NOTE(avishay) This is copied from the manager's code - need to
  462. # clean this up in the future.
  463. with exc.context(force,
  464. _('Unable to terminate volume connection')):
  465. try:
  466. self.terminate_connection(volume, properties, force=force)
  467. except Exception as err:
  468. err_msg = (
  469. _('Unable to terminate volume connection: %(err)s')
  470. % {'err': err})
  471. LOG.error(err_msg)
  472. raise exception.VolumeBackendAPIException(data=err_msg)
  473. with exc.context(force, _('Unable to remove export')):
  474. try:
  475. LOG.debug("volume %s: removing export", volume['id'])
  476. self.remove_export(context, volume)
  477. except Exception as ex:
  478. LOG.exception("Error detaching volume %(volume)s, "
  479. "due to remove export failure.",
  480. {"volume": volume['id']})
  481. raise exception.RemoveExportException(volume=volume['id'],
  482. reason=ex)
  483. if exc and not ignore_errors:
  484. raise exc
  485. def set_initialized(self):
  486. self._initialized = True
  487. @property
  488. def initialized(self):
  489. return self._initialized
  490. @property
  491. def supported(self):
  492. return self.SUPPORTED
  493. def set_throttle(self):
  494. bps_limit = ((self.configuration and
  495. self.configuration.safe_get('volume_copy_bps_limit')) or
  496. CONF.volume_copy_bps_limit)
  497. cgroup_name = ((self.configuration and
  498. self.configuration.safe_get(
  499. 'volume_copy_blkio_cgroup_name')) or
  500. CONF.volume_copy_blkio_cgroup_name)
  501. self._throttle = None
  502. if bps_limit:
  503. try:
  504. self._throttle = throttling.BlkioCgroup(int(bps_limit),
  505. cgroup_name)
  506. except processutils.ProcessExecutionError as err:
  507. LOG.warning('Failed to activate volume copy throttling: '
  508. '%(err)s', {'err': err})
  509. throttling.Throttle.set_default(self._throttle)
  510. def get_version(self):
  511. """Get the current version of this driver."""
  512. return self.VERSION
  513. @abc.abstractmethod
  514. def check_for_setup_error(self):
  515. return
  516. @staticmethod
  517. def get_driver_options():
  518. """Return the oslo_config options specific to the driver."""
  519. return volume_opts
  520. @abc.abstractmethod
  521. def create_volume(self, volume):
  522. """Creates a volume.
  523. Can optionally return a Dictionary of changes to the volume object to
  524. be persisted.
  525. If volume_type extra specs includes
  526. 'capabilities:replication <is> True' the driver
  527. needs to create a volume replica (secondary), and setup replication
  528. between the newly created volume and the secondary volume.
  529. Returned dictionary should include:
  530. .. code-block:: python
  531. volume['replication_status'] = 'copying'
  532. volume['replication_extended_status'] = <driver specific value>
  533. volume['driver_data'] = <driver specific value>
  534. """
  535. return
  536. @abc.abstractmethod
  537. def delete_volume(self, volume):
  538. """Deletes a volume.
  539. If volume_type extra specs includes 'replication: <is> True'
  540. then the driver needs to delete the volume replica too.
  541. """
  542. return
  543. def secure_file_operations_enabled(self):
  544. """Determine if driver is running in Secure File Operations mode.
  545. The Cinder Volume driver needs to query if this driver is running
  546. in a secure file operations mode. By default, it is False: any driver
  547. that does support secure file operations should override this method.
  548. """
  549. return False
  550. def get_volume_stats(self, refresh=False):
  551. """Return the current state of the volume service.
  552. If 'refresh' is True, run the update first.
  553. For replication the following state should be reported:
  554. replication = True (None or false disables replication)
  555. """
  556. return
  557. def get_prefixed_property(self, property):
  558. """Return prefixed property name
  559. :returns: a prefixed property name string or None
  560. """
  561. if property and self.capabilities.get('vendor_prefix'):
  562. return self.capabilities.get('vendor_prefix') + ':' + property
  563. def _set_property(self, properties, entry, title, description,
  564. type, **kwargs):
  565. prop = dict(title=title, description=description, type=type)
  566. allowed_keys = ('enum', 'default', 'minimum', 'maximum')
  567. for key in kwargs:
  568. if key in allowed_keys:
  569. prop[key] = kwargs[key]
  570. properties[entry] = prop
  571. def _init_standard_capabilities(self):
  572. """Create a dictionary of Cinder standard capabilities.
  573. This method creates a dictionary of Cinder standard capabilities
  574. and returns the created dictionary.
  575. The keys of this dictionary don't contain prefix and separator(:).
  576. """
  577. properties = {}
  578. self._set_property(
  579. properties,
  580. "thin_provisioning",
  581. "Thin Provisioning",
  582. _("Sets thin provisioning."),
  583. "boolean")
  584. self._set_property(
  585. properties,
  586. "compression",
  587. "Compression",
  588. _("Enables compression."),
  589. "boolean")
  590. self._set_property(
  591. properties,
  592. "qos",
  593. "QoS",
  594. _("Enables QoS."),
  595. "boolean")
  596. self._set_property(
  597. properties,
  598. "replication_enabled",
  599. "Replication",
  600. _("Enables replication."),
  601. "boolean")
  602. return properties
  603. def _init_vendor_properties(self):
  604. """Create a dictionary of vendor unique properties.
  605. This method creates a dictionary of vendor unique properties
  606. and returns both created dictionary and vendor name.
  607. Returned vendor name is used to check for name of vendor
  608. unique properties.
  609. - Vendor name shouldn't include colon(:) because of the separator
  610. and it is automatically replaced by underscore(_).
  611. ex. abc:d -> abc_d
  612. - Vendor prefix is equal to vendor name.
  613. ex. abcd
  614. - Vendor unique properties must start with vendor prefix + ':'.
  615. ex. abcd:maxIOPS
  616. Each backend driver needs to override this method to expose
  617. its own properties using _set_property() like this:
  618. self._set_property(
  619. properties,
  620. "vendorPrefix:specific_property",
  621. "Title of property",
  622. _("Description of property"),
  623. "type")
  624. : return dictionary of vendor unique properties
  625. : return vendor name
  626. Example of implementation::
  627. properties = {}
  628. self._set_property(
  629. properties,
  630. "abcd:compression_type",
  631. "Compression type",
  632. _("Specifies compression type."),
  633. "string",
  634. enum=["lossy", "lossless", "special"])
  635. self._set_property(
  636. properties,
  637. "abcd:minIOPS",
  638. "Minimum IOPS QoS",
  639. _("Sets minimum IOPS if QoS is enabled."),
  640. "integer",
  641. minimum=10,
  642. default=100)
  643. return properties, 'abcd'
  644. """
  645. LOG.info("Driver hasn't implemented _init_vendor_properties()")
  646. return {}, None
  647. def init_capabilities(self):
  648. """Obtain backend volume stats and capabilities list.
  649. This stores a dictionary which is consisted of two parts.
  650. First part includes static backend capabilities which are
  651. obtained by get_volume_stats(). Second part is properties,
  652. which includes parameters correspond to extra specs.
  653. This properties part is consisted of cinder standard
  654. capabilities and vendor unique properties.
  655. Using this capabilities list, operator can manage/configure
  656. backend using key/value from capabilities without specific
  657. knowledge of backend.
  658. """
  659. # Set static backend capabilities from get_volume_stats()
  660. stats = self.get_volume_stats(True)
  661. if stats:
  662. self.capabilities = stats.copy()
  663. # Set cinder standard capabilities
  664. self.capabilities['properties'] = self._init_standard_capabilities()
  665. # Set Vendor unique properties
  666. vendor_prop, vendor_name = self._init_vendor_properties()
  667. if vendor_name and vendor_prop:
  668. updated_vendor_prop = {}
  669. old_name = None
  670. # Replace colon in vendor name to underscore.
  671. if ':' in vendor_name:
  672. old_name = vendor_name
  673. vendor_name = vendor_name.replace(':', '_')
  674. LOG.warning('The colon in vendor name was replaced '
  675. 'by underscore. Updated vendor name is '
  676. '%(name)s".', {'name': vendor_name})
  677. for key in vendor_prop:
  678. # If key has colon in vendor name field, we replace it to
  679. # underscore.
  680. # ex. abc:d:storagetype:provisioning
  681. # -> abc_d:storagetype:provisioning
  682. if old_name and key.startswith(old_name + ':'):
  683. new_key = key.replace(old_name, vendor_name, 1)
  684. updated_vendor_prop[new_key] = vendor_prop[key]
  685. continue
  686. if not key.startswith(vendor_name + ':'):
  687. LOG.warning('Vendor unique property "%(property)s" '
  688. 'must start with vendor prefix with colon '
  689. '"%(prefix)s". The property was '
  690. 'not registered on capabilities list.',
  691. {'prefix': vendor_name + ':',
  692. 'property': key})
  693. continue
  694. updated_vendor_prop[key] = vendor_prop[key]
  695. # Update vendor unique properties to the dictionary
  696. self.capabilities['vendor_prefix'] = vendor_name
  697. self.capabilities['properties'].update(updated_vendor_prop)
  698. LOG.debug("Initialized capabilities list: %s.", self.capabilities)
  699. def _update_pools_and_stats(self, data):
  700. """Updates data for pools and volume stats based on provided data."""
  701. # provisioned_capacity_gb is set to None by default below, but
  702. # None won't be used in calculation. It will be overridden by
  703. # driver's provisioned_capacity_gb if reported, otherwise it
  704. # defaults to allocated_capacity_gb in host_manager.py.
  705. if self.pools:
  706. for pool in self.pools:
  707. new_pool = {}
  708. new_pool.update(dict(
  709. pool_name=pool,
  710. total_capacity_gb=0,
  711. free_capacity_gb=0,
  712. provisioned_capacity_gb=None,
  713. reserved_percentage=100,
  714. QoS_support=False,
  715. filter_function=self.get_filter_function(),
  716. goodness_function=self.get_goodness_function()
  717. ))
  718. data["pools"].append(new_pool)
  719. else:
  720. # No pool configured, the whole backend will be treated as a pool
  721. single_pool = {}
  722. single_pool.update(dict(
  723. pool_name=data["volume_backend_name"],
  724. total_capacity_gb=0,
  725. free_capacity_gb=0,
  726. provisioned_capacity_gb=None,
  727. reserved_percentage=100,
  728. QoS_support=False,
  729. filter_function=self.get_filter_function(),
  730. goodness_function=self.get_goodness_function()
  731. ))
  732. data["pools"].append(single_pool)
  733. self._stats = data
  734. def copy_image_to_volume(self, context, volume, image_service, image_id):
  735. """Fetch image from image_service and write to unencrypted volume.
  736. This does not attach an encryptor layer when connecting to the volume.
  737. """
  738. self._copy_image_data_to_volume(
  739. context, volume, image_service, image_id, encrypted=False)
  740. def copy_image_to_encrypted_volume(
  741. self, context, volume, image_service, image_id):
  742. """Fetch image from image_service and write to encrypted volume.
  743. This attaches the encryptor layer when connecting to the volume.
  744. """
  745. self._copy_image_data_to_volume(
  746. context, volume, image_service, image_id, encrypted=True)
    def _copy_image_data_to_volume(self, context, volume, image_service,
                                   image_id, encrypted=False):
        """Fetch the image from image_service and write it to the volume.

        :param context: security context
        :param volume: volume to write the image to; it is attached and
            detached around the copy
        :param image_service: image service used to fetch the image
        :param image_id: id of the image to fetch
        :param encrypted: when True, an encryptor layer is attached on top
            of the brick connection before writing and detached afterwards
        """
        LOG.debug('copy_image_to_volume %s.', volume['name'])
        use_multipath = self.configuration.use_multipath_for_image_xfer
        enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
        properties = utils.brick_get_connector_properties(use_multipath,
                                                          enforce_multipath)
        # _attach_volume may return an updated volume object (model update
        # from create_export), so rebind the local name.
        attach_info, volume = self._attach_volume(context, volume, properties)
        try:
            if encrypted:
                encryption = self.db.volume_encryption_metadata_get(context,
                                                                    volume.id)
                utils.brick_attach_volume_encryptor(context,
                                                    attach_info,
                                                    encryption)
            try:
                image_utils.fetch_to_raw(
                    context,
                    image_service,
                    image_id,
                    attach_info['device']['path'],
                    self.configuration.volume_dd_blocksize,
                    size=volume['size'])
            except exception.ImageTooBig:
                # Re-raise after logging so the caller still sees the error.
                with excutils.save_and_reraise_exception():
                    LOG.exception("Copying image %(image_id)s "
                                  "to volume failed due to "
                                  "insufficient available space.",
                                  {'image_id': image_id})
            finally:
                # Always remove the encryptor layer before detaching the
                # underlying volume.
                if encrypted:
                    utils.brick_detach_volume_encryptor(attach_info,
                                                        encryption)
        finally:
            self._detach_volume(context, attach_info, volume, properties,
                                force=True)
  784. def copy_volume_to_image(self, context, volume, image_service, image_meta):
  785. """Copy the volume to the specified image."""
  786. LOG.debug('copy_volume_to_image %s.', volume['name'])
  787. use_multipath = self.configuration.use_multipath_for_image_xfer
  788. enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
  789. properties = utils.brick_get_connector_properties(use_multipath,
  790. enforce_multipath)
  791. attach_info, volume = self._attach_volume(context, volume, properties)
  792. try:
  793. image_utils.upload_volume(context,
  794. image_service,
  795. image_meta,
  796. attach_info['device']['path'],
  797. compress=True)
  798. finally:
  799. # Since attached volume was not used for writing we can force
  800. # detach it
  801. self._detach_volume(context, attach_info, volume, properties,
  802. force=True, ignore_errors=True)
  803. def before_volume_copy(self, context, src_vol, dest_vol, remote=None):
  804. """Driver-specific actions before copyvolume data.
  805. This method will be called before _copy_volume_data during volume
  806. migration
  807. """
  808. pass
  809. def after_volume_copy(self, context, src_vol, dest_vol, remote=None):
  810. """Driver-specific actions after copyvolume data.
  811. This method will be called after _copy_volume_data during volume
  812. migration
  813. """
  814. pass
  815. def get_filter_function(self):
  816. """Get filter_function string.
  817. Returns either the string from the driver instance or global section
  818. in cinder.conf. If nothing is specified in cinder.conf, then try to
  819. find the default filter_function. When None is returned the scheduler
  820. will always pass the driver instance.
  821. :returns: a filter_function string or None
  822. """
  823. ret_function = self.configuration.filter_function
  824. if not ret_function:
  825. ret_function = CONF.filter_function
  826. if not ret_function:
  827. ret_function = self.get_default_filter_function()
  828. return ret_function
  829. def get_goodness_function(self):
  830. """Get good_function string.
  831. Returns either the string from the driver instance or global section
  832. in cinder.conf. If nothing is specified in cinder.conf, then try to
  833. find the default goodness_function. When None is returned the scheduler
  834. will give the lowest score to the driver instance.
  835. :returns: a goodness_function string or None
  836. """
  837. ret_function = self.configuration.goodness_function
  838. if not ret_function:
  839. ret_function = CONF.goodness_function
  840. if not ret_function:
  841. ret_function = self.get_default_goodness_function()
  842. return ret_function
  843. def get_default_filter_function(self):
  844. """Get the default filter_function string.
  845. Each driver could overwrite the method to return a well-known
  846. default string if it is available.
  847. :returns: None
  848. """
  849. return None
  850. def get_default_goodness_function(self):
  851. """Get the default goodness_function string.
  852. Each driver could overwrite the method to return a well-known
  853. default string if it is available.
  854. :returns: None
  855. """
  856. return None
  857. def _attach_volume(self, context, volume, properties, remote=False):
  858. """Attach the volume."""
  859. if remote:
  860. # Call remote manager's initialize_connection which includes
  861. # driver's create_export and initialize_connection
  862. rpcapi = volume_rpcapi.VolumeAPI()
  863. try:
  864. conn = rpcapi.initialize_connection(context, volume,
  865. properties)
  866. except Exception:
  867. with excutils.save_and_reraise_exception():
  868. # It is possible that initialize_connection fails due to
  869. # timeout. In fact, the volume is already attached after
  870. # the timeout error is raised, so the connection worths
  871. # a try of terminating.
  872. try:
  873. rpcapi.terminate_connection(context, volume,
  874. properties, force=True)
  875. except Exception:
  876. LOG.warning("Failed terminating the connection "
  877. "of volume %(volume_id)s, but it is "
  878. "acceptable.",
  879. {'volume_id': volume['id']})
  880. else:
  881. # Call local driver's create_export and initialize_connection.
  882. # NOTE(avishay) This is copied from the manager's code - need to
  883. # clean this up in the future.
  884. model_update = None
  885. try:
  886. LOG.debug("Volume %s: creating export", volume['id'])
  887. model_update = self.create_export(context, volume, properties)
  888. if model_update:
  889. volume.update(model_update)
  890. volume.save()
  891. except exception.CinderException as ex:
  892. if model_update:
  893. LOG.exception("Failed updating model of volume "
  894. "%(volume_id)s with driver provided "
  895. "model %(model)s",
  896. {'volume_id': volume['id'],
  897. 'model': model_update})
  898. raise exception.ExportFailure(reason=ex)
  899. try:
  900. conn = self.initialize_connection(volume, properties)
  901. except Exception as err:
  902. try:
  903. err_msg = (_('Unable to fetch connection information from '
  904. 'backend: %(err)s') %
  905. {'err': six.text_type(err)})
  906. LOG.error(err_msg)
  907. LOG.debug("Cleaning up failed connect initialization.")
  908. self.remove_export(context, volume)
  909. except Exception as ex:
  910. ex_msg = (_('Error encountered during cleanup '
  911. 'of a failed attach: %(ex)s') %
  912. {'ex': six.text_type(ex)})
  913. LOG.error(err_msg)
  914. raise exception.VolumeBackendAPIException(data=ex_msg)
  915. raise exception.VolumeBackendAPIException(data=err_msg)
  916. # Add encrypted flag to connection_info if not set in the driver.
  917. if conn['data'].get('encrypted') is None:
  918. encrypted = bool(volume.encryption_key_id)
  919. conn['data']['encrypted'] = encrypted
  920. try:
  921. attach_info = self._connect_device(conn)
  922. except Exception as exc:
  923. # We may have reached a point where we have attached the volume,
  924. # so we have to detach it (do the cleanup).
  925. attach_info = getattr(exc, 'kwargs', {}).get('attach_info', None)
  926. try:
  927. LOG.debug('Device for volume %s is unavailable but did '
  928. 'attach, detaching it.', volume['id'])
  929. self._detach_volume(context, attach_info, volume,
  930. properties, force=True,
  931. remote=remote)
  932. except Exception:
  933. LOG.exception('Error detaching volume %s',
  934. volume['id'])
  935. raise
  936. return (attach_info, volume)
  937. def _attach_snapshot(self, ctxt, snapshot, properties):
  938. """Attach the snapshot."""
  939. model_update = None
  940. try:
  941. LOG.debug("Snapshot %s: creating export.", snapshot.id)
  942. model_update = self.create_export_snapshot(ctxt, snapshot,
  943. properties)
  944. if model_update:
  945. snapshot.provider_location = model_update.get(
  946. 'provider_location', None)
  947. snapshot.provider_auth = model_update.get(
  948. 'provider_auth', None)
  949. snapshot.save()
  950. except exception.CinderException as ex:
  951. if model_update:
  952. LOG.exception("Failed updating model of snapshot "
  953. "%(snapshot_id)s with driver provided "
  954. "model %(model)s.",
  955. {'snapshot_id': snapshot.id,
  956. 'model': model_update})
  957. raise exception.ExportFailure(reason=ex)
  958. try:
  959. conn = self.initialize_connection_snapshot(
  960. snapshot, properties)
  961. except Exception as err:
  962. try:
  963. err_msg = (_('Unable to fetch connection information from '
  964. 'backend: %(err)s') %
  965. {'err': six.text_type(err)})
  966. LOG.error(err_msg)
  967. LOG.debug("Cleaning up failed connect initialization.")
  968. self.remove_export_snapshot(ctxt, snapshot)
  969. except Exception as ex:
  970. ex_msg = (_('Error encountered during cleanup '
  971. 'of a failed attach: %(ex)s') %
  972. {'ex': six.text_type(ex)})
  973. LOG.error(err_msg)
  974. raise exception.VolumeBackendAPIException(data=ex_msg)
  975. raise exception.VolumeBackendAPIException(data=err_msg)
  976. return conn
  977. def _connect_device(self, conn):
  978. # Use Brick's code to do attach/detach
  979. use_multipath = self.configuration.use_multipath_for_image_xfer
  980. device_scan_attempts = self.configuration.num_volume_device_scan_tries
  981. protocol = conn['driver_volume_type']
  982. connector = utils.brick_get_connector(
  983. protocol,
  984. use_multipath=use_multipath,
  985. device_scan_attempts=device_scan_attempts,
  986. conn=conn)
  987. device = connector.connect_volume(conn['data'])
  988. host_device = device['path']
  989. attach_info = {'conn': conn, 'device': device, 'connector': connector}
  990. unavailable = True
  991. try:
  992. # Secure network file systems will NOT run as root.
  993. root_access = not self.secure_file_operations_enabled()
  994. unavailable = not connector.check_valid_device(host_device,
  995. root_access)
  996. except Exception:
  997. LOG.exception('Could not validate device %s', host_device)
  998. if unavailable:
  999. raise exception.DeviceUnavailable(path=host_device,
  1000. attach_info=attach_info,
  1001. reason=(_("Unable to access "
  1002. "the backend storage "
  1003. "via the path "
  1004. "%(path)s.") %
  1005. {'path': host_device}))
  1006. return attach_info
  1007. def clone_image(self, context, volume,
  1008. image_location, image_meta,
  1009. image_service):
  1010. return None, False
  1011. def backup_use_temp_snapshot(self):
  1012. """Get the configured setting for backup from snapshot.
  1013. If an inheriting driver does not support this operation,
  1014. the driver should override this method to return false
  1015. and log a warning letting the administrator know they
  1016. have configured something that cannot be done.
  1017. """
  1018. return self.configuration.safe_get("backup_use_temp_snapshot")
  1019. def snapshot_revert_use_temp_snapshot(self):
  1020. # Specify whether a temporary backup snapshot should be used when
  1021. # reverting a snapshot. For some backends, this operation is not
  1022. # needed or not supported, in which case the driver should override
  1023. # this method.
  1024. return True
  1025. def snapshot_remote_attachable(self):
  1026. # TODO(lixiaoy1): the method will be deleted later when remote
  1027. # attach snapshot is implemented.
  1028. return False
  1029. def get_backup_device(self, context, backup):
  1030. """Get a backup device from an existing volume.
  1031. The function returns a volume or snapshot to backup service,
  1032. and then backup service attaches the device and does backup.
  1033. """
  1034. backup_device = None
  1035. is_snapshot = False
  1036. if self.backup_use_temp_snapshot():
  1037. (backup_device, is_snapshot) = (
  1038. self._get_backup_volume_temp_snapshot(context, backup))
  1039. else:
  1040. backup_device = self._get_backup_volume_temp_volume(
  1041. context, backup)
  1042. is_snapshot = False
  1043. return (backup_device, is_snapshot)
    def _get_backup_volume_temp_volume(self, context, backup):
        """Return a volume to do backup.

        To backup a snapshot, create a temp volume from the snapshot and
        back it up.

        Otherwise to backup an in-use volume, create a temp volume and
        back it up.

        :param context: security context
        :param backup: Backup object; when a temp volume is created its id
            is persisted on backup.temp_volume_id for later cleanup
        :returns: the Volume object that should be attached and backed up
        """
        volume = objects.Volume.get_by_id(context, backup.volume_id)
        snapshot = None
        if backup.snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
        LOG.debug('Creating a new backup for volume %s.', volume['name'])
        temp_vol_ref = None
        device_to_backup = volume
        # NOTE(xyang): If it is to backup from snapshot, create a temp
        # volume from the source snapshot, backup the temp volume, and
        # then clean up the temp volume.
        if snapshot:
            temp_vol_ref = self._create_temp_volume_from_snapshot(
                context, volume, snapshot)
            # Save the temp volume id on the backup before proceeding so
            # it can be cleaned up even if this process is interrupted.
            backup.temp_volume_id = temp_vol_ref.id
            backup.save()
            device_to_backup = temp_vol_ref
        else:
            # NOTE(xyang): Check volume status if it is not to backup from
            # snapshot; if 'in-use', create a temp volume from the source
            # volume, backup the temp volume, and then clean up the temp
            # volume; if 'available', just backup the volume.
            previous_status = volume.get('previous_status')
            if previous_status == "in-use":
                temp_vol_ref = self._create_temp_cloned_volume(
                    context, volume)
                backup.temp_volume_id = temp_vol_ref.id
                backup.save()
                device_to_backup = temp_vol_ref
        return device_to_backup
    def _get_backup_volume_temp_snapshot(self, context, backup):
        """Return a device to backup.

        If it is to backup from snapshot, back it up directly.

        Otherwise for in-use volume, create a temp snapshot and back it up.

        :param context: security context
        :param backup: Backup object; when a temp snapshot is created its
            id is persisted on backup.temp_snapshot_id for later cleanup
        :returns: tuple (device_to_backup, is_snapshot)
        """
        volume = objects.Volume.get_by_id(context, backup.volume_id)
        snapshot = None
        if backup.snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
        LOG.debug('Creating a new backup for volume %s.', volume['name'])
        device_to_backup = volume
        is_snapshot = False
        temp_snapshot = None
        # NOTE(xyang): If it is to backup from snapshot, back it up
        # directly. No need to clean it up.
        if snapshot:
            device_to_backup = snapshot
            is_snapshot = True
        else:
            # NOTE(xyang): If it is not to backup from snapshot, check volume
            # status. If the volume status is 'in-use', create a temp snapshot
            # from the source volume, backup the temp snapshot, and then clean
            # up the temp snapshot; if the volume status is 'available', just
            # backup the volume.
            previous_status = volume.get('previous_status')
            if previous_status == "in-use":
                temp_snapshot = self._create_temp_snapshot(context, volume)
                # Persist the temp snapshot id before continuing so it can
                # be cleaned up even if this process is interrupted.
                backup.temp_snapshot_id = temp_snapshot.id
                backup.save()
                device_to_backup = temp_snapshot
                is_snapshot = True
        return (device_to_backup, is_snapshot)
    def _create_temp_snapshot(self, context, volume):
        """Create a temporary snapshot of *volume* for backup purposes.

        A Snapshot record is first created in the DB in CREATING state,
        then the backend snapshot is taken via create_snapshot(). On
        failure the snapshot's glance metadata and DB record are removed
        before the exception is re-raised.

        :returns: the Snapshot object, left in AVAILABLE state
        """
        kwargs = {
            'volume_id': volume['id'],
            'cgsnapshot_id': None,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': fields.SnapshotStatus.CREATING,
            'progress': '0%',
            'volume_size': volume['size'],
            'display_name': 'backup-snap-%s' % volume['id'],
            'display_description': None,
            'volume_type_id': volume['volume_type_id'],
            'encryption_key_id': volume['encryption_key_id'],
            'metadata': {},
        }
        temp_snap_ref = objects.Snapshot(context=context, **kwargs)
        temp_snap_ref.create()
        try:
            model_update = self.create_snapshot(temp_snap_ref)
            if model_update:
                temp_snap_ref.update(model_update)
        except Exception:
            # Roll back both the glance metadata and the DB record, then
            # re-raise the original exception.
            with excutils.save_and_reraise_exception():
                with temp_snap_ref.obj_as_admin():
                    self.db.volume_glance_metadata_delete_by_snapshot(
                        context, temp_snap_ref.id)
                    temp_snap_ref.destroy()
        temp_snap_ref.status = fields.SnapshotStatus.AVAILABLE
        temp_snap_ref.progress = '100%'
        temp_snap_ref.save()
        return temp_snap_ref
  1143. def _create_temp_volume(self, context, volume, volume_options=None):
  1144. kwargs = {
  1145. 'size': volume.size,
  1146. 'display_name': 'backup-vol-%s' % volume.id,
  1147. 'host': volume.host,
  1148. 'cluster_name': volume.cluster_name,
  1149. 'user_id': context.user_id,
  1150. 'project_id': context.project_id,
  1151. 'status': 'creating',
  1152. 'attach_status': fields.VolumeAttachStatus.DETACHED,
  1153. 'availability_zone': volume.availability_zone,
  1154. 'volume_type_id': volume.volume_type_id,
  1155. 'admin_metadata': {'temporary': 'True'},
  1156. }
  1157. kwargs.update(volume_options or {})
  1158. temp_vol_ref = objects.Volume(context=context.elevated(), **kwargs)
  1159. temp_vol_ref.create()
  1160. return temp_vol_ref
  1161. def _create_temp_cloned_volume(self, context, volume):
  1162. temp_vol_ref = self._create_temp_volume(context, volume)
  1163. try:
  1164. model_update = self.create_cloned_volume(temp_vol_ref, volume)
  1165. if model_update:
  1166. temp_vol_ref.update(model_update)
  1167. except Exception:
  1168. with excutils.save_and_reraise_exception():
  1169. temp_vol_ref.destroy()
  1170. temp_vol_ref.status = 'available'
  1171. temp_vol_ref.save()
  1172. return temp_vol_ref
  1173. def _create_temp_volume_from_snapshot(self, context, volume, snapshot,
  1174. volume_options=None):
  1175. temp_vol_ref = self._create_temp_volume(context, volume,
  1176. volume_options=volume_options)
  1177. try:
  1178. model_update = self.create_volume_from_snapshot(temp_vol_ref,
  1179. snapshot)
  1180. if model_update:
  1181. temp_vol_ref.update(model_update)
  1182. except Exception:
  1183. with excutils.save_and_reraise_exception():
  1184. temp_vol_ref.destroy()
  1185. temp_vol_ref.status = 'available'
  1186. temp_vol_ref.save()
  1187. return temp_vol_ref
  1188. def clear_download(self, context, volume):
  1189. """Clean up after an interrupted image copy."""
  1190. pass
  1191. def attach_volume(self, context, volume, instance_uuid, host_name,
  1192. mountpoint):
  1193. """Callback for volume attached to instance or host."""
  1194. pass
  1195. def detach_volume(self, context, volume, attachment=None):
  1196. """Callback for volume detached."""
  1197. pass
  1198. def do_setup(self, context):
  1199. """Any initialization the volume driver does while starting."""
  1200. pass
  1201. def validate_connector(self, connector):
  1202. """Fail if connector doesn't contain all the data needed by driver."""
  1203. pass
  1204. def update_migrated_volume(self, ctxt, volume, new_volume,
  1205. original_volume_status):
  1206. """Return model update for migrated volume.
  1207. Each driver implementing this method needs to be responsible for the
  1208. values of _name_id and provider_location. If None is returned or either
  1209. key is not set, it means the volume table does not need to change the
  1210. value(s) for the key(s).
  1211. The return format is {"_name_id": value, "provider_location": value}.
  1212. :param volume: The original volume that was migrated to this backend
  1213. :param new_volume: The migration volume object that was created on
  1214. this backend as part of the migration process
  1215. :param original_volume_status: The status of the original volume
  1216. :returns: model_update to update DB with any needed changes
  1217. """
  1218. msg = _("The method update_migrated_volume is not implemented.")
  1219. raise NotImplementedError(msg)
  1220. @staticmethod
  1221. def validate_connector_has_setting(connector, setting):
  1222. pass
  1223. def retype(self, context, volume, new_type, diff, host):
  1224. return False, None
  1225. def create_cloned_volume(self, volume, src_vref):
  1226. """Creates a clone of the specified volume.
  1227. If volume_type extra specs includes 'replication: <is> True' the
  1228. driver needs to create a volume replica (secondary)
  1229. and setup replication between the newly created volume
  1230. and the secondary volume.
  1231. """
  1232. raise NotImplementedError()
  1233. # ####### Interface methods for DataPath (Connector) ########
  1234. @abc.abstractmethod
  1235. def ensure_export(self, context, volume):
  1236. """Synchronously recreates an export for a volume."""
  1237. return
  1238. @abc.abstractmethod
  1239. def create_export(self, context, volume, connector):
  1240. """Exports the volume.
  1241. Can optionally return a Dictionary of changes
  1242. to the volume object to be persisted.
  1243. """
  1244. return
  1245. def create_export_snapshot(self, context, snapshot, connector):
  1246. """Exports the snapshot.
  1247. Can optionally return a Dictionary of changes
  1248. to the snapshot object to be persisted.
  1249. """
  1250. return
  1251. @abc.abstractmethod
  1252. def remove_export(self, context, volume):
  1253. """Removes an export for a volume."""
  1254. return
  1255. def remove_export_snapshot(self, context, snapshot):
  1256. """Removes an export for a snapshot."""
  1257. return
  1258. @abc.abstractmethod
  1259. def initialize_connection(self, volume, connector):
  1260. """Allow connection to connector and return connection info.
  1261. :param volume: The volume to be attached
  1262. :param connector: Dictionary containing information about what is being
  1263. connected to.
  1264. :returns conn_info: A dictionary of connection information.
  1265. """
  1266. return
  1267. def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
  1268. """Allow connection to connector and return connection info.
  1269. :param snapshot: The snapshot to be attached
  1270. :param connector: Dictionary containing information about what
  1271. is being connected to.
  1272. :returns conn_info: A dictionary of connection information. This
  1273. can optionally include a "initiator_updates"
  1274. field.
  1275. The "initiator_updates" field must be a dictionary containing a
  1276. "set_values" and/or "remove_values" field. The "set_values" field must
  1277. be a dictionary of key-value pairs to be set/updated in the db. The
  1278. "remove_values" field must be a list of keys, previously set with
  1279. "set_values", that will be deleted from the db.
  1280. """
  1281. return
  1282. @abc.abstractmethod
  1283. def terminate_connection(self, volume, connector, **kwargs):
  1284. """Disallow connection from connector.
  1285. :param volume: The volume to be disconnected.
  1286. :param connector: A dictionary describing the connection with details
  1287. about the initiator. Can be None.
  1288. """
  1289. return
  1290. def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
  1291. """Disallow connection from connector."""
  1292. return
  1293. def get_pool(self, volume):
  1294. """Return pool name where volume reside on.
  1295. :param volume: The volume hosted by the driver.
  1296. :returns: name of the pool where given volume is in.
  1297. """
  1298. return None
  1299. def update_provider_info(self, volumes, snapshots):
  1300. """Get provider info updates from driver.
  1301. :param volumes: List of Cinder volumes to check for updates
  1302. :param snapshots: List of Cinder snapshots to check for updates
  1303. :returns: tuple (volume_updates, snapshot_updates)
  1304. where volume updates {'id': uuid, provider_id: <provider-id>}
  1305. and snapshot updates {'id': uuid, provider_id: <provider-id>}
  1306. """
  1307. return None, None
  1308. def migrate_volume(self, context, volume, host):
  1309. """Migrate volume stub.
  1310. This is for drivers that don't implement an enhanced version
  1311. of this operation.
  1312. """
  1313. return (False, None)
    def manage_existing(self, volume, existing_ref):
        """Manage existing stub.

        This is for drivers that don't implement manage_existing().
        """
        msg = _("Manage existing volume not implemented.")
        raise NotImplementedError(msg)
  1320. def unmanage(self, volume):
  1321. """Unmanage stub.
  1322. This is for drivers that don't implement unmanage().
  1323. """
  1324. msg = _("Unmanage volume not implemented.")
  1325. raise NotImplementedError(msg)
  1326. def freeze_backend(self, context):
  1327. """Notify the backend that it's frozen.
  1328. We use set to prohibit the creation of any new resources
  1329. on the backend, or any modifications to existing items on
  1330. a backend. We set/enforce this by not allowing scheduling
  1331. of new volumes to the specified backend, and checking at the
  1332. api for modifications to resources and failing.
  1333. In most cases the driver may not need to do anything, but
  1334. this provides a handle if they need it.
  1335. :param context: security context
  1336. :response: True|False
  1337. """
  1338. return True
  1339. def thaw_backend(self, context):
  1340. """Notify the backend that it's unfrozen/thawed.
  1341. Returns the backend to a normal state after a freeze
  1342. operation.
  1343. In most cases the driver may not need to do anything, but
  1344. this provides a handle if they need it.
  1345. :param context: security context
  1346. :response: True|False
  1347. """
  1348. return True
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover a backend to a secondary replication target.

        Instructs a replication capable/configured backend to failover
        to one of its secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to failover
        to the only configured target, or to choose a target on its
        own. All of the hosts volumes will be passed on to the driver
        in order for it to determine the replicated volumes on the host,
        if needed.

        Response is a tuple, including the new target backend_id
        AND a list of dictionaries with volume_id and updates.

        Key things to consider (attaching failed-over volumes):
        - provider_location
        - provider_auth
        - provider_id
        - replication_status

        :param context: security context
        :param volumes: list of volume objects, in case the driver needs
                        to take action on them in some way
        :param secondary_id: Specifies rep target backend to fail over to
        :param groups: replication groups
        :returns: ID of the backend that was failed-over to,
                  model update for volumes, and model update for groups
        """
        # Example volume_updates data structure:
        # [{'volume_id': <cinder-uuid>,
        #   'updates': {'provider_id': 8,
        #               'replication_status': 'failed-over',
        #               'replication_extended_status': 'whatever',...}},]
        # Example group_updates data structure:
        # [{'group_id': <cinder-uuid>,
        #   'updates': {'replication_status': 'failed-over',...}},]
        raise NotImplementedError()
  1382. def failover(self, context, volumes, secondary_id=None, groups=None):
  1383. """Like failover but for a host that is clustered.
  1384. Most of the time this will be the exact same behavior as failover_host,
  1385. so if it's not overwritten, it is assumed to be the case.
  1386. """
  1387. return self.failover_host(context, volumes, secondary_id, groups)
  1388. def failover_completed(self, context, active_backend_id=None):
  1389. """This method is called after failover for clustered backends."""
  1390. raise NotImplementedError()
  1391. @classmethod
  1392. def _is_base_method(cls, method_name):
  1393. method = getattr(cls, method_name)
  1394. return method.__module__ == getattr(BaseVD, method_name).__module__
  1395. # Replication Group (Tiramisu)
  1396. def enable_replication(self, context, group, volumes):
  1397. """Enables replication for a group and volumes in the group.
  1398. :param group: group object
  1399. :param volumes: list of volume objects in the group
  1400. :returns: model_update - dict of group updates
  1401. :returns: volume_model_updates - list of dicts of volume updates
  1402. """
  1403. raise NotImplementedError()
  1404. # Replication Group (Tiramisu)
  1405. def disable_replication(self, context, group, volumes):
  1406. """Disables replication for a group and volumes in the group.
  1407. :param group: group object
  1408. :param volumes: list of volume objects in the group
  1409. :returns: model_update - dict of group updates
  1410. :returns: volume_model_updates - list of dicts of volume updates
  1411. """
  1412. raise NotImplementedError()
  1413. # Replication Group (Tiramisu)
  1414. def failover_replication(self, context, group, volumes,
  1415. secondary_backend_id=None):
  1416. """Fails over replication for a group and volumes in the group.
  1417. :param group: group object
  1418. :param volumes: list of volume objects in the group
  1419. :param secondary_backend_id: backend_id of the secondary site
  1420. :returns: model_update - dict of group updates
  1421. :returns: volume_model_updates - list of dicts of volume updates
  1422. """
  1423. raise NotImplementedError()
  1424. def get_replication_error_status(self, context, groups):
  1425. """Returns error info for replicated groups and its volumes.
  1426. :returns: group_model_updates - list of dicts of group updates
  1427. if error happens. For example, a dict of a group can be as follows:
  1428. .. code:: python
  1429. {'group_id': xxxx,
  1430. 'replication_status': fields.ReplicationStatus.ERROR}
  1431. :returns: volume_model_updates - list of dicts of volume updates
  1432. if error happens. For example, a dict of a volume can be as follows:
  1433. .. code:: python
  1434. {'volume_id': xxxx,
  1435. 'replication_status': fields.ReplicationStatus.ERROR}
  1436. """
  1437. return [], []
  1438. @classmethod
  1439. def supports_replication_feature(cls, feature):
  1440. """Check if driver class supports replication features.
  1441. Feature is a string that must be one of:
  1442. - v2.1
  1443. - a/a
  1444. """
  1445. if feature not in cls.REPLICATION_FEATURE_CHECKERS:
  1446. return False
  1447. # Check if method is being implemented/overwritten by the driver
  1448. method_name = cls.REPLICATION_FEATURE_CHECKERS[feature]
  1449. return not cls._is_base_method(method_name)
  1450. def get_replication_updates(self, context):
  1451. """Old replication update method, deprecate."""
  1452. raise NotImplementedError()
  1453. def create_group(self, context, group):
  1454. """Creates a group.
  1455. :param context: the context of the caller.
  1456. :param group: the Group object of the group to be created.
  1457. :returns: model_update
  1458. model_update will be in this format: {'status': xxx, ......}.
  1459. If the status in model_update is 'error', the manager will throw
  1460. an exception and it will be caught in the try-except block in the
  1461. manager. If the driver throws an exception, the manager will also
  1462. catch it in the try-except block. The group status in the db will
  1463. be changed to 'error'.
  1464. For a successful operation, the driver can either build the
  1465. model_update and return it or return None. The group status will
  1466. be set to 'available'.
  1467. """
  1468. raise NotImplementedError()
  1469. def delete_group(self, context, group, volumes):
  1470. """Deletes a group.
  1471. :param context: the context of the caller.
  1472. :param group: the Group object of the group to be deleted.
  1473. :param volumes: a list of Volume objects in the group.
  1474. :returns: model_update, volumes_model_update
  1475. param volumes is a list of objects retrieved from the db. It cannot
  1476. be assigned to volumes_model_update. volumes_model_update is a list
  1477. of dictionaries. It has to be built by the driver. An entry will be
  1478. in this format: {'id': xxx, 'status': xxx, ......}. model_update
  1479. will be in this format: {'status': xxx, ......}.
  1480. The driver should populate volumes_model_update and model_update
  1481. and return them.
  1482. The manager will check volumes_model_update and update db accordingly
  1483. for each volume. If the driver successfully deleted some volumes
  1484. but failed to delete others, it should set statuses of the volumes
  1485. accordingly so that the manager can update db correctly.
  1486. If the status in any entry of volumes_model_update is 'error_deleting'
  1487. or 'error', the status in model_update will be set to the same if it
  1488. is not already 'error_deleting' or 'error'.
  1489. If the status in model_update is 'error_deleting' or 'error', the
  1490. manager will raise an exception and the status of the group will be
  1491. set to 'error' in the db. If volumes_model_update is not returned by
  1492. the driver, the manager will set the status of every volume in the
  1493. group to 'error' in the except block.
  1494. If the driver raises an exception during the operation, it will be
  1495. caught by the try-except block in the manager. The statuses of the
  1496. group and all volumes in it will be set to 'error'.
  1497. For a successful operation, the driver can either build the
  1498. model_update and volumes_model_update and return them or
  1499. return None, None. The statuses of the group and all volumes
  1500. will be set to 'deleted' after the manager deletes them from db.
  1501. """
  1502. raise NotImplementedError()
  1503. def update_group(self, context, group,
  1504. add_volumes=None, remove_volumes=None):
  1505. """Updates a group.
  1506. :param context: the context of the caller.
  1507. :param group: the Group object of the group to be updated.
  1508. :param add_volumes: a list of Volume objects to be added.
  1509. :param remove_volumes: a list of Volume objects to be removed.
  1510. :returns: model_update, add_volumes_update, remove_volumes_update
  1511. model_update is a dictionary that the driver wants the manager
  1512. to update upon a successful return. If None is returned, the manager
  1513. will set the status to 'available'.
  1514. add_volumes_update and remove_volumes_update are lists of dictionaries
  1515. that the driver wants the manager to update upon a successful return.
  1516. Note that each entry requires a {'id': xxx} so that the correct
  1517. volume entry can be updated. If None is returned, the volume will
  1518. remain its original status. Also note that you cannot directly
  1519. assign add_volumes to add_volumes_update as add_volumes is a list of
  1520. volume objects and cannot be used for db update directly. Same with
  1521. remove_volumes.
  1522. If the driver throws an exception, the status of the group as well as
  1523. those of the volumes to be added/removed will be set to 'error'.
  1524. """
  1525. raise NotImplementedError()
  1526. def create_group_from_src(self, context, group, volumes,
  1527. group_snapshot=None, snapshots=None,
  1528. source_group=None, source_vols=None):
  1529. """Creates a group from source.
  1530. :param context: the context of the caller.
  1531. :param group: the Group object to be created.
  1532. :param volumes: a list of Volume objects in the group.
  1533. :param group_snapshot: the GroupSnapshot object as source.
  1534. :param snapshots: a list of Snapshot objects in group_snapshot.
  1535. :param source_group: the Group object as source.
  1536. :param source_vols: a list of Volume objects in the source_group.
  1537. :returns: model_update, volumes_model_update
  1538. The source can be group_snapshot or a source_group.
  1539. param volumes is a list of objects retrieved from the db. It cannot
  1540. be assigned to volumes_model_update. volumes_model_update is a list
  1541. of dictionaries. It has to be built by the driver. An entry will be
  1542. in this format: {'id': xxx, 'status': xxx, ......}. model_update
  1543. will be in this format: {'status': xxx, ......}.
  1544. To be consistent with other volume operations, the manager will
  1545. assume the operation is successful if no exception is thrown by
  1546. the driver. For a successful operation, the driver can either build
  1547. the model_update and volumes_model_update and return them or
  1548. return None, None.
  1549. """
  1550. raise NotImplementedError()
  1551. def create_group_snapshot(self, context, group_snapshot, snapshots):
  1552. """Creates a group_snapshot.
  1553. :param context: the context of the caller.
  1554. :param group_snapshot: the GroupSnapshot object to be created.
  1555. :param snapshots: a list of Snapshot objects in the group_snapshot.
  1556. :returns: model_update, snapshots_model_update
  1557. param snapshots is a list of Snapshot objects. It cannot be assigned
  1558. to snapshots_model_update. snapshots_model_update is a list of
  1559. dictionaries. It has to be built by the driver. An entry will be
  1560. in this format: {'id': xxx, 'status': xxx, ......}. model_update
  1561. will be in this format: {'status': xxx, ......}.
  1562. The driver should populate snapshots_model_update and model_update
  1563. and return them.
  1564. The manager will check snapshots_model_update and update db accordingly
  1565. for each snapshot. If the driver successfully deleted some snapshots
  1566. but failed to delete others, it should set statuses of the snapshots
  1567. accordingly so that the manager can update db correctly.
  1568. If the status in any entry of snapshots_model_update is 'error', the
  1569. status in model_update will be set to the same if it is not already
  1570. 'error'.
  1571. If the status in model_update is 'error', the manager will raise an
  1572. exception and the status of group_snapshot will be set to 'error' in
  1573. the db. If snapshots_model_update is not returned by the driver, the
  1574. manager will set the status of every snapshot to 'error' in the except
  1575. block.
  1576. If the driver raises an exception during the operation, it will be
  1577. caught by the try-except block in the manager and the statuses of
  1578. group_snapshot and all snapshots will be set to 'error'.
  1579. For a successful operation, the driver can either build the
  1580. model_update and snapshots_model_update and return them or
  1581. return None, None. The statuses of group_snapshot and all snapshots
  1582. will be set to 'available' at the end of the manager function.
  1583. """
  1584. raise NotImplementedError()
  1585. def delete_group_snapshot(self, context, group_snapshot, snapshots):
  1586. """Deletes a group_snapshot.
  1587. :param context: the context of the caller.
  1588. :param group_snapshot: the GroupSnapshot object to be deleted.
  1589. :param snapshots: a list of Snapshot objects in the group_snapshot.
  1590. :returns: model_update, snapshots_model_update
  1591. param snapshots is a list of objects. It cannot be assigned to
  1592. snapshots_model_update. snapshots_model_update is a list of of
  1593. dictionaries. It has to be built by the driver. An entry will be
  1594. in this format: {'id': xxx, 'status': xxx, ......}. model_update
  1595. will be in this format: {'status': xxx, ......}.
  1596. The driver should populate snapshots_model_update and model_update
  1597. and return them.
  1598. The manager will check snapshots_model_update and update db accordingly
  1599. for each snapshot. If the driver successfully deleted some snapshots
  1600. but failed to delete others, it should set statuses of the snapshots
  1601. accordingly so that the manager can update db correctly.
  1602. If the status in any entry of snapshots_model_update is
  1603. 'error_deleting' or 'error', the status in model_update will be set to
  1604. the same if it is not already 'error_deleting' or 'error'.
  1605. If the status in model_update is 'error_deleting' or 'error', the
  1606. manager will raise an exception and the status of group_snapshot will
  1607. be set to 'error' in the db. If snapshots_model_update is not returned
  1608. by the driver, the manager will set the status of every snapshot to
  1609. 'error' in the except block.
  1610. If the driver raises an exception during the operation, it will be
  1611. caught by the try-except block in the manager and the statuses of
  1612. group_snapshot and all snapshots will be set to 'error'.
  1613. For a successful operation, the driver can either build the
  1614. model_update and snapshots_model_update and return them or
  1615. return None, None. The statuses of group_snapshot and all snapshots
  1616. will be set to 'deleted' after the manager deletes them from db.
  1617. """
  1618. raise NotImplementedError()
  1619. def extend_volume(self, volume, new_size):
  1620. msg = _("Extend volume not implemented")
  1621. raise NotImplementedError(msg)
  1622. def accept_transfer(self, context, volume, new_user, new_project):
  1623. pass
  1624. def create_volume_from_backup(self, volume, backup):
  1625. """Creates a volume from a backup.
  1626. Can optionally return a Dictionary of changes to the volume object to
  1627. be persisted.
  1628. :param volume: the volume object to be created.
  1629. :param backup: the backup object as source.
  1630. :returns: volume_model_update
  1631. """
  1632. raise NotImplementedError()
  1633. @six.add_metaclass(abc.ABCMeta)
  1634. class CloneableImageVD(object):
  1635. @abc.abstractmethod
  1636. def clone_image(self, volume, image_location,
  1637. image_id, image_meta, image_service):
  1638. """Create a volume efficiently from an existing image.
  1639. image_location is a string whose format depends on the
  1640. image service backend in use. The driver should use it
  1641. to determine whether cloning is possible.
  1642. image_id is a string which represents id of the image.
  1643. It can be used by the driver to introspect internal
  1644. stores or registry to do an efficient image clone.
  1645. image_meta is a dictionary that includes 'disk_format' (e.g.
  1646. raw, qcow2) and other image attributes that allow drivers to
  1647. decide whether they can clone the image without first requiring
  1648. conversion.
  1649. image_service is the reference of the image_service to use.
  1650. Note that this is needed to be passed here for drivers that
  1651. will want to fetch images from the image service directly.
  1652. Returns a dict of volume properties eg. provider_location,
  1653. boolean indicating whether cloning occurred
  1654. """
  1655. return None, False
  1656. @six.add_metaclass(abc.ABCMeta)
  1657. class MigrateVD(object):
  1658. @abc.abstractmethod
  1659. def migrate_volume(self, context, volume, host):
  1660. """Migrate the volume to the specified host.
  1661. Returns a boolean indicating whether the migration occurred, as well as
  1662. model_update.
  1663. :param context: Context
  1664. :param volume: A dictionary describing the volume to migrate
  1665. :param host: A dictionary describing the host to migrate to, where
  1666. host['host'] is its name, and host['capabilities'] is a
  1667. dictionary of its reported capabilities.
  1668. """
  1669. return (False, None)
  1670. @six.add_metaclass(abc.ABCMeta)
  1671. class ManageableVD(object):
  1672. @abc.abstractmethod
  1673. def manage_existing(self, volume, existing_ref):
  1674. """Brings an existing backend storage object under Cinder management.
  1675. existing_ref is passed straight through from the API request's
  1676. manage_existing_ref value, and it is up to the driver how this should
  1677. be interpreted. It should be sufficient to identify a storage object
  1678. that the driver should somehow associate with the newly-created cinder
  1679. volume structure.
  1680. There are two ways to do this:
  1681. 1. Rename the backend storage object so that it matches the,
  1682. volume['name'] which is how drivers traditionally map between a
  1683. cinder volume and the associated backend storage object.
  1684. 2. Place some metadata on the volume, or somewhere in the backend, that
  1685. allows other driver requests (e.g. delete, clone, attach, detach...)
  1686. to locate the backend storage object when required.
  1687. If the existing_ref doesn't make sense, or doesn't refer to an existing
  1688. backend storage object, raise a ManageExistingInvalidReference
  1689. exception.
  1690. The volume may have a volume_type, and the driver can inspect that and
  1691. compare against the properties of the referenced backend storage
  1692. object. If they are incompatible, raise a
  1693. ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
  1694. :param volume: Cinder volume to manage
  1695. :param existing_ref: Driver-specific information used to identify a
  1696. volume
  1697. """
  1698. return
  1699. @abc.abstractmethod
  1700. def manage_existing_get_size(self, volume, existing_ref):
  1701. """Return size of volume to be managed by manage_existing.
  1702. When calculating the size, round up to the next GB.
  1703. :param volume: Cinder volume to manage
  1704. :param existing_ref: Driver-specific information used to identify a
  1705. volume
  1706. :returns size: Volume size in GiB (integer)
  1707. """
  1708. return
  1709. def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
  1710. sort_keys, sort_dirs):
  1711. """List volumes on the backend available for management by Cinder.
  1712. Returns a list of dictionaries, each specifying a volume in the host,
  1713. with the following keys:
  1714. - reference (dictionary): The reference for a volume, which can be
  1715. passed to "manage_existing".
  1716. - size (int): The size of the volume according to the storage
  1717. backend, rounded up to the nearest GB.
  1718. - safe_to_manage (boolean): Whether or not this volume is safe to
  1719. manage according to the storage backend. For example, is the volume
  1720. in use or invalid for any reason.
  1721. - reason_not_safe (string): If safe_to_manage is False, the reason why.
  1722. - cinder_id (string): If already managed, provide the Cinder ID.
  1723. - extra_info (string): Any extra information to return to the user
  1724. :param cinder_volumes: A list of volumes in this host that Cinder
  1725. currently manages, used to determine if
  1726. a volume is manageable or not.
  1727. :param marker: The last item of the previous page; we return the
  1728. next results after this value (after sorting)
  1729. :param limit: Maximum number of items to return
  1730. :param offset: Number of items to skip after marker
  1731. :param sort_keys: List of keys to sort results by (valid keys are
  1732. 'identifier' and 'size')
  1733. :param sort_dirs: List of directions to sort by, corresponding to
  1734. sort_keys (valid directions are 'asc' and 'desc')
  1735. """
  1736. return []
  1737. @abc.abstractmethod
  1738. def unmanage(self, volume):
  1739. """Removes the specified volume from Cinder management.
  1740. Does not delete the underlying backend storage object.
  1741. For most drivers, this will not need to do anything. However, some
  1742. drivers might use this call as an opportunity to clean up any
  1743. Cinder-specific configuration that they have associated with the
  1744. backend storage object.
  1745. :param volume: Cinder volume to unmanage
  1746. """
  1747. pass
  1748. @six.add_metaclass(abc.ABCMeta)
  1749. class ManageableSnapshotsVD(object):
  1750. # NOTE: Can't use abstractmethod before all drivers implement it
  1751. def manage_existing_snapshot(self, snapshot, existing_ref):
  1752. """Brings an existing backend storage object under Cinder management.
  1753. existing_ref is passed straight through from the API request's
  1754. manage_existing_ref value, and it is up to the driver how this should
  1755. be interpreted. It should be sufficient to identify a storage object
  1756. that the driver should somehow associate with the newly-created cinder
  1757. snapshot structure.
  1758. There are two ways to do this:
  1759. 1. Rename the backend storage object so that it matches the
  1760. snapshot['name'] which is how drivers traditionally map between a
  1761. cinder snapshot and the associated backend storage object.
  1762. 2. Place some metadata on the snapshot, or somewhere in the backend,
  1763. that allows other driver requests (e.g. delete) to locate the
  1764. backend storage object when required.
  1765. If the existing_ref doesn't make sense, or doesn't refer to an existing
  1766. backend storage object, raise a ManageExistingInvalidReference
  1767. exception.
  1768. :param snapshot: Cinder volume snapshot to manage
  1769. :param existing_ref: Driver-specific information used to identify a
  1770. volume snapshot
  1771. """
  1772. return
  1773. # NOTE: Can't use abstractmethod before all drivers implement it
  1774. def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
  1775. """Return size of snapshot to be managed by manage_existing.
  1776. When calculating the size, round up to the next GB.
  1777. :param snapshot: Cinder volume snapshot to manage
  1778. :param existing_ref: Driver-specific information used to identify a
  1779. volume snapshot
  1780. :returns size: Volume snapshot size in GiB (integer)
  1781. """
  1782. return
  1783. def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
  1784. sort_keys, sort_dirs):
  1785. """List snapshots on the backend available for management by Cinder.
  1786. Returns a list of dictionaries, each specifying a snapshot in the host,
  1787. with the following keys:
  1788. - reference (dictionary): The reference for a snapshot, which can be
  1789. passed to "manage_existing_snapshot".
  1790. - size (int): The size of the snapshot according to the storage
  1791. backend, rounded up to the nearest GB.
  1792. - safe_to_manage (boolean): Whether or not this snapshot is safe to
  1793. manage according to the storage backend. For example, is the snapshot
  1794. in use or invalid for any reason.
  1795. - reason_not_safe (string): If safe_to_manage is False, the reason why.
  1796. - cinder_id (string): If already managed, provide the Cinder ID.
  1797. - extra_info (string): Any extra information to return to the user
  1798. - source_reference (string): Similar to "reference", but for the
  1799. snapshot's source volume.
  1800. :param cinder_snapshots: A list of snapshots in this host that Cinder
  1801. currently manages, used to determine if
  1802. a snapshot is manageable or not.
  1803. :param marker: The last item of the previous page; we return the
  1804. next results after this value (after sorting)
  1805. :param limit: Maximum number of items to return
  1806. :param offset: Number of items to skip after marker
  1807. :param sort_keys: List of keys to sort results by (valid keys are
  1808. 'identifier' and 'size')
  1809. :param sort_dirs: List of directions to sort by, corresponding to
  1810. sort_keys (valid directions are 'asc' and 'desc')
  1811. """
  1812. return []
  1813. # NOTE: Can't use abstractmethod before all drivers implement it
  1814. def unmanage_snapshot(self, snapshot):
  1815. """Removes the specified snapshot from Cinder management.
  1816. Does not delete the underlying backend storage object.
  1817. For most drivers, this will not need to do anything. However, some
  1818. drivers might use this call as an opportunity to clean up any
  1819. Cinder-specific configuration that they have associated with the
  1820. backend storage object.
  1821. :param snapshot: Cinder volume snapshot to unmanage
  1822. """
  1823. pass
  1824. class VolumeDriver(ManageableVD, CloneableImageVD, ManageableSnapshotsVD,
  1825. MigrateVD, BaseVD):
  1826. def check_for_setup_error(self):
  1827. raise NotImplementedError()
  1828. def create_volume(self, volume):
  1829. raise NotImplementedError()
  1830. def create_volume_from_snapshot(self, volume, snapshot):
  1831. """Creates a volume from a snapshot.
  1832. If volume_type extra specs includes 'replication: <is> True'
  1833. the driver needs to create a volume replica (secondary),
  1834. and setup replication between the newly created volume and
  1835. the secondary volume.
  1836. """
  1837. raise NotImplementedError()
  1838. def delete_volume(self, volume):
  1839. raise NotImplementedError()
  1840. def create_snapshot(self, snapshot):
  1841. """Creates a snapshot."""
  1842. raise NotImplementedError()
  1843. def delete_snapshot(self, snapshot):
  1844. """Deletes a snapshot."""
  1845. raise NotImplementedError()
  1846. def local_path(self, volume):
  1847. raise NotImplementedError()
  1848. def clear_download(self, context, volume):
  1849. pass
  1850. def extend_volume(self, volume, new_size):
  1851. msg = _("Extend volume not implemented")
  1852. raise NotImplementedError(msg)
  1853. def manage_existing(self, volume, existing_ref):
  1854. msg = _("Manage existing volume not implemented.")
  1855. raise NotImplementedError(msg)
  1856. def revert_to_snapshot(self, context, volume, snapshot):
  1857. """Revert volume to snapshot.
  1858. Note: the revert process should not change the volume's
  1859. current size, that means if the driver shrank
  1860. the volume during the process, it should extend the
  1861. volume internally.
  1862. """
  1863. msg = _("Revert volume to snapshot not implemented.")
  1864. raise NotImplementedError(msg)
  1865. def manage_existing_get_size(self, volume, existing_ref):
  1866. msg = _("Manage existing volume not implemented.")
  1867. raise NotImplementedError(msg)
  1868. def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
  1869. sort_keys, sort_dirs):
  1870. msg = _("Get manageable volumes not implemented.")
  1871. raise NotImplementedError(msg)
  1872. def unmanage(self, volume):
  1873. pass
  1874. def manage_existing_snapshot(self, snapshot, existing_ref):
  1875. msg = _("Manage existing snapshot not implemented.")
  1876. raise NotImplementedError(msg)
  1877. def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
  1878. msg = _("Manage existing snapshot not implemented.")
  1879. raise NotImplementedError(msg)
  1880. def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
  1881. sort_keys, sort_dirs):
  1882. msg = _("Get manageable snapshots not implemented.")
  1883. raise NotImplementedError(msg)
  1884. def unmanage_snapshot(self, snapshot):
  1885. """Unmanage the specified snapshot from Cinder management."""
  1886. def retype(self, context, volume, new_type, diff, host):
  1887. return False, None
  1888. # ####### Interface methods for DataPath (Connector) ########
  1889. def ensure_export(self, context, volume):
  1890. raise NotImplementedError()
  1891. def create_export(self, context, volume, connector):
  1892. raise NotImplementedError()
  1893. def create_export_snapshot(self, context, snapshot, connector):
  1894. raise NotImplementedError()
  1895. def remove_export(self, context, volume):
  1896. raise NotImplementedError()
  1897. def remove_export_snapshot(self, context, snapshot):
  1898. raise NotImplementedError()
  1899. def initialize_connection(self, volume, connector, **kwargs):
  1900. raise NotImplementedError()
  1901. def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
  1902. """Allow connection from connector for a snapshot."""
  1903. def terminate_connection(self, volume, connector, **kwargs):
  1904. """Disallow connection from connector
  1905. :param volume: The volume to be disconnected.
  1906. :param connector: A dictionary describing the connection with details
  1907. about the initiator. Can be None.
  1908. """
  1909. def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
  1910. """Disallow connection from connector for a snapshot."""
  1911. def create_consistencygroup(self, context, group):
  1912. """Creates a consistencygroup.
  1913. :param context: the context of the caller.
  1914. :param group: the dictionary of the consistency group to be created.
  1915. :returns: model_update
  1916. model_update will be in this format: {'status': xxx, ......}.
  1917. If the status in model_update is 'error', the manager will throw
  1918. an exception and it will be caught in the try-except block in the
  1919. manager. If the driver throws an exception, the manager will also
  1920. catch it in the try-except block. The group status in the db will
  1921. be changed to 'error'.
  1922. For a successful operation, the driver can either build the
  1923. model_update and return it or return None. The group status will
  1924. be set to 'available'.
  1925. """
  1926. raise NotImplementedError()
  1927. def create_consistencygroup_from_src(self, context, group, volumes,
  1928. cgsnapshot=None, snapshots=None,
  1929. source_cg=None, source_vols=None):
  1930. """Creates a consistencygroup from source.
  1931. :param context: the context of the caller.
  1932. :param group: the dictionary of the consistency group to be created.
  1933. :param volumes: a list of volume dictionaries in the group.
  1934. :param cgsnapshot: the dictionary of the cgsnapshot as source.
  1935. :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
  1936. :param source_cg: the dictionary of a consistency group as source.
  1937. :param source_vols: a list of volume dictionaries in the source_cg.
  1938. :returns: model_update, volumes_model_update
  1939. The source can be cgsnapshot or a source cg.
  1940. param volumes is retrieved directly from the db. It is a list of
  1941. cinder.db.sqlalchemy.models.Volume to be precise. It cannot be
  1942. assigned to volumes_model_update. volumes_model_update is a list of
  1943. dictionaries. It has to be built by the driver. An entry will be
  1944. in this format: {'id': xxx, 'status': xxx, ......}. model_update
  1945. will be in this format: {'status': xxx, ......}.
  1946. To be consistent with other volume operations, the manager will
  1947. assume the operation is successful if no exception is thrown by
  1948. the driver. For a successful operation, the driver can either build
  1949. the model_update and volumes_model_update and return them or
  1950. return None, None.
  1951. """
  1952. raise NotImplementedError()
  1953. def delete_consistencygroup(self, context, group, volumes):
  1954. """Deletes a consistency group.
  1955. :param context: the context of the caller.
  1956. :param group: the dictionary of the consistency group to be deleted.
  1957. :param volumes: a list of volume dictionaries in the group.
  1958. :returns: model_update, volumes_model_update
  1959. param volumes is retrieved directly from the db. It is a list of
  1960. cinder.db.sqlalchemy.models.Volume to be precise. It cannot be
  1961. assigned to volumes_model_update. volumes_model_update is a list of
  1962. dictionaries. It has to be built by the driver. An entry will be
  1963. in this format: {'id': xxx, 'status': xxx, ......}. model_update
  1964. will be in this format: {'status': xxx, ......}.
  1965. The driver should populate volumes_model_update and model_update
  1966. and return them.
  1967. The manager will check volumes_model_update and update db accordingly
  1968. for each volume. If the driver successfully deleted some volumes
  1969. but failed to delete others, it should set statuses of the volumes
  1970. accordingly so that the manager can update db correctly.
  1971. If the status in any entry of volumes_model_update is 'error_deleting'
  1972. or 'error', the status in model_update will be set to the same if it
  1973. is not already 'error_deleting' or 'error'.
  1974. If the status in model_update is 'error_deleting' or 'error', the
  1975. manager will raise an exception and the status of the group will be
  1976. set to 'error' in the db. If volumes_model_update is not returned by
  1977. the driver, the manager will set the status of every volume in the
  1978. group to 'error' in the except block.
  1979. If the driver raises an exception during the operation, it will be
  1980. caught by the try-except block in the manager. The statuses of the
  1981. group and all volumes in it will be set to 'error'.
  1982. For a successful operation, the driver can either build the
  1983. model_update and volumes_model_update and return them or
  1984. return None, None. The statuses of the group and all volumes
  1985. will be set to 'deleted' after the manager deletes them from db.
  1986. """
  1987. raise NotImplementedError()
  1988. def update_consistencygroup(self, context, group,
  1989. add_volumes=None, remove_volumes=None):
  1990. """Updates a consistency group.
  1991. :param context: the context of the caller.
  1992. :param group: the dictionary of the consistency group to be updated.
  1993. :param add_volumes: a list of volume dictionaries to be added.
  1994. :param remove_volumes: a list of volume dictionaries to be removed.
  1995. :returns: model_update, add_volumes_update, remove_volumes_update
  1996. model_update is a dictionary that the driver wants the manager
  1997. to update upon a successful return. If None is returned, the manager
  1998. will set the status to 'available'.
  1999. add_volumes_update and remove_volumes_update are lists of dictionaries
  2000. that the driver wants the manager to update upon a successful return.
  2001. Note that each entry requires a {'id': xxx} so that the correct
  2002. volume entry can be updated. If None is returned, the volume will
  2003. remain its original status. Also note that you cannot directly
  2004. assign add_volumes to add_volumes_update as add_volumes is a list of
  2005. cinder.db.sqlalchemy.models.Volume objects and cannot be used for
  2006. db update directly. Same with remove_volumes.
  2007. If the driver throws an exception, the status of the group as well as
  2008. those of the volumes to be added/removed will be set to 'error'.
  2009. """
  2010. raise NotImplementedError()
  2011. def create_cgsnapshot(self, context, cgsnapshot, snapshots):
  2012. """Creates a cgsnapshot.
  2013. :param context: the context of the caller.
  2014. :param cgsnapshot: the dictionary of the cgsnapshot to be created.
  2015. :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
  2016. :returns: model_update, snapshots_model_update
  2017. param snapshots is retrieved directly from the db. It is a list of
  2018. cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be
  2019. assigned to snapshots_model_update. snapshots_model_update is a list
  2020. of dictionaries. It has to be built by the driver. An entry will be
  2021. in this format: {'id': xxx, 'status': xxx, ......}. model_update
  2022. will be in this format: {'status': xxx, ......}.
  2023. The driver should populate snapshots_model_update and model_update
  2024. and return them.
  2025. The manager will check snapshots_model_update and update db accordingly
  2026. for each snapshot. If the driver successfully deleted some snapshots
  2027. but failed to delete others, it should set statuses of the snapshots
  2028. accordingly so that the manager can update db correctly.
  2029. If the status in any entry of snapshots_model_update is 'error', the
  2030. status in model_update will be set to the same if it is not already
  2031. 'error'.
  2032. If the status in model_update is 'error', the manager will raise an
  2033. exception and the status of cgsnapshot will be set to 'error' in the
  2034. db. If snapshots_model_update is not returned by the driver, the
  2035. manager will set the status of every snapshot to 'error' in the except
  2036. block.
  2037. If the driver raises an exception during the operation, it will be
  2038. caught by the try-except block in the manager and the statuses of
  2039. cgsnapshot and all snapshots will be set to 'error'.
  2040. For a successful operation, the driver can either build the
  2041. model_update and snapshots_model_update and return them or
  2042. return None, None. The statuses of cgsnapshot and all snapshots
  2043. will be set to 'available' at the end of the manager function.
  2044. """
  2045. raise NotImplementedError()
  2046. def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
  2047. """Deletes a cgsnapshot.
  2048. :param context: the context of the caller.
  2049. :param cgsnapshot: the dictionary of the cgsnapshot to be deleted.
  2050. :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
  2051. :returns: model_update, snapshots_model_update
  2052. param snapshots is retrieved directly from the db. It is a list of
  2053. cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be
  2054. assigned to snapshots_model_update. snapshots_model_update is a list
  2055. of dictionaries. It has to be built by the driver. An entry will be
  2056. in this format: {'id': xxx, 'status': xxx, ......}. model_update
  2057. will be in this format: {'status': xxx, ......}.
  2058. The driver should populate snapshots_model_update and model_update
  2059. and return them.
  2060. The manager will check snapshots_model_update and update db accordingly
  2061. for each snapshot. If the driver successfully deleted some snapshots
  2062. but failed to delete others, it should set statuses of the snapshots
  2063. accordingly so that the manager can update db correctly.
  2064. If the status in any entry of snapshots_model_update is
  2065. 'error_deleting' or 'error', the status in model_update will be set to
  2066. the same if it is not already 'error_deleting' or 'error'.
  2067. If the status in model_update is 'error_deleting' or 'error', the
  2068. manager will raise an exception and the status of cgsnapshot will be
  2069. set to 'error' in the db. If snapshots_model_update is not returned by
  2070. the driver, the manager will set the status of every snapshot to
  2071. 'error' in the except block.
  2072. If the driver raises an exception during the operation, it will be
  2073. caught by the try-except block in the manager and the statuses of
  2074. cgsnapshot and all snapshots will be set to 'error'.
  2075. For a successful operation, the driver can either build the
  2076. model_update and snapshots_model_update and return them or
  2077. return None, None. The statuses of cgsnapshot and all snapshots
  2078. will be set to 'deleted' after the manager deletes them from db.
  2079. """
  2080. raise NotImplementedError()
  2081. def clone_image(self, volume, image_location, image_id, image_meta,
  2082. image_service):
  2083. return None, False
  2084. def get_pool(self, volume):
  2085. """Return pool name where volume reside on.
  2086. :param volume: The volume hosted by the driver.
  2087. :returns: name of the pool where given volume is in.
  2088. """
  2089. return None
  2090. def migrate_volume(self, context, volume, host):
  2091. return (False, None)
  2092. def accept_transfer(self, context, volume, new_user, new_project):
  2093. pass
  2094. class ProxyVD(object):
  2095. """Proxy Volume Driver to mark proxy drivers
  2096. If a driver uses a proxy class (e.g. by using __setattr__ and
  2097. __getattr__) without directly inheriting from base volume driver this
  2098. class can help marking them and retrieve the actual used driver object.
  2099. """
  2100. def _get_driver(self):
  2101. """Returns the actual driver object.
  2102. Can be overloaded by the proxy.
  2103. """
  2104. return getattr(self, "driver", None)
class ISCSIDriver(VolumeDriver):
    """Executes commands relating to ISCSI volumes.

    We make use of model provider properties as follows:

    ``provider_location``
      if present, contains the iSCSI target information in the same
      format as an ietadm discovery
      i.e. '<ip>:<port>,<portal> <target IQN>'

    ``provider_auth``
      if present, contains a space-separated triple:
      '<auth method> <auth username> <auth password>'.
      `CHAP` is the only auth_method in use at the moment.
    """

    def __init__(self, *args, **kwargs):
        super(ISCSIDriver, self).__init__(*args, **kwargs)

    def _do_iscsi_discovery(self, volume):
        """Discover the volume's iSCSI target via 'iscsiadm -m discovery'.

        Used only when the volume carries no stored provider_location.
        Returns the discovery output line containing both the configured
        target IP address and the volume name, or None when the iscsiadm
        call fails or no line matches.
        """
        # TODO(justinsb): Deprecate discovery and use stored info
        # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
        LOG.warning("ISCSI provider_location not stored, using discovery")

        volume_name = volume['name']

        try:
            # NOTE(griff) We're doing the split straight away which should be
            # safe since using '@' in hostname is considered invalid
            (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                        '-t', 'sendtargets', '-p',
                                        volume['host'].split('@')[0],
                                        run_as_root=True)
        except processutils.ProcessExecutionError as ex:
            LOG.error("ISCSI discovery attempt failed for:%s",
                      volume['host'].split('@')[0])
            LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
            return None

        # Pick the first discovery line that mentions both our target IP
        # and the volume name.
        for target in out.splitlines():
            if (self.configuration.target_ip_address in target
                    and volume_name in target):
                return target
        return None

    def _get_iscsi_properties(self, volume, multipath=False):
        """Gets iscsi configuration

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in
        future.

        The properties are:

        :target_discovered: boolean indicating whether discovery was used
        :target_iqn: the IQN of the iSCSI target
        :target_portal: the portal of the iSCSI target
        :target_lun: the lun of the iSCSI target
        :volume_id: the id of the volume (currently used by xen)
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        :discard: boolean indicating if discard is supported

        In some of drivers that support multiple connections (for multipath
        and for single path with failover on connection failure), it returns
        :target_iqns, :target_portals, :target_luns, which contain lists of
        multiple values. The main portal information is also returned in
        :target_iqn, :target_portal, :target_lun for backward compatibility.

        Note that some of drivers don't return :target_portals even if they
        support multipath. Then the connector should use sendtargets
        discovery to find the other portals if it supports multipath.
        """
        properties = {}

        location = volume['provider_location']

        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)

            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                       (volume['name']))
                raise exception.InvalidVolume(reason=msg)

            LOG.debug("ISCSI Discovery: Found %s", location)
            properties['target_discovered'] = True

        # location format: '<portal>[;<portal>...],<tag> <iqn> [<lun>]'
        results = location.split(" ")
        portals = results[0].split(",")[0].split(";")
        iqn = results[1]
        nr_portals = len(portals)

        try:
            lun = int(results[2])
        except (IndexError, ValueError):
            # No explicit LUN in the location string: fall back to the
            # per-target-helper default (tgtadm starts LUNs at 1).
            if (self.configuration.volume_driver ==
                    'cinder.volume.drivers.lvm.ThinLVMVolumeDriver' and
                    self.configuration.target_helper == 'tgtadm'):
                lun = 1
            else:
                lun = 0

        if nr_portals > 1:
            # Multiple portals: expose the list forms alongside the
            # single-value keys kept below for backward compatibility.
            properties['target_portals'] = portals
            properties['target_iqns'] = [iqn] * nr_portals
            properties['target_luns'] = [lun] * nr_portals
        properties['target_portal'] = portals[0]
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun

        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size

        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None

        return properties

    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
        """Run an 'iscsiadm -m node' command against the volume's target.

        The target IQN and portal are taken from iscsi_properties;
        iscsi_command supplies the remaining arguments.  Pass
        check_exit_code via kwargs to override the default of 0.
        Returns the (stdout, stderr) pair.
        """
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': iscsi_command, 'out': out, 'err': err})
        return (out, err)

    def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
        """Run iscsiadm with only the caller-supplied arguments.

        Unlike _run_iscsiadm, no target/portal arguments are added.
        Returns the (stdout, stderr) pair.
        """
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm',
                                   *iscsi_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': iscsi_command, 'out': out, 'err': err})
        return (out, err)

    def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
                         **kwargs):
        """Update a single node db property via 'iscsiadm --op update'."""
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'volume_id': 1,
                    'discard': False,
                }
            }

        If the backend driver supports multiple connections for multipath and
        for single path with failover, "target_portals", "target_iqns",
        "target_luns" are also populated::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume1',
                    'target_iqns': ['iqn.2010-10.org.openstack:volume1',
                                    'iqn.2010-10.org.openstack:volume1-2'],
                    'target_portal': '10.0.0.1:3260',
                    'target_portals': ['10.0.0.1:3260', '10.0.1.1:3260'],
                    'target_lun': 1,
                    'target_luns': [1, 1],
                    'volume_id': 1,
                    'discard': False,
                }
            }
        """
        # NOTE(jdg): Yes, this is duplicated in the volume/target
        # drivers, for now leaving it as there are 3'rd party
        # drivers that don't use target drivers, but inherit from
        # this base class and use this init data
        iscsi_properties = self._get_iscsi_properties(volume)
        return {
            'driver_volume_type':
                self.configuration.safe_get('target_protocol'),
            'data': iscsi_properties
        }

    def validate_connector(self, connector):
        """Raise InvalidConnectorException unless an initiator is present."""
        # iSCSI drivers require the initiator information
        required = 'initiator'
        if required not in connector:
            LOG.error('The volume driver requires %(data)s '
                      'in the connector.', {'data': required})
            raise exception.InvalidConnectorException(missing=required)

    def terminate_connection(self, volume, connector, **kwargs):
        # No-op here; subclasses/target drivers handle actual teardown.
        pass

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()

        return self._stats

    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""

        LOG.debug("Updating volume stats...")
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or 'Generic_iSCSI'
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = '1.0'
        data["storage_protocol"] = 'iSCSI'
        data["pools"] = []
        data["replication_enabled"] = False

        self._update_pools_and_stats(data)
  2309. class ISERDriver(ISCSIDriver):
  2310. """Executes commands relating to ISER volumes.
  2311. We make use of model provider properties as follows:
  2312. ``provider_location``
  2313. if present, contains the iSER target information in the same
  2314. format as an ietadm discovery
  2315. i.e. '<ip>:<port>,<portal> <target IQN>'
  2316. ``provider_auth``
  2317. if present, contains a space-separated triple:
  2318. '<auth method> <auth username> <auth password>'.
  2319. `CHAP` is the only auth_method in use at the moment.
  2320. """
  2321. def __init__(self, *args, **kwargs):
  2322. super(ISERDriver, self).__init__(*args, **kwargs)
  2323. # for backward compatibility
  2324. self.configuration.num_volume_device_scan_tries = \
  2325. self.configuration.num_iser_scan_tries
  2326. self.configuration.target_prefix = \
  2327. self.configuration.iser_target_prefix
  2328. self.configuration.target_ip_address = \
  2329. self.configuration.iser_ip_address
  2330. self.configuration.target_port = self.configuration.iser_port
  2331. def initialize_connection(self, volume, connector):
  2332. """Initializes the connection and returns connection info.
  2333. The iser driver returns a driver_volume_type of 'iser'.
  2334. The format of the driver data is defined in _get_iser_properties.
  2335. Example return value:
  2336. .. code-block:: default
  2337. {
  2338. 'driver_volume_type': 'iser',
  2339. 'data': {
  2340. 'target_discovered': True,
  2341. 'target_iqn':
  2342. 'iqn.2010-10.org.iser.openstack:volume-00000001',
  2343. 'target_portal': '127.0.0.0.1:3260',
  2344. 'volume_id': 1,
  2345. }
  2346. }
  2347. """
  2348. iser_properties = self._get_iscsi_properties(volume)
  2349. return {
  2350. 'driver_volume_type': 'iser',
  2351. 'data': iser_properties
  2352. }
  2353. def _update_volume_stats(self):
  2354. """Retrieve stats info from volume group."""
  2355. LOG.debug("Updating volume stats...")
  2356. data = {}
  2357. backend_name = self.configuration.safe_get('volume_backend_name')
  2358. data["volume_backend_name"] = backend_name or 'Generic_iSER'
  2359. data["vendor_name"] = 'Open Source'
  2360. data["driver_version"] = '1.0'
  2361. data["storage_protocol"] = 'iSER'
  2362. data["pools"] = []
  2363. self._update_pools_and_stats(data)
  2364. class FibreChannelDriver(VolumeDriver):
  2365. """Executes commands relating to Fibre Channel volumes."""
  2366. def __init__(self, *args, **kwargs):
  2367. super(FibreChannelDriver, self).__init__(*args, **kwargs)
  2368. def initialize_connection(self, volume, connector):
  2369. """Initializes the connection and returns connection info.
  2370. The driver returns a driver_volume_type of 'fibre_channel'.
  2371. The target_wwn can be a single entry or a list of wwns that
  2372. correspond to the list of remote wwn(s) that will export the volume.
  2373. Example return values:
  2374. .. code-block:: default
  2375. {
  2376. 'driver_volume_type': 'fibre_channel',
  2377. 'data': {
  2378. 'target_discovered': True,
  2379. 'target_lun': 1,
  2380. 'target_wwn': '1234567890123',
  2381. 'discard': False,
  2382. }
  2383. }
  2384. or
  2385. .. code-block:: default
  2386. {
  2387. 'driver_volume_type': 'fibre_channel',
  2388. 'data': {
  2389. 'target_discovered': True,
  2390. 'target_lun': 1,
  2391. 'target_wwn': ['1234567890123', '0987654321321'],
  2392. 'discard': False,
  2393. }
  2394. }
  2395. """
  2396. msg = _("Driver must implement initialize_connection")
  2397. raise NotImplementedError(msg)
  2398. def validate_connector(self, connector):
  2399. """Fail if connector doesn't contain all the data needed by driver.
  2400. Do a check on the connector and ensure that it has wwnns, wwpns.
  2401. """
  2402. self.validate_connector_has_setting(connector, 'wwpns')
  2403. self.validate_connector_has_setting(connector, 'wwnns')
  2404. @staticmethod
  2405. def validate_connector_has_setting(connector, setting):
  2406. """Test for non-empty setting in connector."""
  2407. if setting not in connector or not connector[setting]:
  2408. LOG.error(
  2409. "FibreChannelDriver validate_connector failed. "
  2410. "No '%(setting)s'. Make sure HBA state is Online.",
  2411. {'setting': setting})
  2412. raise exception.InvalidConnectorException(missing=setting)
  2413. def get_volume_stats(self, refresh=False):
  2414. """Get volume stats.
  2415. If 'refresh' is True, run update the stats first.
  2416. """
  2417. if refresh:
  2418. self._update_volume_stats()
  2419. return self._stats
  2420. def _update_volume_stats(self):
  2421. """Retrieve stats info from volume group."""
  2422. LOG.debug("Updating volume stats...")
  2423. data = {}
  2424. backend_name = self.configuration.safe_get('volume_backend_name')
  2425. data["volume_backend_name"] = backend_name or 'Generic_FC'
  2426. data["vendor_name"] = 'Open Source'
  2427. data["driver_version"] = '1.0'
  2428. data["storage_protocol"] = 'FC'
  2429. data["pools"] = []
  2430. self._update_pools_and_stats(data)