5568b40d06

Pools reported by the NFS and block drivers for cDOT and 7-mode now report
multiattach as a capability. NetApp cDOT and 7-mode LUNs have always supported
multi-attach, but the capability was not previously reported as such.

Closes-Bug: #1612763
Change-Id: Ib7545438998b02fb7670df44a6486764c401c5f6
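In the NFS cDOT driver below, the capability is reported per pool in
_get_pool_stats(), alongside the driver's other static capabilities. The
relevant lines, excerpted from the file that follows, are:

    # Add driver capabilities and config info
    pool['QoS_support'] = True
    pool['consistencygroup_support'] = True
    pool['multiattach'] = True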
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""

import os
import uuid

from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
import six

from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import utils as cmode_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils


LOG = logging.getLogger(__name__)
SSC_UPDATE_INTERVAL_SECONDS = 3600  # hourly


@interface.volumedriver
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
                           data_motion.DataMotionMixin):
    """NetApp NFS driver for Data ONTAP (Cluster-mode)."""

    REQUIRED_CMODE_FLAGS = ['netapp_vserver']

    def __init__(self, *args, **kwargs):
        super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)
        self.failed_over_backend_name = kwargs.get('active_backend_id')
        self.failed_over = self.failed_over_backend_name is not None
        self.replication_enabled = (
            True if self.get_replication_backend_names(
                self.configuration) else False)

    def do_setup(self, context):
        """Do the customized set up on client for cluster mode."""
        super(NetAppCmodeNfsDriver, self).do_setup(context)
        na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)

        # cDOT API client
        self.zapi_client = cmode_utils.get_client_for_backend(
            self.failed_over_backend_name or self.backend_name)
        self.vserver = self.zapi_client.vserver

        # Performance monitoring library
        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
            self.zapi_client)

        # Storage service catalog
        self.ssc_library = capabilities.CapabilitiesLibrary(
            'nfs', self.vserver, self.zapi_client, self.configuration)

    def _update_zapi_client(self, backend_name):
        """Set cDOT API client for the specified config backend stanza name."""

        self.zapi_client = cmode_utils.get_client_for_backend(backend_name)
        self.vserver = self.zapi_client.vserver
        self.ssc_library._update_for_failover(self.zapi_client,
                                              self._get_flexvol_to_pool_map())
        ssc = self.ssc_library.get_ssc()
        self.perf_library._update_for_failover(self.zapi_client, ssc)

    @utils.trace_method
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        super(NetAppCmodeNfsDriver, self).check_for_setup_error()
        self.ssc_library.check_api_permissions()

    def _start_periodic_tasks(self):
        """Start recurring tasks for NetApp cDOT NFS driver."""

        # Note(cknight): Run the task once in the current thread to prevent a
        # race with the first invocation of _update_volume_stats.
        self._update_ssc()

        # Start the task that updates the slow-changing storage service catalog
        ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self._update_ssc)
        ssc_periodic_task.start(
            interval=SSC_UPDATE_INTERVAL_SECONDS,
            initial_delay=SSC_UPDATE_INTERVAL_SECONDS)

        super(NetAppCmodeNfsDriver, self)._start_periodic_tasks()

    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""
        super(NetAppCmodeNfsDriver, self)._handle_housekeeping_tasks()

        # Harvest soft-deleted QoS policy groups
        self.zapi_client.remove_unused_qos_policy_groups()

        active_backend = self.failed_over_backend_name or self.backend_name

        LOG.debug("Current service state: Replication enabled: %("
                  "replication)s. Failed-Over: %(failed)s. Active Backend "
                  "ID: %(active)s",
                  {
                      'replication': self.replication_enabled,
                      'failed': self.failed_over,
                      'active': active_backend,
                  })

        # Create pool mirrors if whole-backend replication configured
        if self.replication_enabled and not self.failed_over:
            self.ensure_snapmirrors(
                self.configuration, self.backend_name,
                self.ssc_library.get_ssc_flexvol_names())

    def _do_qos_for_volume(self, volume, extra_specs, cleanup=True):
        """Create and assign a QoS policy group for the volume."""
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume, extra_specs)
            self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
            self._set_qos_policy_group_on_volume(volume, qos_policy_group_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Setting QoS for %s failed"), volume['id'])
                if cleanup:
                    LOG.debug("Cleaning volume %s", volume['id'])
                    self._cleanup_volume_on_failure(volume)

    def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info):
        """Assign a QoS policy group to the file backing the volume."""
        if qos_policy_group_info is None:
            return
        qos_policy_group_name = na_utils.get_qos_policy_group_name_from_info(
            qos_policy_group_info)
        if qos_policy_group_name is None:
            return
        target_path = '%s' % (volume['name'])
        share = volume_utils.extract_host(volume['host'], level='pool')
        export_path = share.split(':')[1]
        flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver,
                                                                 export_path)
        self.zapi_client.file_assign_qos(flex_vol_name,
                                         qos_policy_group_name,
                                         target_path)

    def _clone_backing_file_for_volume(self, volume_name, clone_name,
                                       volume_id, share=None,
                                       is_snapshot=False,
                                       source_snapshot=None):
        """Clone backing file for Cinder volume."""
        (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
        self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
                                    vserver, is_snapshot=is_snapshot)

    def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
        """Gets the vserver and export volume for share."""
        (host_ip, export_path) = self._get_export_ip_path(volume_id, share)
        ifs = self.zapi_client.get_if_info_by_ip(host_ip)
        vserver = ifs[0].get_child_content('vserver')
        exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver,
                                                              export_path)
        return vserver, exp_volume

    def _update_volume_stats(self):
        """Retrieve stats info from vserver."""

        LOG.debug('Updating volume stats')
        data = {}
        netapp_backend = 'NetApp_NFS_Cluster_direct'
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or netapp_backend
        data['vendor_name'] = 'NetApp'
        data['driver_version'] = self.VERSION
        data['storage_protocol'] = 'nfs'
        data['pools'] = self._get_pool_stats(
            filter_function=self.get_filter_function(),
            goodness_function=self.get_goodness_function())
        data['sparse_copy_volume'] = True
        data.update(self.get_replication_backend_stats(self.configuration))

        self._spawn_clean_cache_job()
        self.zapi_client.provide_ems(self, netapp_backend, self._app_version)
        self._stats = data

    def _get_pool_stats(self, filter_function=None, goodness_function=None):
        """Retrieve pool (Data ONTAP flexvol) stats.

        Pool statistics are assembled from static driver capabilities, the
        Storage Service Catalog of flexvol attributes, and real-time capacity
        and controller utilization metrics. The pool name is the NFS share
        path.
        """

        pools = []

        ssc = self.ssc_library.get_ssc()
        if not ssc:
            return pools

        # Get up-to-date node utilization metrics just once
        self.perf_library.update_performance_cache(ssc)

        # Get up-to-date aggregate capacities just once
        aggregates = self.ssc_library.get_ssc_aggregates()
        aggr_capacities = self.zapi_client.get_aggregate_capacities(aggregates)

        for ssc_vol_name, ssc_vol_info in ssc.items():

            pool = dict()

            # Add storage service catalog data
            pool.update(ssc_vol_info)

            # Add driver capabilities and config info
            pool['QoS_support'] = True
            pool['consistencygroup_support'] = True
            pool['multiattach'] = True

            # Add up-to-date capacity info
            nfs_share = ssc_vol_info['pool_name']
            capacity = self._get_share_capacity_info(nfs_share)
            pool.update(capacity)

            aggregate_name = ssc_vol_info.get('netapp_aggregate')
            aggr_capacity = aggr_capacities.get(aggregate_name, {})
            pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
                'percent-used', 0)

            # Add utilization data
            utilization = self.perf_library.get_node_utilization_for_pool(
                ssc_vol_name)
            pool['utilization'] = na_utils.round_down(utilization)
            pool['filter_function'] = filter_function
            pool['goodness_function'] = goodness_function

            pools.append(pool)

        return pools

    def _update_ssc(self):
        """Refresh the storage service catalog with the latest set of pools."""

        self._ensure_shares_mounted()
        self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())

    def _get_flexvol_to_pool_map(self):
        """Get the flexvols that back all mounted shares.

        The map is of the format suitable for seeding the storage service
        catalog: {<flexvol_name> : {'pool_name': <share_path>}}
        """

        pools = {}
        vserver_addresses = self.zapi_client.get_operational_lif_addresses()

        for share in self._mounted_shares:

            host = share.split(':')[0]
            junction_path = share.split(':')[1]
            address = na_utils.resolve_hostname(host)

            if address not in vserver_addresses:
                msg = _LW('Address not found for NFS share %s.')
                LOG.warning(msg, share)
                continue

            try:
                flexvol = self.zapi_client.get_flexvol(
                    flexvol_path=junction_path)
                pools[flexvol['name']] = {'pool_name': share}
            except exception.VolumeBackendAPIException:
                msg = _LE('Flexvol not found for NFS share %s.')
                LOG.exception(msg, share)

        return pools

    def _shortlist_del_eligible_files(self, share, old_files):
        """Prepares list of eligible files to be deleted from cache."""
        file_list = []
        (vserver, exp_volume) = self._get_vserver_and_exp_vol(
            volume_id=None, share=share)
        for old_file in old_files:
            path = '/vol/%s/%s' % (exp_volume, old_file)
            u_bytes = self.zapi_client.get_file_usage(path, vserver)
            file_list.append((old_file, u_bytes))
        LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
        return file_list

    def _share_match_for_ip(self, ip, shares):
        """Returns the share that is served by the given IP.

        Multiple shares can have the same directory path but be served
        using different IPs. This finds the share that is served by the
        given IP on the same NFS server.
        """
        ip_vserver = self._get_vserver_for_ip(ip)
        if ip_vserver and shares:
            for share in shares:
                ip_sh = share.split(':')[0]
                sh_vserver = self._get_vserver_for_ip(ip_sh)
                if sh_vserver == ip_vserver:
                    LOG.debug('Share match found for ip %s', ip)
                    return share
        LOG.debug('No share match found for ip %s', ip)
        return None

    def _get_vserver_for_ip(self, ip):
        """Get vserver for the mentioned ip."""
        try:
            ifs = self.zapi_client.get_if_info_by_ip(ip)
            vserver = ifs[0].get_child_content('vserver')
            return vserver
        except Exception:
            return None

    def _is_share_clone_compatible(self, volume, share):
        """Checks if share is compatible with volume to host its clone."""
        flexvol_name = self._get_flexvol_name_for_share(share)
        thin = self._is_volume_thin_provisioned(flexvol_name)
        return (
            self._share_has_space_for_clone(share, volume['size'], thin) and
            self._is_share_vol_type_match(volume, share, flexvol_name)
        )

    def _is_volume_thin_provisioned(self, flexvol_name):
        """Checks if a flexvol is thin (sparse file or thin provisioned)."""
        ssc_info = self.ssc_library.get_ssc_for_flexvol(flexvol_name)
        return ssc_info.get('thin_provisioning_support') or False

    def _is_share_vol_type_match(self, volume, share, flexvol_name):
        """Checks if share matches volume type."""
        LOG.debug("Found volume %(vol)s for share %(share)s.",
                  {'vol': flexvol_name, 'share': share})
        extra_specs = na_utils.get_volume_extra_specs(volume)
        flexvol_names = self.ssc_library.get_matching_flexvols_for_extra_specs(
            extra_specs)
        return flexvol_name in flexvol_names

    def _get_flexvol_name_for_share(self, nfs_share):
        """Queries the SSC for the flexvol containing an NFS share."""
        ssc = self.ssc_library.get_ssc()
        for ssc_vol_name, ssc_vol_info in ssc.items():
            if nfs_share == ssc_vol_info.get('pool_name'):
                return ssc_vol_name
        return None

    @utils.trace_method
    def delete_volume(self, volume):
        """Deletes a logical volume."""
        self._delete_backing_file_for_volume(volume)
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume)
            self.zapi_client.mark_qos_policy_group_for_deletion(
                qos_policy_group_info)
        except Exception:
            # Don't blow up here if something went wrong de-provisioning the
            # QoS policy for the volume.
            pass

    def _delete_backing_file_for_volume(self, volume):
        """Deletes file on nfs share that backs a cinder volume."""
        try:
            LOG.debug('Deleting backing file for volume %s.', volume['id'])
            self._delete_file(volume['id'], volume['name'])
        except Exception:
            LOG.exception(_LE('Could not delete volume %s on backend, '
                              'falling back to exec of "rm" command.'),
                          volume['id'])
            try:
                super(NetAppCmodeNfsDriver, self).delete_volume(volume)
            except Exception:
                LOG.exception(_LE('Exec of "rm" command on backing file for '
                                  '%s was unsuccessful.'), volume['id'])

    def _delete_file(self, file_id, file_name):
        """Delete a backend file given the owning volume ID and file name."""
        (_vserver, flexvol) = self._get_export_ip_path(volume_id=file_id)
        path_on_backend = '/vol' + flexvol + '/' + file_name
        LOG.debug('Attempting to delete file %(path)s for ID %(file_id)s on '
                  'backend.', {'path': path_on_backend, 'file_id': file_id})
        self.zapi_client.delete_file(path_on_backend)

    @utils.trace_method
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        self._delete_backing_file_for_snapshot(snapshot)

    def _delete_backing_file_for_snapshot(self, snapshot):
        """Deletes file on nfs share that backs a cinder snapshot."""
        try:
            LOG.debug('Deleting backing file for snapshot %s.', snapshot['id'])
            self._delete_file(snapshot['volume_id'], snapshot['name'])
        except Exception:
            LOG.exception(_LE('Could not delete snapshot %s on backend, '
                              'falling back to exec of "rm" command.'),
                          snapshot['id'])
            try:
                # delete_file_from_share
                super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot)
            except Exception:
                LOG.exception(_LE('Exec of "rm" command on backing file for'
                                  ' %s was unsuccessful.'), snapshot['id'])

    @utils.trace_method
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        copy_success = False
        try:
            major, minor = self.zapi_client.get_ontapi_version()
            col_path = self.configuration.netapp_copyoffload_tool_path
            # Search the local image cache before attempting copy offload
            cache_result = self._find_image_in_cache(image_id)
            if cache_result:
                copy_success = self._copy_from_cache(volume, image_id,
                                                     cache_result)
                if copy_success:
                    LOG.info(_LI('Copied image %(img)s to volume %(vol)s '
                                 'using local image cache.'),
                             {'img': image_id, 'vol': volume['id']})
            # Image cache was not present, attempt copy offload workflow
            if (not copy_success and col_path and
                    major == 1 and minor >= 20):
                LOG.debug('No result found in image cache')
                self._copy_from_img_service(context, volume, image_service,
                                            image_id)
                LOG.info(_LI('Copied image %(img)s to volume %(vol)s using'
                             ' copy offload workflow.'),
                         {'img': image_id, 'vol': volume['id']})
                copy_success = True
        except Exception as e:
            LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e)
        finally:
            if not copy_success:
                super(NetAppCmodeNfsDriver, self).copy_image_to_volume(
                    context, volume, image_service, image_id)

    def _get_ip_verify_on_cluster(self, host):
        """Verifies that the host is on the same cluster and returns its IP."""
        ip = na_utils.resolve_hostname(host)
        vserver = self._get_vserver_for_ip(ip)
        if not vserver:
            raise exception.NotFound(_("Unable to locate an SVM that is "
                                       "managing the IP address '%s'") % ip)
        return ip

    def _copy_from_cache(self, volume, image_id, cache_result):
        """Try copying the image to the volume from a cached image file."""
        LOG.debug("Trying copy from cache using copy offload.")
        copied = False
        cache_copy, found_local = self._find_image_location(cache_result,
                                                            volume['id'])

        try:
            if found_local:
                (nfs_share, file_name) = cache_copy
                self._clone_file_dst_exists(
                    nfs_share, file_name, volume['name'], dest_exists=True)
                LOG.debug("Copied image from cache to volume %s using "
                          "cloning.", volume['id'])
                copied = True
            elif cache_copy:
                self._copy_from_remote_cache(volume, image_id, cache_copy)
                copied = True

            if copied:
                self._post_clone_image(volume)

        except Exception as e:
            LOG.exception(_LE('Error in workflow copy from cache. %s.'), e)
        return copied

    def _find_image_location(self, cache_result, volume_id):
        """Finds the location of a cached image.

        Returns the image location local to the NFS share that matches
        volume_id, if one exists. Otherwise returns the last entry in
        cache_result, or None if cache_result is empty.
        """

        found_local_copy = False
        cache_copy = None
        provider_location = self._get_provider_location(volume_id)
        for res in cache_result:
            (share, file_name) = res
            if share == provider_location:
                cache_copy = res
                found_local_copy = True
                break
            else:
                cache_copy = res
        return cache_copy, found_local_copy

    def _copy_from_remote_cache(self, volume, image_id, cache_copy):
        """Copies the remote cached image to the provided volume.

        Executes the copy offload binary which copies the cached image to
        the destination path of the provided volume. Also registers the new
        copy of the image as a cached image.
        """

        (nfs_share, file_name) = cache_copy
        col_path = self.configuration.netapp_copyoffload_tool_path
        src_ip, src_path = self._get_source_ip_and_path(nfs_share, file_name)
        dest_ip, dest_path = self._get_destination_ip_and_path(volume)

        # Always run copy offload as regular user, it's sufficient
        # and rootwrap doesn't allow copy offload to run as root anyway.
        self._execute(col_path, src_ip, dest_ip, src_path, dest_path,
                      run_as_root=False, check_exit_code=0)
        self._register_image_in_cache(volume, image_id)
        LOG.debug("Copied image from cache to volume %s using copy offload.",
                  volume['id'])

    def _get_source_ip_and_path(self, nfs_share, file_name):
        src_ip = self._get_ip_verify_on_cluster(nfs_share.split(':')[0])
        src_path = os.path.join(nfs_share.split(':')[1], file_name)
        return src_ip, src_path

    def _get_destination_ip_and_path(self, volume):
        dest_ip = self._get_ip_verify_on_cluster(
            self._get_host_ip(volume['id']))
        dest_path = os.path.join(self._get_export_path(
            volume['id']), volume['name'])
        return dest_ip, dest_path

    def _clone_file_dst_exists(self, share, src_name, dst_name,
                               dest_exists=False):
        """Clone file even if dest exists."""
        (vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share)
        self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver,
                                    dest_exists=dest_exists)

    def _copy_from_img_service(self, context, volume, image_service,
                               image_id):
        """Copies from the image service using copy offload."""
        LOG.debug("Trying copy from image service using copy offload.")
        image_loc = image_service.get_location(context, image_id)
        locations = self._construct_image_nfs_url(image_loc)
        src_ip = None
        selected_loc = None
        # This will match the first location with a valid IP on the cluster
        for location in locations:
            conn, dr = self._check_get_nfs_path_segs(location)
            if conn:
                try:
                    src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
                    selected_loc = location
                    break
                except exception.NotFound:
                    pass
        if src_ip is None:
            raise exception.NotFound(_("Source host details not found."))
        (__, ___, img_file) = selected_loc.rpartition('/')
        src_path = os.path.join(dr, img_file)
        dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
            volume['id']))
        # A tmp file is required to deal with img formats
        tmp_img_file = six.text_type(uuid.uuid4())
        col_path = self.configuration.netapp_copyoffload_tool_path
        img_info = image_service.show(context, image_id)
        dst_share = self._get_provider_location(volume['id'])
        self._check_share_can_hold_size(dst_share, img_info['size'])
        run_as_root = self._execute_as_root

        dst_dir = self._get_mount_point_for_share(dst_share)
        dst_img_local = os.path.join(dst_dir, tmp_img_file)
        try:
            # If the source and destination shares are not the same
            if (('%s:%s' % (src_ip, dr)) !=
                    ('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
                dst_img_serv_path = os.path.join(
                    self._get_export_path(volume['id']), tmp_img_file)
                # Always run copy offload as regular user, it's sufficient
                # and rootwrap doesn't allow copy offload to run as root
                # anyway.
                self._execute(col_path, src_ip, dst_ip, src_path,
                              dst_img_serv_path, run_as_root=False,
                              check_exit_code=0)
            else:
                self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
            self._discover_file_till_timeout(dst_img_local, timeout=120)
            LOG.debug('Copied image %(img)s to tmp file %(tmp)s.',
                      {'img': image_id, 'tmp': tmp_img_file})
            dst_img_cache_local = os.path.join(dst_dir,
                                               'img-cache-%s' % image_id)
            if img_info['disk_format'] == 'raw':
                LOG.debug('Image is raw %s.', image_id)
                self._clone_file_dst_exists(dst_share, tmp_img_file,
                                            volume['name'], dest_exists=True)
                self._move_nfs_file(dst_img_local, dst_img_cache_local)
                LOG.debug('Copied raw image %(img)s to volume %(vol)s.',
                          {'img': image_id, 'vol': volume['id']})
            else:
                LOG.debug('Image will be converted to raw %s.', image_id)
                img_conv = six.text_type(uuid.uuid4())
                dst_img_conv_local = os.path.join(dst_dir, img_conv)

                # Check against the image size, an approximate check
                self._check_share_can_hold_size(dst_share, img_info['size'])
                try:
                    image_utils.convert_image(dst_img_local,
                                              dst_img_conv_local, 'raw',
                                              run_as_root=run_as_root)
                    data = image_utils.qemu_img_info(dst_img_conv_local,
                                                     run_as_root=run_as_root)
                    if data.file_format != "raw":
                        raise exception.InvalidResults(
                            _("Converted to raw, but format is now %s.")
                            % data.file_format)
                    else:
                        self._clone_file_dst_exists(dst_share, img_conv,
                                                    volume['name'],
                                                    dest_exists=True)
                        self._move_nfs_file(dst_img_conv_local,
                                            dst_img_cache_local)
                        LOG.debug('Copied locally converted raw image'
                                  ' %(img)s to volume %(vol)s.',
                                  {'img': image_id, 'vol': volume['id']})
                finally:
                    if os.path.exists(dst_img_conv_local):
                        self._delete_file_at_path(dst_img_conv_local)
            self._post_clone_image(volume)
        finally:
            if os.path.exists(dst_img_local):
                self._delete_file_at_path(dst_img_local)

    @utils.trace_method
    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        Does not delete the underlying backend storage object. A log entry
        will be made to notify the Admin that the volume is no longer being
        managed.

        :param volume: Cinder volume to unmanage
        """
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume)
            self.zapi_client.mark_qos_policy_group_for_deletion(
                qos_policy_group_info)
        except Exception:
            # Unmanage even if there was a problem deprovisioning the
            # associated qos policy group.
            pass

        super(NetAppCmodeNfsDriver, self).unmanage(volume)

    def failover_host(self, context, volumes, secondary_id=None):
        """Failover a backend to a secondary replication target."""

        return self._failover_host(volumes, secondary_id=secondary_id)

    def _get_backing_flexvol_names(self, hosts):
        """Returns a set of flexvol names."""
        flexvols = set()
        ssc = self.ssc_library.get_ssc()

        for host in hosts:
            pool_name = volume_utils.extract_host(host, level='pool')

            for flexvol_name, ssc_volume_data in ssc.items():
                if ssc_volume_data['pool_name'] == pool_name:
                    flexvols.add(flexvol_name)

        return flexvols

    @utils.trace_method
    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Delete files backing each snapshot in the cgsnapshot.

        :return: An implicit update of snapshot models that the manager will
                 interpret and subsequently set the model state to deleted.
        """
        for snapshot in snapshots:
            self._delete_backing_file_for_snapshot(snapshot)
            LOG.debug("Snapshot %s deletion successful", snapshot['name'])

        return None, None