[Pure Storage] Add volume metadata
Add two fields to the volume metadata to expose the actual backend array
the volume is created on and the actual volume name on the backend array.
When multiple OpenStack clusters are using the same backend array, or if
there are multiple backends that the cinder scheduler can choose from,
this makes identification and location of the physical volume easier for
the storage administrator.

[pylint] Fix two bad member references

Change-Id: Ib4135c67f57c36de31fdcd8b23976b2fafabdc91
parent: 8e3bf3af21
commit: d5cfbf41f2
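
All of the driver hunks below apply the same pattern: build the volume's metadata by merging any existing user metadata with the two new backend-identification keys. A minimal, standalone sketch of that merge follows; the helper and variable names are invented for illustration, while the driver itself works with vol.metadata / volume.metadata, the generated Purity volume name, and self._array.array_name.

    # Illustrative sketch only -- the names below are made up for the example.
    def build_backend_metadata(existing_metadata, backend_vol_name,
                               backend_array_name):
        existing_metadata = existing_metadata or {}
        # Dict unpacking keeps any user-set metadata and adds (or refreshes)
        # the two backend-identification keys.
        return {**existing_metadata,
                'array_volume_name': backend_vol_name,
                'array_name': backend_array_name}

    # User metadata is preserved; the backend keys are added alongside it.
    print(build_backend_metadata({'owner': 'team-a'},
                                 'volume-1234-cinder', 'pure-array-01'))
    # -> {'owner': 'team-a', 'array_volume_name': 'volume-1234-cinder',
    #     'array_name': 'pure-array-01'}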
@@ -431,10 +431,22 @@ class PureBaseVolumeDriver(san.SanDriver):
         for vol in volumes:
             if not vol.provider_id:
                 vol.provider_id = self._get_vol_name(vol)
-                vol_updates.append({
-                    'id': vol.id,
-                    'provider_id': self._generate_purity_vol_name(vol),
-                })
+                vol_name = self._generate_purity_vol_name(vol)
+                if vol.metadata:
+                    vol_updates.append({
+                        'id': vol.id,
+                        'provider_id': vol_name,
+                        'metadata': {**vol.metadata,
+                                     'array_volume_name': vol_name,
+                                     'array_name': self._array.array_name},
+                    })
+                else:
+                    vol_updates.append({
+                        'id': vol.id,
+                        'provider_id': vol_name,
+                        'metadata': {'array_volume_name': vol_name,
+                                     'array_name': self._array.array_name},
+                    })
         return vol_updates, None
 
     @pure_driver_debug_trace
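
For illustration, an entry appended to vol_updates by the new code would then look roughly like the following; the UUID, volume name, and array name are invented for the example.

    # Values are invented for illustration only.
    {
        'id': '11111111-2222-3333-4444-555555555555',
        'provider_id': 'volume-11111111-2222-3333-4444-555555555555-cinder',
        'metadata': {'array_volume_name':
                     'volume-11111111-2222-3333-4444-555555555555-cinder',
                     'array_name': 'pure-array-01'},
    }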
@@ -529,11 +541,23 @@ class PureBaseVolumeDriver(san.SanDriver):
         if self._is_vol_in_pod(purity_vol_name) or async_enabled:
             repl_status = fields.ReplicationStatus.ENABLED
 
-        model_update = {
-            'id': volume.id,
-            'provider_id': purity_vol_name,
-            'replication_status': repl_status,
-        }
+        if not volume.metadata:
+            model_update = {
+                'id': volume.id,
+                'provider_id': purity_vol_name,
+                'replication_status': repl_status,
+                'metadata': {'array_volume_name': purity_vol_name,
+                             'array_name': self._array.array_name}
+            }
+        else:
+            model_update = {
+                'id': volume.id,
+                'provider_id': purity_vol_name,
+                'replication_status': repl_status,
+                'metadata': {**volume.metadata,
+                             'array_volume_name': purity_vol_name,
+                             'array_name': self._array.array_name}
+            }
         return model_update
 
     def _enable_async_replication_if_needed(self, array, volume):
@@ -740,7 +764,7 @@ class PureBaseVolumeDriver(san.SanDriver):
                 except purestorage.PureError as err:
                     # Swallow any exception, just warn and continue
                     LOG.warning("Disconnect on secondary array failed with"
-                                " message: %(msg)s", {"msg": err.text})
+                                " message: %(msg)s", {"msg": err.reason})
         # Now disconnect from the current array
         self._disconnect(self._get_current_array(), volume,
                          connector, remove_remote_hosts=False,
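
On the [pylint] part of the change, here is a hedged sketch of why err.text is flagged as a bad member reference. It assumes the purestorage client's exception layout: PureError stores its message in reason, while text is only set on the PureHTTPError subclass, so only reason is safe when catching the base class.

    # Hedged sketch, assuming the purestorage package's exception hierarchy.
    import purestorage

    try:
        raise purestorage.PureError("host is not connected")
    except purestorage.PureError as err:
        print(err.reason)                   # defined on PureError itself
        print(getattr(err, "text", None))   # only PureHTTPError sets .text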
@@ -1318,6 +1342,8 @@ class PureBaseVolumeDriver(san.SanDriver):
         return {
             'provider_id': new_vol_name,
             'replication_status': repl_status,
+            'metadata': {'array_volume_name': new_vol_name,
+                         'array_name': current_array.array_name},
         }
 
     @pure_driver_debug_trace
@@ -2855,7 +2881,7 @@ class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver):
                 except purestorage.PureError as err:
                     # Swallow any exception, just warn and continue
                     LOG.warning("Disconnect on sendondary array failed with"
-                                " message: %(msg)s", {"msg": err.text})
+                                " message: %(msg)s", {"msg": err.reason})
 
         # Now disconnect from the current array, removing any left over
         # remote hosts that we maybe couldn't reach.
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Pure Storage driver adds volume metadata describing the backend
+    array name (``array_name``) and volume name (``array_volume_name``).
+    This allows easier identification and location of a cinder volume
+    when multiple clusters are using the same backend arrays, or when
+    the cinder scheduler has multiple backends to choose from.
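
As a usage illustration (not part of the change): reading the two new keys back through python-cinderclient to locate the volume on the backend array. This sketch assumes python-cinderclient and keystoneauth1 are installed and the usual OS_* environment variables are set; the volume ID is made up.

    import os

    from cinderclient import client as cinder_client
    from keystoneauth1 import loading
    from keystoneauth1 import session

    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(
        auth_url=os.environ['OS_AUTH_URL'],
        username=os.environ['OS_USERNAME'],
        password=os.environ['OS_PASSWORD'],
        project_name=os.environ['OS_PROJECT_NAME'],
        user_domain_name=os.environ.get('OS_USER_DOMAIN_NAME', 'Default'),
        project_domain_name=os.environ.get('OS_PROJECT_DOMAIN_NAME', 'Default'))
    cinder = cinder_client.Client('3', session=session.Session(auth=auth))

    # Volume ID is invented for the example.
    vol = cinder.volumes.get('11111111-2222-3333-4444-555555555555')
    print("backend array:  ", vol.metadata.get('array_name'))
    print("backend volume: ", vol.metadata.get('array_volume_name'))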