refresh cinder proxy

Change-Id: Id07a76230e3752b990731f61c64e1e74461468c1
ChiZhang 2014-12-30 20:49:36 +08:00
parent cf97fb49af
commit f2f2bec1a7
1 changed file with 1 addition and 104 deletions


@@ -1146,107 +1146,4 @@ class CinderProxy(manager.SchedulerDependentManager):
    def initialize_connection(self, context, volume_id, connector):
        """Prepare volume for connection from host represented by connector.

        A volume at the OpenStack cascading level is just logical data, so
        initializing a connection has lost its meaning here; this interface
        simply returns None.
        """
        return None

    def terminate_connection(self, context, volume_id, connector, force=False):
        """Clean up connection from host represented by connector.

        A volume at the OpenStack cascading level is just logical data, so
        terminating a connection has lost its meaning here; this interface
        simply returns None.
        """
        return None

    @periodic_task.periodic_task
    def _report_driver_status(self, context):
        """The cascading cinder driver has lost its meaning here, so the
        driver report info is just a simulated capabilities message.
        """
        LOG.info(_("report simulation volume driver"))
        simu_location_info = 'LVMVolumeDriver:Huawei:cinder-volumes:default:0'
        volume_stats = {
            'pools': [{
                'pool_name': 'OpenStack_Cascading',
                'QoS_support': True,
                'free_capacity_gb': 10240.0,
                'location_info': simu_location_info,
                'total_capacity_gb': 10240.0,
                'reserved_percentage': 0
            }],
            'driver_version': '2.0.0',
            'vendor_name': 'Huawei',
            'volume_backend_name': 'LVM_iSCSI',
            'storage_protocol': 'iSCSI'}
        self.update_service_capabilities(volume_stats)
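
    # Illustrative sketch only: a minimal example of how a scheduler-side
    # capacity check could consume the simulated pool stats reported above.
    # `pool_can_fit` is a hypothetical helper, not Cinder's actual
    # CapacityFilter logic.
    @staticmethod
    def pool_can_fit(pool, requested_size_gb):
        """Return True if a reported pool has enough usable free capacity."""
        reserved_gb = (pool['total_capacity_gb'] *
                       pool['reserved_percentage'] / 100.0)
        return pool['free_capacity_gb'] - reserved_gb >= requested_size_gb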

    def publish_service_capabilities(self, context):
        """Collect driver status and then publish."""
        self._report_driver_status(context)
        self._publish_service_capabilities(context)

    def _reset_stats(self):
        LOG.info(_("Clear capabilities"))
        self._last_volume_stats = []

    def notification(self, context, event):
        LOG.info(_("Notification {%s} received"), event)
        self._reset_stats()

    def _notify_about_volume_usage(self,
                                   context,
                                   volume,
                                   event_suffix,
                                   extra_usage_info=None):
        volume_utils.notify_about_volume_usage(
            context, volume, event_suffix,
            extra_usage_info=extra_usage_info, host=self.host)

    def _notify_about_snapshot_usage(self,
                                     context,
                                     snapshot,
                                     event_suffix,
                                     extra_usage_info=None):
        volume_utils.notify_about_snapshot_usage(
            context, snapshot, event_suffix,
            extra_usage_info=extra_usage_info, host=self.host)

    def extend_volume(self, context, volume_id, new_size, reservations):
        volume = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume, "resize.start")
        try:
            LOG.info(_("volume %s: extending"), volume['id'])
            cinderClient = self._get_cinder_cascaded_user_client(context)
            # vol_ref = self.db.volume_get(context, volume_id)
            # cascaded_volume_id = vol_ref['mapping_uuid']
            cascaded_volume_id = \
                self.volumes_mapping_cache['volumes'].get(volume_id, '')
            LOG.info(_("Cascade info: extend volume, cascaded volume id "
                       "is: %s"), cascaded_volume_id)
            cinderClient.volumes.extend(cascaded_volume_id, new_size)
            LOG.info(_("Cascade info: volume %s: extended successfully"),
                     volume['id'])
        except Exception:
            LOG.exception(_("volume %s: Error trying to extend volume"),
                          volume_id)
            try:
                self.db.volume_update(context, volume['id'],
                                      {'status': 'error_extending'})
            finally:
                QUOTAS.rollback(context, reservations)
            return

        QUOTAS.commit(context, reservations)
        self.db.volume_update(context, volume['id'], {'size': int(new_size),
                                                      'status': 'extending'})
        self._notify_about_volume_usage(
            context, volume, "resize.end",
            extra_usage_info={'size': int(new_size)})
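
For context, a minimal standalone sketch of the mapping-cache pattern that extend_volume relies on: the proxy tracks which cascaded (real) volume backs each cascading-level (logical) volume and resolves the logical UUID before every cinderclient call. The UUID strings below are placeholders, not values from the original code.

# Sketch: logical-to-cascaded volume UUID resolution (placeholder values).
volumes_mapping_cache = {'volumes': {}}

# Populated when the cascaded volume is created or synced:
volumes_mapping_cache['volumes']['logical-uuid'] = 'cascaded-uuid'

# Resolved before calling the cascaded cinderclient, as extend_volume does:
cascaded_volume_id = volumes_mapping_cache['volumes'].get('logical-uuid', '')
print(cascaded_volume_id)  # 'cascaded-uuid'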