diff --git a/cinder/tests/test_netapp.py b/cinder/tests/test_netapp.py
index bba3a197fe9..dd5186d571b 100644
--- a/cinder/tests/test_netapp.py
+++ b/cinder/tests/test_netapp.py
@@ -545,7 +545,8 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
 
     def _custom_setup(self):
         self.stubs.Set(
-            ssc_utils, 'refresh_cluster_ssc', lambda a, b, c: None)
+            ssc_utils, 'refresh_cluster_ssc',
+            lambda a, b, c, synchronous: None)
         configuration = self._set_config(create_configuration())
         driver = common.NetAppDriver(configuration=configuration)
         self.stubs.Set(httplib, 'HTTPConnection',
diff --git a/cinder/volume/drivers/netapp/iscsi.py b/cinder/volume/drivers/netapp/iscsi.py
index e5440465bb1..be5135503ed 100644
--- a/cinder/volume/drivers/netapp/iscsi.py
+++ b/cinder/volume/drivers/netapp/iscsi.py
@@ -781,7 +781,6 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
         self.client.set_api_version(major, minor)
         self.ssc_vols = None
         self.stale_vols = set()
-        ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
 
     def _create_lun_on_eligible_vol(self, name, size, metadata,
                                     extra_specs=None):
@@ -1057,6 +1056,9 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
 
     def _update_cluster_vol_stats(self, data):
         """Updates vol stats with cluster config."""
+        sync = True if self.ssc_vols is None else False
+        ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver,
+                                      synchronous=sync)
         if self.ssc_vols:
             data['netapp_mirrored'] = 'true'\
                 if self.ssc_vols['mirrored'] else 'false'
@@ -1090,7 +1092,6 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
             data['free_capacity_gb'] = 0
         else:
             LOG.warn(_("Cluster ssc is not updated. No volume stats found."))
-            ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
 
     @utils.synchronized('update_stale')
     def _update_stale_vols(self, volume=None, reset=False):
diff --git a/cinder/volume/drivers/netapp/nfs.py b/cinder/volume/drivers/netapp/nfs.py
index 602a1dcc20d..6f826979e53 100644
--- a/cinder/volume/drivers/netapp/nfs.py
+++ b/cinder/volume/drivers/netapp/nfs.py
@@ -714,7 +714,6 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
             self.ssc_enabled = True
             LOG.info(_("Shares on vserver %s will only"
                        " be used for provisioning.") % (self.vserver))
-            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
         else:
             self.ssc_enabled = False
             LOG.warn(_("No vserver set in config. SSC will be disabled."))
@@ -881,6 +880,12 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
 
     def _update_cluster_vol_stats(self, data):
         """Updates vol stats with cluster config."""
+        if self.ssc_enabled:
+            sync = True if self.ssc_vols is None else False
+            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver,
+                                          synchronous=sync)
+        else:
+            LOG.warn(_("No vserver set in config. SSC will be disabled."))
         if self.ssc_vols:
             data['netapp_mirrored'] = 'true'\
                 if self.ssc_vols['mirrored'] else 'false'
@@ -914,10 +919,6 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
         elif self.ssc_enabled:
             LOG.warn(_("No cluster ssc stats found."
                        " Wait for next volume stats update."))
-        if self.ssc_enabled:
-            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
-        else:
-            LOG.warn(_("No vserver set in config. SSC will be disabled."))
 
     @utils.synchronized('update_stale')
     def _update_stale_vols(self, volume=None, reset=False):
diff --git a/cinder/volume/drivers/netapp/ssc_utils.py b/cinder/volume/drivers/netapp/ssc_utils.py
index d03d61efe4a..1343d4f4f71 100644
--- a/cinder/volume/drivers/netapp/ssc_utils.py
+++ b/cinder/volume/drivers/netapp/ssc_utils.py
@@ -434,6 +434,9 @@
                     vol_set = ssc_vols_copy[k]
                     vol_set.discard(vol)
             backend.refresh_ssc_vols(ssc_vols_copy)
+            LOG.info(_('Successfully completed stale refresh job for'
+                       ' %(server)s and vserver %(vs)s')
+                     % {'server': na_server, 'vs': vserver})
 
         refresh_stale_ssc()
     finally:
@@ -464,13 +467,16 @@ def get_cluster_latest_ssc(*args, **kwargs):
             ssc_vols = get_cluster_ssc(na_server, vserver)
             backend.refresh_ssc_vols(ssc_vols)
             backend.ssc_run_time = timeutils.utcnow()
+            LOG.info(_('Successfully completed ssc job for %(server)s'
+                       ' and vserver %(vs)s')
+                     % {'server': na_server, 'vs': vserver})
 
         get_latest_ssc()
     finally:
         na_utils.set_safe_attr(backend, 'ssc_job_running', False)
 
 
-def refresh_cluster_ssc(backend, na_server, vserver):
+def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
     """Refresh cluster ssc for backend."""
     if not isinstance(backend, driver.VolumeDriver):
         raise exception.InvalidInput(reason=_("Backend not a VolumeDriver."))
@@ -483,17 +489,23 @@
     elif (getattr(backend, 'ssc_run_time', None) is None or
           (backend.ssc_run_time and
            timeutils.is_newer_than(backend.ssc_run_time, delta_secs))):
-        t = Timer(0, get_cluster_latest_ssc,
-                  args=[backend, na_server, vserver])
-        t.start()
+        if synchronous:
+            get_cluster_latest_ssc(backend, na_server, vserver)
+        else:
+            t = Timer(0, get_cluster_latest_ssc,
+                      args=[backend, na_server, vserver])
+            t.start()
     elif getattr(backend, 'refresh_stale_running', None):
         LOG.warn(_('refresh stale ssc job in progress. Returning... '))
         return
     else:
         if backend.stale_vols:
-            t = Timer(0, refresh_cluster_stale_ssc,
-                      args=[backend, na_server, vserver])
-            t.start()
+            if synchronous:
+                refresh_cluster_stale_ssc(backend, na_server, vserver)
+            else:
+                t = Timer(0, refresh_cluster_stale_ssc,
+                          args=[backend, na_server, vserver])
+                t.start()
 
 
 def get_volumes_for_specs(ssc_vols, specs):