Move vol_db_empty to NFS driver

The vol_db_empty check is needed only for NFS-based drivers, so these
checks have been moved out of the volume manager into the NFS driver.

Change-Id: I11d29948b9850734278bf66b790ffb214f8e4872
This commit is contained in:
Ivan Kolodyazhny 2017-06-18 15:50:10 +03:00
parent c1dbabaddb
commit 9b81659128
4 changed files with 35 additions and 24 deletions

View File

@ -46,6 +46,9 @@ class FakeDb(object):
"""Mock this if you want results from it."""
return []
def volume_get_all(self, *args, **kwargs):
    """Fake DB query: report an empty volume table regardless of arguments."""
    return list()
class QuobyteDriverTestCase(test.TestCase):
"""Test case for Quobyte driver."""

View File

@ -53,12 +53,15 @@ class WindowsSmbFsTestCase(test.TestCase):
_FAKE_VOLUME_NAME)
_FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
@mock.patch.object(remotefs.RemoteFSDriver,
'_check_if_volume_db_is_empty')
@mock.patch.object(smbfs, 'utilsfactory')
@mock.patch.object(smbfs, 'remotefs_brick')
def setUp(self, mock_remotefs, mock_utilsfactory):
def setUp(self, mock_remotefs, mock_utilsfactory, mock_is_empty):
super(WindowsSmbFsTestCase, self).setUp()
self.context = context.get_admin_context()
mock_is_empty.return_value = None
self._FAKE_SMBFS_CONFIG = mock.MagicMock(
smbfs_oversub_ratio = 2,

View File

@ -29,6 +29,7 @@ from oslo_utils import units
import six
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
@ -148,7 +149,7 @@ class RemoteFSDriver(driver.BaseVD):
self.shares = {}
self._mounted_shares = []
self._execute_as_root = True
self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
self._is_voldb_empty_at_startup = self._check_if_volume_db_is_empty()
self._supports_encryption = False
# We let the drivers inheriting this specify
@ -159,6 +160,32 @@ class RemoteFSDriver(driver.BaseVD):
self.configuration.append_config_values(nas_opts)
self.configuration.append_config_values(volume_opts)
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
if not self.db:
return False
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info("Determined volume DB was empty at startup.")
return True
else:
LOG.info("Determined volume DB was not empty at startup.")
return False
def _check_if_volume_db_is_empty(self):
    """Check whether the volume DB is empty and log the outcome.

    :returns: the emptiness indicator computed against the admin context
    """
    admin_ctxt = context.get_admin_context()
    db_empty = self._set_voldb_empty_at_startup_indicator(admin_ctxt)
    LOG.debug("Cinder Volume DB check: vol_db_empty=%s", db_empty)
    return db_empty
def check_for_setup_error(self):
    """Intentional no-op that overrides the parent's setup validation."""
    return None

View File

@ -216,10 +216,6 @@ class VolumeManager(manager.CleanableManager,
"configuration to the new path.", volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
# the driver on init, in case there was a restart or something
curr_active_backend_id = None
@ -251,7 +247,6 @@ class VolumeManager(manager.CleanableManager,
db=self.db,
host=self.host,
cluster_name=self.cluster,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
@ -341,23 +336,6 @@ class VolumeManager(manager.CleanableManager,
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
    """Determine if the Cinder volume DB is empty.

    A check of the volume DB is done to determine whether it is empty or
    not at this point.

    :param ctxt: our working context
    :returns: True when no volume entries exist, otherwise False
    """
    # Fetch at most one row — existence is all we need to know.
    entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
    db_empty = not entries
    if db_empty:
        LOG.info("Determined volume DB was empty at startup.")
    else:
        LOG.info("Determined volume DB was not empty at startup.")
    return db_empty
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in