Enforce driver is initialized

This updates the volume manager and the
backup manager to enforce that the driver
has been initialized.

The managers call the driver in many places
without ensuring that the driver has been
properly initialized. When the driver fails
inside do_setup(), the managers shouldn't
call the driver's set_initialized() method.
The managers now log the exception and
exit init_host, leaving the driver in
an uninitialized state.

Fixes bug #1225897

Change-Id: I77b947f2a9fbe1b38f321511dba10fcd2fe1fe90
This commit is contained in:
Walter A. Boring IV 2013-09-16 17:05:27 -07:00
parent 9479e27761
commit 2177cbbe06
9 changed files with 71 additions and 7 deletions

View File

@ -41,6 +41,7 @@ from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
@ -95,8 +96,22 @@ class BackupManager(manager.SchedulerDependentManager):
"""
ctxt = context.get_admin_context()
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception as ex:
LOG.error(_("Error encountered during "
"initialization of driver: %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
# to initialize the driver correctly.
return
self.driver.set_initialized()
LOG.info(_("Cleaning up incomplete backup operations"))
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
@ -131,6 +146,7 @@ class BackupManager(manager.SchedulerDependentManager):
LOG.info(_('Resuming delete on backup: %s') % backup['id'])
self.delete_backup(ctxt, backup['id'])
@utils.require_driver_initialized
def create_backup(self, context, backup_id):
"""Create volume backups using configured backup service."""
backup = self.db.backup_get(context, backup_id)
@ -186,11 +202,13 @@ class BackupManager(manager.SchedulerDependentManager):
self.az})
LOG.info(_('create_backup finished. backup: %s'), backup_id)
@utils.require_driver_initialized
def restore_backup(self, context, backup_id, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_('restore_backup started, restoring backup: %(backup_id)s'
' to volume: %(volume_id)s') %
{'backup_id': backup_id, 'volume_id': volume_id})
backup = self.db.backup_get(context, backup_id)
volume = self.db.volume_get(context, volume_id)
self.db.backup_update(context, backup_id, {'host': self.host})
@ -256,10 +274,11 @@ class BackupManager(manager.SchedulerDependentManager):
' to volume: %(volume_id)s') %
{'backup_id': backup_id, 'volume_id': volume_id})
@utils.require_driver_initialized
def delete_backup(self, context, backup_id):
"""Delete volume backup from configured backup service."""
backup = self.db.backup_get(context, backup_id)
LOG.info(_('delete_backup started, backup: %s'), backup_id)
backup = self.db.backup_get(context, backup_id)
self.db.backup_update(context, backup_id, {'host': self.host})
expected_status = 'deleting'

View File

@ -126,6 +126,10 @@ class ImageNotAuthorized(CinderException):
message = _("Not authorized for image %(image_id)s.")
class DriverNotInitialized(CinderException):
    """Raised when a manager invokes a driver that has not been initialized.

    The 'driver' format argument carries the driver class name so the
    message identifies which driver failed setup.
    """
    message = _("Volume driver '%(driver)s' not initialized.")
class Invalid(CinderException):
    """Generic bad-request error for unacceptable caller-supplied parameters."""
    message = _("Unacceptable parameters.")
    # HTTP 400 Bad Request when surfaced through the API layer.
    code = 400

View File

@ -51,6 +51,7 @@ class BackupTestCase(test.TestCase):
importutils.import_object(CONF.backup_manager)
self.backup_mgr.host = 'testhost'
self.ctxt = context.get_admin_context()
self.backup_mgr.driver.set_initialized()
def tearDown(self):
super(BackupTestCase, self).tearDown()

View File

@ -86,6 +86,7 @@ class GPFSDriverTestCase(test.TestCase):
gpfs_mount_point_base=self.volumes_path)
self.volume = importutils.import_object(CONF.volume_manager)
self.volume.driver.set_execute(self._execute_wrapper)
self.volume.driver.set_initialized()
self.stubs.Set(GPFSDriver, '_create_gpfs_snap',
self._fake_gpfs_snap)

View File

@ -102,6 +102,7 @@ class RBDTestCase(test.TestCase):
configuration=self.configuration,
rados=self.rados,
rbd=self.rbd)
self.driver.set_initialized()
def test_create_volume(self):
name = u'volume-00000001'
@ -498,6 +499,7 @@ class ManagedRBDTestCase(DriverTestCase):
def setUp(self):
super(ManagedRBDTestCase, self).setUp()
fake_image.stub_out_image_service(self.stubs)
self.volume.driver.set_initialized()
def _clone_volume_from_image(self, expected_status,
clone_works=True):

View File

@ -105,6 +105,7 @@ class BaseVolumeTestCase(test.TestCase):
test_notifier.NOTIFICATIONS = []
self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
self.stubs.Set(os.path, 'exists', lambda x: True)
self.volume.driver.set_initialized()
def tearDown(self):
try:
@ -1959,6 +1960,7 @@ class DriverTestCase(test.TestCase):
"""Fake _execute."""
return self.output, None
self.volume.driver.set_execute(_fake_execute)
self.volume.driver.set_initialized()
def tearDown(self):
try:

View File

@ -792,3 +792,14 @@ def brick_get_connector(protocol, driver=None,
driver=driver,
execute=execute,
use_multipath=use_multipath)
def require_driver_initialized(func):
    """Decorator enforcing that ``self.driver`` has completed initialization.

    Wraps a manager method; the call proceeds only when the driver's
    ``initialized`` flag is set, otherwise DriverNotInitialized is raised
    with the driver class name.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # Nothing can be done with a driver that failed do_setup();
        # refuse the call rather than operate on a broken driver.
        if self.driver.initialized:
            return func(self, *args, **kwargs)
        raise exception.DriverNotInitialized(
            driver=self.driver.__class__.__name__)
    return wrapper

View File

@ -1439,6 +1439,12 @@ class CreateVolumeFromSpecTask(base.CinderTask):
return self.driver.create_volume(volume_ref)
def __call__(self, context, volume_ref, volume_spec):
# we can't do anything if the driver didn't init
if not self.driver.initialized:
LOG.error(_("Unable to create volume, driver not initialized"))
driver_name = self.driver.__class__.__name__
raise exception.DriverNotInitialized(driver=driver_name)
create_type = volume_spec.pop('type', None)
create_functor = self._create_func_mapping.get(create_type)
if not create_functor:

View File

@ -169,10 +169,14 @@ class VolumeManager(manager.SchedulerDependentManager):
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
except Exception as ex:
LOG.error(_("Error encountered during "
"initialization of driver: %s"),
self.driver.__class__.__name__)
"initialization of driver: %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
# to initialize the driver correctly.
return
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
@ -198,6 +202,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
@utils.require_driver_initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None):
@ -227,6 +232,7 @@ class VolumeManager(manager.SchedulerDependentManager):
self._reset_stats()
return volume_id
@utils.require_driver_initialized
def delete_volume(self, context, volume_id):
"""Deletes and unexports volume."""
context = context.elevated()
@ -304,12 +310,14 @@ class VolumeManager(manager.SchedulerDependentManager):
return True
@utils.require_driver_initialized
def create_snapshot(self, context, volume_id, snapshot_id):
"""Creates and exports the snapshot."""
caller_context = context
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
self._notify_about_snapshot_usage(
context, snapshot_ref, "create.start")
@ -352,12 +360,14 @@ class VolumeManager(manager.SchedulerDependentManager):
self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
return snapshot_id
@utils.require_driver_initialized
def delete_snapshot(self, context, snapshot_id):
"""Deletes and unexports snapshot."""
caller_context = context
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
project_id = snapshot_ref['project_id']
LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
self._notify_about_snapshot_usage(
context, snapshot_ref, "delete.start")
@ -412,6 +422,7 @@ class VolumeManager(manager.SchedulerDependentManager):
QUOTAS.commit(context, reservations, project_id=project_id)
return True
@utils.require_driver_initialized
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached"""
@ -484,6 +495,7 @@ class VolumeManager(manager.SchedulerDependentManager):
mountpoint)
return do_attach()
@utils.require_driver_initialized
def detach_volume(self, context, volume_id):
"""Updates db to show volume is detached"""
# TODO(vish): refactor this into a more general "unreserve"
@ -508,6 +520,7 @@ class VolumeManager(manager.SchedulerDependentManager):
volume['name'] not in volume['provider_location']):
self.driver.ensure_export(context, volume)
@utils.require_driver_initialized
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
@ -538,6 +551,7 @@ class VolumeManager(manager.SchedulerDependentManager):
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
@utils.require_driver_initialized
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
@ -604,6 +618,7 @@ class VolumeManager(manager.SchedulerDependentManager):
conn_info['data']['access_mode'] = access_mode
return conn_info
@utils.require_driver_initialized
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
@ -612,6 +627,7 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_ref = self.db.volume_get(context, volume_id)
self.driver.terminate_connection(volume_ref, connector, force=force)
@utils.require_driver_initialized
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
@ -708,6 +724,7 @@ class VolumeManager(manager.SchedulerDependentManager):
self.db.volume_update(ctxt, volume_id, {'migration_status': None})
return volume['id']
@utils.require_driver_initialized
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):
"""Migrate the volume to the specified host (called on source host)."""
volume_ref = self.db.volume_get(ctxt, volume_id)
@ -753,7 +770,7 @@ class VolumeManager(manager.SchedulerDependentManager):
def _report_driver_status(self, context):
LOG.info(_("Updating volume status"))
if not self.driver.initialized:
LOG.warning(_('Unabled to update stats, driver is '
LOG.warning(_('Unable to update stats, driver is '
'uninitialized'))
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
@ -793,6 +810,7 @@ class VolumeManager(manager.SchedulerDependentManager):
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
@utils.require_driver_initialized
def extend_volume(self, context, volume_id, new_size):
volume = self.db.volume_get(context, volume_id)
size_increase = (int(new_size)) - volume['size']