Refactor volume status in managing vol

This feature introduces 'managing' and 'error_managing'
statuses into the managing process, and an
'error_managing_deleting' status into the deleting
process, to fix the quota decrease issue when an
exception is raised in c-vol. If a volume is in
'error_managing', quota will not be decreased when
deleting it. We still expose the 'creating', 'error' and
'deleting' statuses to users for API compatibility.
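
Below is a minimal sketch of the translation this change makes the
API layer apply. The mapping mirrors _get_volume_status in the diff
further down; INTERNAL_TO_API and api_status are illustrative names,
not part of the commit.

    # Internal statuses introduced by this change, mapped back to the
    # statuses users already know.
    INTERNAL_TO_API = {
        'managing': 'creating',
        'error_managing': 'error',
        'error_managing_deleting': 'deleting',
    }

    def api_status(internal_status):
        # Anything outside the map passes through unchanged.
        return INTERNAL_TO_API.get(internal_status, internal_status)

    assert api_status('error_managing') == 'error'
    assert api_status('available') == 'available'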

Change-Id: I5887c5f2ded6d6a18f497a018d5bf6105bc5afd7
Closes-Bug: #1504007
Implements: blueprint refactor-volume-status-in-managing-vol
wanghao 2016-06-23 16:04:35 +08:00 committed by wanghao
parent 89f369ce61
commit efd1f5c7eb
13 changed files with 235 additions and 34 deletions

View File

@@ -44,7 +44,9 @@ class AdminController(wsgi.Controller):
'available',
'deleting',
'error',
'error_deleting', ])
'error_deleting',
'error_managing',
'managing', ])
def __init__(self, *args, **kwargs):
super(AdminController, self).__init__(*args, **kwargs)

View File

@@ -49,12 +49,25 @@ class ViewBuilder(common.ViewBuilder):
},
}
def _get_volume_status(self, volume):
# NOTE(wanghao): for fixing bug 1504007, we introduce 'managing',
# 'error_managing' and 'error_managing_deleting' statuses into the
# managing process, but still expose the 'creating', 'error' and
# 'deleting' statuses to users for API compatibility.
status_map = {
'managing': 'creating',
'error_managing': 'error',
'error_managing_deleting': 'deleting',
}
vol_status = volume.get('status')
return status_map.get(vol_status, vol_status)
def detail(self, request, volume):
"""Detailed view of a single volume."""
volume_ref = {
'volume': {
'id': volume.get('id'),
'status': volume.get('status'),
'status': self._get_volume_status(volume),
'size': volume.get('size'),
'availability_zone': volume.get('availability_zone'),
'created_at': volume.get('created_at'),

View File

@@ -303,7 +303,7 @@ class SchedulerManager(manager.Manager):
volume = objects.Volume.get_by_id(context, volume_id)
def _manage_existing_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'status': 'error'}}
volume_state = {'volume_state': {'status': 'error_managing'}}
self._set_volume_state_and_notify('manage_existing', volume_state,
context, ex, request_spec)
@@ -346,6 +346,9 @@
if volume_id:
db.volume_update(context, volume_id, volume_state)
if volume_state.get('status') == 'error_managing':
volume_state['status'] = 'error'
payload = dict(request_spec=request_spec,
volume_properties=properties,
volume_id=volume_id,
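
The hunk above persists 'error_managing' in the database but still
reports 'error' in the notification payload, so external consumers
keep seeing the old status. A condensed, hedged sketch of that
ordering, with stand-in callables for the DB update and the notifier:

    def set_state_and_notify(volume_state, db_update, notify):
        # Persist the internal status first...
        db_update(dict(volume_state))
        # ...then downgrade only the copy that goes into the payload.
        if volume_state.get('status') == 'error_managing':
            volume_state['status'] = 'error'
        notify(volume_state)

    seen = {}
    set_state_and_notify({'status': 'error_managing'},
                         db_update=lambda v: seen.setdefault('db', v),
                         notify=lambda v: seen.setdefault('payload', v))
    assert seen['db']['status'] == 'error_managing'
    assert seen['payload']['status'] == 'error'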

View File

@@ -23,6 +23,7 @@ except ImportError:
from urllib.parse import urlencode
import webob
from cinder.api.openstack import api_version_request as api_version
from cinder import context
from cinder import exception
from cinder import test
@@ -41,6 +42,14 @@ def app():
return mapper
def app_v3():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v3'] = api
return mapper
def service_get(context, host, binary):
"""Replacement for Service.service_get_by_host_and_topic.
@@ -111,6 +120,12 @@ def api_manage(*args, **kwargs):
return fake_volume.fake_volume_obj(ctx, **vol)
def api_manage_new(*args, **kwargs):
volume = api_manage()
volume.status = 'managing'
return volume
def api_get_manageable_volumes(*args, **kwargs):
"""Replacement for cinder.volume.api.API.get_manageable_volumes."""
vols = [
@@ -168,6 +183,18 @@ class VolumeManageTest(test.TestCase):
res = req.get_response(app())
return res
def _get_resp_post_v3(self, body, version):
"""Helper to execute a POST os-volume-manage API call."""
req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.environ['cinder.context'] = self._admin_ctxt
req.headers["OpenStack-API-Version"] = "volume " + version
req.api_version_request = api_version.APIVersionRequest(version)
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(app_v3())
return res
@mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
@@ -372,3 +399,26 @@ class VolumeManageTest(test.TestCase):
self.assertEqual(exception.ServiceUnavailable.message,
res.json['badRequest']['message'])
self.assertTrue(mock_is_up.called)
@mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage_new)
def test_manage_volume_with_creating_status_in_v3(self, mock_api_manage):
"""Test managing volume to return 'creating' status in V3 API."""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref'}}
res = self._get_resp_post_v3(body, '3.15')
self.assertEqual(202, res.status_int)
self.assertEqual(1, mock_api_manage.call_count)
self.assertEqual('creating',
jsonutils.loads(res.body)['volume']['status'])
@mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage_new)
def test_manage_volume_with_creating_status_in_v2(self, mock_api_manage):
"""Test managing volume to return 'creating' status in V2 API."""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref'}}
res = self._get_resp_post(body)
self.assertEqual(202, res.status_int)
self.assertEqual(1, mock_api_manage.call_count)
self.assertEqual('creating',
jsonutils.loads(res.body)['volume']['status'])

View File

@@ -1379,6 +1379,20 @@ class VolumeApiTest(test.TestCase):
res_dict = self.controller.show(req, fake.VOLUME_ID)
self.assertEqual(False, res_dict['volume']['encrypted'])
def test_volume_show_with_error_managing_deleting(self):
def stub_volume_get(self, context, volume_id, **kwargs):
vol = stubs.stub_volume(volume_id,
status='error_managing_deleting')
return fake_volume.fake_volume_obj(context, **vol)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
res_dict = self.controller.show(req, fake.VOLUME_ID)
self.assertEqual('deleting', res_dict['volume']['status'])
def test_volume_delete(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)

View File

@@ -4690,6 +4690,103 @@ class VolumeTestCase(BaseVolumeTestCase):
'manage_existing.end',
host=test_vol.host)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'manage_existing_get_size')
@mock.patch('cinder.volume.flows.manager.manage_existing.'
'ManageExistingTask.execute')
def test_manage_volume_raise_driver_exception(self, mock_execute,
mock_driver_get_size):
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'type1', 'extra_specs': {}})
vol_type = db.volume_type_get_by_name(elevated, 'type1')
# create source volume
self.volume_params['volume_type_id'] = vol_type['id']
self.volume_params['status'] = 'managing'
test_vol = tests_utils.create_volume(self.context,
**self.volume_params)
mock_execute.side_effect = exception.VolumeBackendAPIException(
data="volume driver got exception")
mock_driver_get_size.return_value = 1
# Set quota usage
reserve_opts = {'volumes': 1, 'gigabytes': 1}
reservations = QUOTAS.reserve(self.context, project_id=project_id,
**reserve_opts)
QUOTAS.commit(self.context, reservations)
usage = db.quota_usage_get(self.context, project_id, 'volumes')
volumes_in_use = usage.in_use
usage = db.quota_usage_get(self.context, project_id, 'gigabytes')
gigabytes_in_use = usage.in_use
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.manage_existing,
self.context, test_vol.id,
'volume_ref')
# check volume status
volume = objects.Volume.get_by_id(context.get_admin_context(),
test_vol.id)
self.assertEqual('error_managing', volume.status)
# Delete this volume with 'error_managing_deleting' status in c-vol.
test_vol.status = 'error_managing_deleting'
test_vol.save()
self.volume.delete_volume(self.context, test_vol.id, volume=test_vol)
ctxt = context.get_admin_context(read_deleted='yes')
volume = objects.Volume.get_by_id(ctxt, test_vol.id)
self.assertEqual('deleted', volume.status)
# Get in_use number after deleting error_managing volume
usage = db.quota_usage_get(self.context, project_id, 'volumes')
volumes_in_use_new = usage.in_use
self.assertEqual(volumes_in_use, volumes_in_use_new)
usage = db.quota_usage_get(self.context, project_id, 'gigabytes')
gigabytes_in_use_new = usage.in_use
self.assertEqual(gigabytes_in_use, gigabytes_in_use_new)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'manage_existing_get_size')
def test_manage_volume_raise_driver_size_exception(self,
mock_driver_get_size):
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'type1', 'extra_specs': {}})
# create source volume
test_vol = tests_utils.create_volume(self.context,
**self.volume_params)
mock_driver_get_size.side_effect = exception.VolumeBackendAPIException(
data="volume driver got exception")
# Set quota usage
reserve_opts = {'volumes': 1, 'gigabytes': 1}
reservations = QUOTAS.reserve(self.context, project_id=project_id,
**reserve_opts)
QUOTAS.commit(self.context, reservations)
usage = db.quota_usage_get(self.context, project_id, 'volumes')
volumes_in_use = usage.in_use
usage = db.quota_usage_get(self.context, project_id, 'gigabytes')
gigabytes_in_use = usage.in_use
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.manage_existing,
self.context, test_vol.id,
'volume_ref')
# check volume status
volume = objects.Volume.get_by_id(context.get_admin_context(),
test_vol.id)
self.assertEqual('error_managing', volume.status)
# Delete this volume with 'error_managing_deleting' status in c-vol.
test_vol.status = 'error_managing_deleting'
test_vol.save()
self.volume.delete_volume(self.context, test_vol.id, volume=test_vol)
ctxt = context.get_admin_context(read_deleted='yes')
volume = objects.Volume.get_by_id(ctxt, test_vol.id)
self.assertEqual('deleted', volume.status)
# Get in_use number after raising exception
usage = db.quota_usage_get(self.context, project_id, 'volumes')
volumes_in_use_new = usage.in_use
self.assertEqual(volumes_in_use, volumes_in_use_new)
usage = db.quota_usage_get(self.context, project_id, 'gigabytes')
gigabytes_in_use_new = usage.in_use
self.assertEqual(gigabytes_in_use, gigabytes_in_use_new)
@ddt.ddt
class VolumeMigrationTestCase(BaseVolumeTestCase):
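
The two tests above assert that the volumes/gigabytes in_use counters
are unchanged after deleting a failed-manage volume. The accounting
they guard against, reduced to a hedged one-counter sketch:

    # manage_existing only commits quota after the driver succeeds, so a
    # volume that failed into 'error_managing' never added to in_use.
    in_use = 1                # one pre-existing, correctly counted volume
    # Old behavior: deleting the failed volume released quota anyway.
    in_use_old = in_use - 1   # 0 -> the real volume is no longer counted
    # New behavior: 'error_managing_deleting' skips the release.
    in_use_new = in_use       # 1 -> accounting stays correct
    assert (in_use_old, in_use_new) == (0, 1)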

View File

@@ -96,7 +96,9 @@ class ManageVolumeFlowTestCase(test.TestCase):
task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver)
task.revert(self.ctxt, mock_result, mock_flow_failures, volume_ref)
mock_error_out.assert_called_once_with(volume_ref, reason=mock.ANY)
mock_error_out.assert_called_once_with(volume_ref,
reason='Volume manage failed.',
status='error_managing')
def test_get_flow(self):
mock_volume_flow = mock.Mock()

View File

@@ -368,15 +368,18 @@ class API(base.Base):
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
if volume.status != 'error_managing':
LOG.debug("Decrease volume quotas only if status is not "
"error_managing.")
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception(_LE("Failed to update quota while "
"deleting volume."))
volume.destroy()
@@ -400,7 +403,7 @@ class API(base.Base):
# If not force deleting we have status conditions
if not force:
expected['status'] = ('available', 'error', 'error_restoring',
'error_extending')
'error_extending', 'error_managing')
if cascade:
# Allow deletion if all snapshots are in an expected state
@@ -409,6 +412,8 @@
# Don't allow deletion of volume with snapshots
filters = [~db.volume_has_snapshots_filter()]
values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()}
if volume.status == 'error_managing':
values['status'] = 'error_managing_deleting'
result = volume.conditional_update(values, expected, filters)
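
For context, conditional_update atomically applies 'values' only while
the row still matches 'expected' (and the snapshot filter passes). A
toy, self-contained model of that semantics; the real method performs
the check-and-set in a single SQL statement:

    def conditional_update(volume, values, expected_statuses):
        if volume['status'] not in expected_statuses:
            return False          # lost the race; nothing changes
        volume.update(values)
        return True

    volume = {'status': 'error_managing'}
    values = {'status': 'deleting'}
    if volume['status'] == 'error_managing':
        # Keep the internal marker through the delete so the volume
        # manager can recognize it when it adjusts quotas.
        values['status'] = 'error_managing_deleting'
    assert conditional_update(volume, values,
                              ('available', 'error', 'error_restoring',
                               'error_extending', 'error_managing'))
    assert volume['status'] == 'error_managing_deleting'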

View File

@@ -56,7 +56,7 @@ class EntryCreateTask(flow_utils.CinderTask):
'size': 0,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'status': 'managing',
'attach_status': 'detached',
# Rename these to the internal name.
'display_description': kwargs.pop('description'),
@@ -115,7 +115,7 @@ class ManageCastTask(flow_utils.CinderTask):
def revert(self, context, result, flow_failures, volume, **kwargs):
# Restore the source volume status and set the volume to error status.
common.error_out(volume)
common.error_out(volume, status='error_managing')
LOG.error(_LE("Volume %s: manage failed."), volume.id)
exc_info = False
if all(flow_failures[-1].exc_info):

View File

@@ -75,7 +75,7 @@ def _clean_reason(reason):
return reason[0:REASON_LENGTH] + '...'
def error_out(resource, reason=None):
def error_out(resource, reason=None, status='error'):
"""Sets status to error for any persistent OVO."""
reason = _clean_reason(reason)
try:
@@ -83,11 +83,12 @@ def error_out(resource, reason=None):
'%(reason)s', {'object_type': resource.obj_name(),
'object_id': resource.id,
'reason': reason})
resource.status = 'error'
resource.status = status
resource.save()
except Exception:
# Don't let this cause further exceptions.
LOG.exception(_LE("Failed setting %(object_type)s %(object_id)s to "
" error status."),
" %(status) status."),
{'object_type': resource.obj_name(),
'object_id': resource.id})
'object_id': resource.id,
'status': status})
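
Usage sketch for the extended helper, given any persistent OVO
'volume': the default keeps the pre-change behavior, and the manage
flows now pass the internal status explicitly.

    from cinder.volume.flows import common

    common.error_out(volume)                      # status -> 'error'
    common.error_out(volume,
                     reason='Volume manage failed.',
                     status='error_managing')     # status -> 'error_managing'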

View File

@@ -13,6 +13,7 @@
# under the License.
from oslo_log import log as logging
from oslo_utils import excutils
import taskflow.engines
from taskflow.patterns import linear_flow
@@ -40,16 +41,24 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask):
self.driver = driver
def execute(self, context, volume, manage_existing_ref):
driver_name = self.driver.__class__.__name__
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
LOG.error(_LE("Unable to manage existing volume. "
"Volume driver %s not initialized.") % driver_name)
flow_common.error_out(volume, _("Volume driver %s not "
"initialized.") % driver_name)
"initialized.") % driver_name,
status='error_managing')
raise exception.DriverNotInitialized()
size = self.driver.manage_existing_get_size(volume,
manage_existing_ref)
size = 0
try:
size = self.driver.manage_existing_get_size(volume,
manage_existing_ref)
except Exception:
with excutils.save_and_reraise_exception():
reason = _("Volume driver %s get exception.") % driver_name
flow_common.error_out(volume, reason,
status='error_managing')
return {'size': size,
'volume_type_id': volume.volume_type_id,
@@ -60,7 +69,8 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask):
def revert(self, context, result, flow_failures, volume, **kwargs):
reason = _('Volume manage failed.')
flow_common.error_out(volume, reason=reason)
flow_common.error_out(volume, reason=reason,
status='error_managing')
LOG.error(_LE("Volume %s: manage failed."), volume.id)

View File

@@ -763,16 +763,17 @@ class VolumeManager(manager.SchedulerDependentManager):
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
if volume.status != 'error_managing_deleting':
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)

View File

@@ -60,6 +60,9 @@ def _usage_from_volume(context, volume_ref, **kw):
now = timeutils.utcnow()
launched_at = volume_ref['launched_at'] or now
created_at = volume_ref['created_at'] or now
volume_status = volume_ref['status']
if volume_status == 'error_managing_deleting':
volume_status = 'deleting'
usage_info = dict(
tenant_id=volume_ref['project_id'],
host=volume_ref['host'],
@@ -70,7 +73,7 @@
display_name=volume_ref['display_name'],
launched_at=launched_at.isoformat(),
created_at=created_at.isoformat(),
status=volume_ref['status'],
status=volume_status,
snapshot_id=volume_ref['snapshot_id'],
size=volume_ref['size'],
replication_status=volume_ref['replication_status'],