Update provider_id column on SolidFire init

This patch just implements an init routine to update
the cinder db with provider_id mappings to known/active
volumes on the SolidFire cluster.

Now that we have added a mapping feature in the DB, we need
to go back and populate these columns on an upgrade.  It's
also helpful to have this mechanism for internal mappings on
init, that's why it's here as an init routine instead of part
of an upgrade migration or cinder-manage script.

Change-Id: I1e4da2adff2ae85a3020b38167c8c63fc56b8417
This commit is contained in:
John Griffith 2015-08-20 19:09:57 +00:00
parent 1a7ce2d32e
commit 44419e89d8
4 changed files with 69 additions and 3 deletions

View File

@ -50,6 +50,7 @@ class SolidFireVolumeTestCase(test.TestCase):
self.configuration.sf_template_account_name = 'openstack-vtemplate'
self.configuration.sf_allow_template_caching = False
self.configuration.sf_svip = None
self.configuration.sf_enable_volume_mapping = True
super(SolidFireVolumeTestCase, self).setUp()
self.stubs.Set(solidfire.SolidFireDriver,
@ -1013,3 +1014,35 @@ class SolidFireVolumeTestCase(test.TestCase):
self.configuration.sf_svip = configured_svip
v = sfv._get_model_info(sfaccount, 1)
self.assertEqual('%s 0' % configured_svip, v['provider_location'])
def test_init_volume_mappings(self):
    """Only volumes with a missing/stale provider_id produce updates."""
    driver = solidfire.SolidFireDriver(configuration=self.configuration)
    unmapped_id = 'c9125d6d-22ff-4cc3-974d-d4e350df9c91'
    mapped_id = '79883868-6933-47a1-a362-edfbf8d55a18'
    owner_a = 'e6fb073c-11f0-4f4c-897c-90e7c7c4bcf8'
    owner_b = '4ff32607-305c-4a6b-a51a-0dd33124eecf'
    # One Cinder volume with no provider_id yet, one already mapped.
    cinder_vols = [
        {'id': unmapped_id,
         'project_id': owner_a,
         'provider_id': None},
        {'id': mapped_id,
         'project_id': owner_b,
         'provider_id': 22},
    ]
    # Matching volumes as the SolidFire cluster would report them.
    cluster_vols = [
        {'volumeID': 99,
         'name': 'UUID-' + unmapped_id,
         'accountID': 100},
        {'volumeID': 22,
         'name': 'UUID-' + mapped_id,
         'accountID': 200},
    ]

    def _fake_issue_api_req(method, params, version=0):
        # The init routine only needs the active-volume listing.
        if 'ListActiveVolumes' in method:
            return {'result': {'volumes': cluster_vols}}

    with mock.patch.object(
            driver, '_issue_api_request',
            side_effect=_fake_issue_api_req):
        updates = driver._init_volume_mappings(cinder_vols)

    self.assertEqual(99, updates[0]['provider_id'])
    self.assertEqual(1, len(updates))

View File

@ -1179,10 +1179,10 @@ class BaseVD(object):
"""
return None
def update_provider_info(self, volid_list):
def update_provider_info(self, volumes):
    """Get provider info updates from driver.

    Drivers that keep a backend-side identifier for each volume should
    override this and return one entry per volume whose provider info
    needs to be refreshed in the Cinder database.

    :param volumes: List of Cinder volumes to check for updates
    :return: None in this base implementation; overriding drivers
             return a list of dicts of the form
             {'id': <cinder-uuid>, 'provider_id': <provider-id>}
    """
    return None

View File

@ -74,6 +74,12 @@ sf_opts = [
'This is required or deployments that have implemented '
'the use of VLANs for iSCSI networks in their cloud.'),
cfg.BoolOpt('sf_enable_volume_mapping',
default=True,
help='Create an internal mapping of volume IDs and account. '
'Optimizes lookups and performance at the expense of '
'memory, very large deployments may want to consider '
'setting to False.'),
cfg.IntOpt('sf_api_port',
default=443,
@ -161,6 +167,8 @@ class SolidFireDriver(san.SanISCSIDriver):
self._endpoint = self._build_endpoint_info()
self.template_account_id = None
self.max_volumes_per_account = 1990
self.volume_map = {}
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
@ -174,6 +182,31 @@ class SolidFireDriver(san.SanISCSIDriver):
solidfire_driver=self,
configuration=self.configuration))
def _init_volume_mappings(self, vrefs):
updates = []
sf_vols = self._issue_api_request('ListActiveVolumes',
{})['result']['volumes']
self.volume_map = {}
for v in vrefs:
seek_name = 'UUID-%s' % v['id']
sfvol = next(
(sv for sv in sf_vols if sv['name'] == seek_name), None)
if sfvol:
if self.configuration.sf_enable_volume_mapping:
self.volume_map[v['id']] = (
{'sf_id': sfvol['volumeID'],
'sf_account': sfvol['accountID'],
'cinder_account': v['project_id']})
if v.get('provider_id', 'nil') != sfvol['volumeID']:
v['provider_id'] == sfvol['volumeID']
updates.append({'id': v['id'],
'provider_id': sfvol['volumeID']})
return updates
def update_provider_info(self, vrefs):
    """Return provider_id updates for the given Cinder volume refs.

    Delegates to _init_volume_mappings, which also (re)builds the
    driver's internal volume map as a side effect.
    """
    updates = self._init_volume_mappings(vrefs)
    return updates
def _create_template_account(self, account_name):
# We raise an API exception if the account doesn't exist

View File

@ -301,7 +301,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# to be safe in what we allow and add a list of allowed keys
# things that make sense are provider_*, replication_status etc
updates = self.driver.update_provider_info([v['id'] for v in volumes])
updates = self.driver.update_provider_info(volumes)
host_vols = utils.list_of_dicts_to_dict(volumes, 'id')
for u in updates or []: