Merge "Add service_uuid FK to volumes"

Zuul 2017-11-27 19:22:32 +00:00 committed by Gerrit Code Review
commit b6c5f82b7f
12 changed files with 191 additions and 4 deletions

View File

@@ -208,6 +208,7 @@ class DbCommands(object):
# Added in Queens
db.service_uuids_online_data_migration,
db.backup_service_online_migration,
db.volume_service_uuids_online_data_migration,
)
def __init__(self):
@@ -299,6 +300,14 @@ class DbCommands(object):
max_count = 50
print(_('Running batches of %i until complete.') % max_count)
# FIXME(jdg): So this is annoying and confusing,
# we iterate through in batches until there are no
# more updates, that's AWESOME!! BUT we only print
# out a table reporting found/done AFTER the loop
# here, so that means the response the user sees is
# always a table of "needed 0" and "completed 0".
# So it's an indication of "all done" but it seems like
# some feedback as we go would be nice to have here.
ran = None
migration_info = {}
while ran is None or ran != 0:
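
The FIXME above points out that the found/completed table is only printed after the batching loop finishes, so a long run of online data migrations gives no feedback while it works. Below is a minimal sketch of the per-batch reporting it asks for; the helper name and the plain print format are illustrative and not part of this change, and each migration is assumed to be a callable taking (context, max_count) and returning (found, done), as the migrations registered above do.

    def _run_migrations_verbose(ctxt, migrations, max_count=50):
        # Illustrative only: report progress after every batch instead of
        # printing a single summary table once the loop exits.
        ran = None
        while ran is None or ran != 0:
            ran = 0
            for migration in migrations:
                found, done = migration(ctxt, max_count)
                print('%s: updated %i of %i remaining rows this pass'
                      % (migration.__name__, done, found))
                ran += done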

View File

@@ -101,6 +101,10 @@ def backup_service_online_migration(context, max_count):
return IMPL.backup_service_online_migration(context, max_count)
def volume_service_uuids_online_data_migration(context, max_count):
return IMPL.volume_service_uuids_online_data_migration(context, max_count)
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, service_id)

View File

@@ -632,6 +632,38 @@ def backup_service_online_migration(context, max_count):
return total, updated
@enginefacade.writer
def volume_service_uuids_online_data_migration(context, max_count):
"""Update volume service_uuid columns."""
updated = 0
query = model_query(context,
models.Volume).filter_by(service_uuid=None)
total = query.count()
vol_refs = query.limit(max_count).all()
service_refs = model_query(context, models.Service).filter_by(
topic="cinder-volume").limit(max_count).all()
# build a map to access the service uuid by host
svc_map = {}
for svc in service_refs:
svc_map[svc.host] = svc.uuid
# update our volumes appropriately
for v in vol_refs:
host = v.host.split('#')
v['service_uuid'] = svc_map[host[0]]
# re-use the session we already have associated with the
# volumes here (from the query above)
session = query.session
with session.begin():
v.save(session)
updated += 1
return total, updated
###################
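
The mapping above works because a volume's host has the form 'host@backend#pool' while the matching cinder-volume service's host is just 'host@backend', so splitting on '#' and keeping the first piece yields the dictionary key. A standalone illustration, reusing the host string and uuid that appear in the unit tests for this change:

    # Not part of this change: the host -> service_uuid lookup in isolation.
    svc_map = {'host1@lvm-driver1': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
    volume_host = 'host1@lvm-driver1#lvm-driver1'
    backend = volume_host.split('#')[0]   # strip the '#pool' suffix
    print(svc_map[backend])               # uuid stamped onto the volume row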

View File

@@ -0,0 +1,37 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
def upgrade(migrate_engine):
"""Add service_uuid column to volumes."""
meta = MetaData(bind=migrate_engine)
Table('services', meta, autoload=True)
volumes = Table('volumes', meta, autoload=True)
if not hasattr(volumes.c, 'service_uuid'):
volumes.create_column(Column('service_uuid', String(36),
ForeignKey('services.uuid'),
nullable=True))
index_name = 'volumes_service_uuid_idx'
indexes = Inspector(migrate_engine).get_indexes('volumes')
if index_name not in (i['name'] for i in indexes):
volumes = Table('volumes', meta, autoload=True)
Index(index_name, volumes.c.service_uuid, volumes.c.deleted).create()
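
A small verification sketch, mirroring the _check_114 test added further down: after the upgrade, the volumes table should expose a 36-character service_uuid column and the volumes_service_uuid_idx index. This is not part of the change; the function name and db_url parameter are illustrative.

    from sqlalchemy import create_engine, inspect

    def check_volumes_service_uuid(db_url):
        # Reflect the upgraded schema and confirm the new column and index.
        inspector = inspect(create_engine(db_url))
        columns = {c['name'] for c in inspector.get_columns('volumes')}
        indexes = {i['name'] for i in inspector.get_indexes('volumes')}
        assert 'service_uuid' in columns
        assert 'volumes_service_uuid_idx' in indexes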

View File

@@ -242,6 +242,10 @@ class GroupSnapshot(BASE, CinderBase):
class Volume(BASE, CinderBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
__table_args__ = (Index('volumes_service_uuid_idx',
'deleted', 'service_uuid'),
CinderBase.__table_args__)
id = Column(String(36), primary_key=True)
_name_id = Column(String(36)) # Don't access/modify this directly!
@@ -311,6 +315,12 @@ class Volume(BASE, CinderBase):
foreign_keys=group_id,
primaryjoin='Volume.group_id == Group.id')
service_uuid = Column(String(36), index=True)
service = relationship(Service,
backref="volumes",
foreign_keys=service_uuid,
primaryjoin='Volume.service_uuid == Service.uuid')
class VolumeMetadata(BASE, CinderBase):
"""Represents a metadata key/value pair for a volume."""

View File

@@ -138,6 +138,7 @@ OBJ_VERSIONS.add('1.27', {'Backup': '1.5', 'BackupImport': '1.5'})
OBJ_VERSIONS.add('1.28', {'Service': '1.5'})
OBJ_VERSIONS.add('1.29', {'Service': '1.6'})
OBJ_VERSIONS.add('1.30', {'RequestSpec': '1.2'})
OBJ_VERSIONS.add('1.31', {'Volume': '1.7'})
class CinderObjectRegistry(base.VersionedObjectRegistry):

View File

@@ -60,7 +60,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
# Version 1.4: Added cluster fields
# Version 1.5: Added group
# Version 1.6: This object is now cleanable (adds rows to workers table)
VERSION = '1.6'
# Version 1.7: Added service_uuid
VERSION = '1.7'
OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
'volume_type', 'volume_attachment', 'consistencygroup',
@@ -124,6 +125,7 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
nullable=True),
'snapshots': fields.ObjectField('SnapshotList', nullable=True),
'group': fields.ObjectField('Group', nullable=True),
'service_uuid': fields.StringField(nullable=True),
}
# NOTE(thangp): obj_extra_fields is used to hold properties that are not
@@ -233,7 +235,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
def obj_make_compatible(self, primitive, target_version):
"""Make a Volume representation compatible with a target version."""
added_fields = (((1, 4), ('cluster', 'cluster_name')),
((1, 5), ('group', 'group_id')))
((1, 5), ('group', 'group_id')),
((1, 7), ('service_uuid',)))
# Convert all related objects
super(Volume, self).obj_make_compatible(primitive, target_version)
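
obj_make_compatible is what keeps rolling upgrades safe here: when a 1.7 Volume has to be serialized for a peer pinned below 1.7, the new field is stripped from the primitive so older code never sees an unknown key (note the trailing comma in ('service_uuid',) above, which makes it a one-element tuple rather than a bare string). A self-contained illustration of that stripping, reusing the uuid from the unit tests:

    # Not part of this change: the pop-unknown-fields pattern in isolation.
    primitive = {'status': 'available',
                 'service_uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
    target_version = (1, 6)                   # peer still on Volume 1.6
    for version, removed in (((1, 7), ('service_uuid',)),):
        if target_version < version:
            for field in removed:
                primitive.pop(field, None)
    print(primitive)                          # {'status': 'available'}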

View File

@@ -47,7 +47,7 @@ object_data = {
'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',
'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Volume': '1.6-7d3bc8577839d5725670d55e480fe95f',
'Volume': '1.7-0845c5b7b826a4e9019f3684c2f9b132',
'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'VolumeAttachment': '1.2-b68b357a1756582b706006ea9de40c9a',
'VolumeAttachmentList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',

View File

@@ -460,6 +460,61 @@ class DBAPIServiceTestCase(BaseTest):
self.assertIsInstance(binary_op, sqlalchemy_api.sql.functions.Function)
self.assertEqual('binary', binary_op.name)
def test_volume_service_uuid_migrations(self):
# Force create one entry with no UUID
sqlalchemy_api.volume_create(self.ctxt,
{'host': 'host1@lvm-driver1#lvm-driver1'})
# Create another one with a valid UUID
sqlalchemy_api.volume_create(
self.ctxt,
{'host': 'host1@lvm-driver1#lvm-driver1',
'service_uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'})
# Need a service to query
values = {
'host': 'host1@lvm-driver1',
'binary': 'cinder-volume',
'topic': 'cinder-volume'}
utils.create_service(self.ctxt, values)
# Run the migration and verify that we updated 1 entry
total, updated = db.volume_service_uuids_online_data_migration(
self.ctxt, 10)
self.assertEqual(1, total)
self.assertEqual(1, updated)
def test_volume_service_uuid_migrations_with_limit(self):
"""Test db migrate of volumes in batches."""
db.volume_create(
self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
db.volume_create(
self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
db.volume_create(
self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
values = {
'host': 'host1@lvm-driver1',
'binary': 'cinder-volume',
'topic': 'cinder-volume',
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
utils.create_service(self.ctxt, values)
# Run the migration and verify that we updated 2 entries
total, updated = db.volume_service_uuids_online_data_migration(
self.ctxt, 2)
self.assertEqual(3, total)
self.assertEqual(2, updated)
# Now get the last one (intentionally setting max > expected)
total, updated = db.volume_service_uuids_online_data_migration(
self.ctxt, 2)
self.assertEqual(1, total)
self.assertEqual(1, updated)
@ddt.ddt
class DBAPIVolumeTestCase(BaseTest):

View File

@@ -372,6 +372,18 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertEqual(sorted(['deleted', 'uuid']),
sorted(index_columns))
def _check_114(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.service_uuid.type,
self.VARCHAR_TYPE)
index_columns = []
for idx in volumes.indexes:
if idx.name == 'volumes_service_uuid_idx':
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(['deleted', 'service_uuid']),
sorted(index_columns))
def test_walk_versions(self):
self.walk_versions(False, False)
self.assert_each_foreign_key_is_part_of_an_index()

View File

@@ -168,7 +168,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
m_get_stats.return_value = {'name': 'cinder-volumes'}
m_get_filter.return_value = myfilterfunction
m_get_goodness.return_value = mygoodnessfunction
manager._report_driver_status(1)
manager._report_driver_status(context.get_admin_context())
self.assertTrue(m_get_stats.called)
mock_update.assert_called_once_with(expected)

View File

@@ -206,6 +206,7 @@ class VolumeManager(manager.CleanableManager,
self.configuration = config.Configuration(volume_backend_opts,
config_group=service_name)
self.stats = {}
self.service_uuid = None
if not volume_driver:
# Get from configuration, which will get the default
@@ -236,6 +237,7 @@ class VolumeManager(manager.CleanableManager,
"for driver init.")
else:
curr_active_backend_id = service.active_backend_id
self.service_uuid = service.uuid
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning("Suppressing requests library SSL Warnings")
@@ -678,6 +680,10 @@ class VolumeManager(manager.CleanableManager,
# volume stats as these are decremented on delete.
self._update_allocated_capacity(volume)
updates = {'service_uuid': self.service_uuid}
volume.update(updates)
volume.save()
LOG.info("Created volume successfully.", resource=volume)
return volume.id
@@ -2358,6 +2364,24 @@ class VolumeManager(manager.CleanableManager,
@periodic_task.periodic_task
def _report_driver_status(self, context):
# It's possible during live db migration that the self.service_uuid
# value isn't set (we didn't restart services), so we'll go ahead
# and make this a part of the service periodic
if not self.service_uuid:
svc_host = vol_utils.extract_host(self.host, 'backend')
# We hack this with a try/except for unit tests temporarily
try:
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
self.service_uuid = service.uuid
except exception.ServiceNotFound:
LOG.warning("Attempt to update service_uuid "
"resulted in a Service NotFound "
"exception, service_uuid field on "
"volumes will be NULL.")
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
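
For reference, the backend key used in the Service lookup above comes from dropping any '#pool' suffix from the manager's host string, which is what extract_host does at the 'backend' level (imported in this module as vol_utils). A short illustration with the host value used in the tests; the assert is just for demonstration:

    from cinder.volume import utils as vol_utils

    # 'backend' keeps 'host@backend' and drops any '#pool' suffix, matching
    # the host stored on the cinder-volume Service row.
    backend = vol_utils.extract_host('host1@lvm-driver1#lvm-driver1', 'backend')
    assert backend == 'host1@lvm-driver1'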