Add cinder-manage command to update service_uuid
In some deployments, after an upgrade, we remove the old service
records and create new ones. This leaves the volumes with the
service_uuid pointing to the old (deleted) service and causes an
issue while purging out the deleted service records.
This patch adds a cinder-manage command to update the
service_uuid field of volumes with the following command:
``cinder-manage volume update_service``
The service_uuid of volumes associated with old service_uuid
also gets updated when cinder creates a new service.
Change-Id: I0b13c6351733b8163bcf6e73c939c375493dcba5
(cherry picked from commit edeac132a1)
This commit is contained in:
parent
65ff2ed722
commit
530376bc5d
@ -640,6 +640,16 @@ class VolumeCommands(object):
|
||||
db.volume_update(ctxt, v['id'],
|
||||
{'host': newhost})
|
||||
|
||||
def update_service(self):
|
||||
"""Modify the service uuid associated with a volume.
|
||||
|
||||
In certain upgrade cases, we create new cinder services and delete the
|
||||
records of old ones, however, the volumes created with old service
|
||||
still contain the service uuid of the old services.
|
||||
"""
|
||||
ctxt = context.get_admin_context()
|
||||
db.volume_update_all_by_service(ctxt)
|
||||
|
||||
|
||||
class ConfigCommands(object):
|
||||
"""Class for exposing the flags defined by flag_file(s)."""
|
||||
|
@ -460,6 +460,11 @@ def volume_get_all_by_host(context, host, filters=None):
|
||||
return IMPL.volume_get_all_by_host(context, host, filters=filters)
|
||||
|
||||
|
||||
def volume_update_all_by_service(context):
    """Update all volumes associated with an old service.

    Delegates to the configured backend implementation, which repoints
    each volume's service_uuid at the current cinder-volume service
    record for the volume's host.

    :param context: admin context to run the update under
    """
    result = IMPL.volume_update_all_by_service(context)
    return result
|
||||
|
||||
|
||||
def volume_get_all_by_group(context, group_id, filters=None):
|
||||
"""Get all volumes belonging to a consistency group."""
|
||||
return IMPL.volume_get_all_by_group(context, group_id, filters=filters)
|
||||
|
@ -2846,6 +2846,32 @@ def volume_get_all_by_group(context, group_id, filters=None):
|
||||
return query.all()
|
||||
|
||||
|
||||
@require_admin_context
@main_context_manager.writer
def volume_update_all_by_service(context):
    """Ensure volumes have the correct service_uuid value for their host.

    Some deployment tools recreate every service record on upgrade: the
    cinder-volume service keeps its host name but gets a brand new row
    (and uuid) in the services table, after which the old row is deleted.
    Volumes then still reference the deleted service's uuid even though
    their host name is correct, so repoint them at the current record.

    :param context: context to query under
    """
    # One bulk UPDATE per current cinder-volume service.
    for svc in service_get_all(context, binary='cinder-volume'):
        host_match = _filter_host(models.Volume.host, svc.host)
        stale_uuid = models.Volume.service_uuid != svc.uuid
        model_query(context, models.Volume).filter(
            host_match, stale_uuid).update(
            {"service_uuid": svc.uuid}, synchronize_session=False)
|
||||
|
||||
|
||||
@require_context
|
||||
@main_context_manager.reader
|
||||
def volume_get_all_by_generic_group(context, group_id, filters=None):
|
||||
|
@ -42,6 +42,7 @@ profiler_opts = importutils.try_import('osprofiler.opts')
|
||||
from cinder.common import constants
|
||||
from cinder import context
|
||||
from cinder import coordination
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder import objects
|
||||
@ -366,6 +367,9 @@ class Service(service.Service):
|
||||
# If we have updated the service_ref with replication data from
|
||||
# the cluster it will be saved.
|
||||
service_ref.save()
|
||||
# Update all volumes that are associated with an old service with
|
||||
# the new service uuid
|
||||
db.volume_update_all_by_service(context)
|
||||
|
||||
def __getattr__(self, key: str):
|
||||
manager = self.__dict__.get('manager', None)
|
||||
|
@ -911,6 +911,42 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
filters={'foo': 'bar'})
|
||||
self.assertEqual([], vols)
|
||||
|
||||
def test_volume_update_all_by_service(self):
|
||||
volume_service_uuid = '918f24b6-c4c9-48e6-86c6-6871e91f4779'
|
||||
alt_vol_service_uuid = '4b3356a0-31e1-4cec-af1c-07e1e0d7dcf0'
|
||||
service_uuid_1 = 'c7b169f8-8da6-4330-b462-0467069371e2'
|
||||
service_uuid_2 = '38d41b71-2f4e-4d3e-8206-d51ace608bca'
|
||||
host = 'fake_host'
|
||||
alt_host = 'alt_fake_host'
|
||||
binary = 'cinder-volume'
|
||||
# Create 3 volumes with host 'fake_host'
|
||||
for i in range(3):
|
||||
db.volume_create(self.ctxt, {
|
||||
'service_uuid': volume_service_uuid,
|
||||
'host': host,
|
||||
'volume_type_id': fake.VOLUME_TYPE_ID})
|
||||
# Create 2 volumes with host 'alt_fake_host'
|
||||
for i in range(2):
|
||||
db.volume_create(self.ctxt, {
|
||||
'service_uuid': alt_vol_service_uuid,
|
||||
'host': alt_host,
|
||||
'volume_type_id': fake.VOLUME_TYPE_ID})
|
||||
# Create service entry for 'fake_host'
|
||||
utils.create_service(
|
||||
self.ctxt,
|
||||
{'uuid': service_uuid_1, 'host': host, 'binary': binary})
|
||||
# Create service entry for 'alt_fake_host'
|
||||
utils.create_service(
|
||||
self.ctxt,
|
||||
{'uuid': service_uuid_2, 'host': alt_host, 'binary': binary})
|
||||
db.volume_update_all_by_service(self.ctxt)
|
||||
volumes = db.volume_get_all(self.ctxt)
|
||||
for volume in volumes:
|
||||
if volume.host == host:
|
||||
self.assertEqual(service_uuid_1, volume.service_uuid)
|
||||
elif volume.host == alt_host:
|
||||
self.assertEqual(service_uuid_2, volume.service_uuid)
|
||||
|
||||
def test_volume_get_all_by_project(self):
|
||||
volumes = []
|
||||
for i in range(3):
|
||||
|
@ -178,6 +178,15 @@ Delete a volume without first checking that the volume is available.
|
||||
Updates the host name of all volumes currently associated with a specified
|
||||
host.
|
||||
|
||||
``cinder-manage volume update_service``
|
||||
|
||||
When upgrading cinder, new service entries are created in the database as the
|
||||
existing cinder-volume host(s) are upgraded. In some cases, rows in the
|
||||
volumes table keep references to the old service, which can prevent the old
|
||||
services from being deleted when the database is purged. This command makes
|
||||
sure that all volumes on all cinder-volume hosts have updated service
references.
|
||||
|
||||
Cinder Host
|
||||
~~~~~~~~~~~
|
||||
|
||||
|
15
releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml
Normal file
15
releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml
Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Added a new cinder-manage command to handle the situation where database
|
||||
purges would not complete due to the volumes table holding references to
|
||||
deleted services. The new command makes sure that all volumes have a
|
||||
reference only to the correct service_uuid, which will allow old service
|
||||
records to be purged from the database.
|
||||
|
||||
Command: ``cinder-manage volume update_service``
|
||||
- |
|
||||
When Cinder creates a new cinder-volume service, it now also immediately
|
||||
updates the service_uuid for all volumes associated with that
|
||||
cinder-volume host. In some cases, this was preventing the database purge
|
||||
operation from completing successfully.
|
Loading…
Reference in New Issue
Block a user