Browse Source

Merge "Fix online data migrations"

changes/89/680889/1
Zuul 1 week ago
parent
commit
360a19eef4

+ 7
- 35
cinder/cmd/manage.py View File

@@ -122,30 +122,6 @@ def _get_non_shared_target_hosts(ctxt):
122 122
     return hosts, numvols_needing_update
123 123
 
124 124
 
125
-def shared_targets_online_data_migration(ctxt, max_count):
126
-    """Update existing volumes shared_targets flag based on capabilities."""
127
-    non_shared_hosts = []
128
-    completed = 0
129
-
130
-    non_shared_hosts, total_vols_to_update = _get_non_shared_target_hosts(ctxt)
131
-    for host in non_shared_hosts:
132
-        # We use the api call here instead of going direct to
133
-        # db query to take advantage of parsing out the host
134
-        # correctly
135
-        vrefs = db_api.volume_get_all_by_host(
136
-            ctxt, host,
137
-            filters={'shared_targets': True})
138
-        if len(vrefs) > max_count:
139
-            del vrefs[-(len(vrefs) - max_count):]
140
-        max_count -= len(vrefs)
141
-        for v in vrefs:
142
-            db.volume_update(
143
-                ctxt, v['id'],
144
-                {'shared_targets': 0})
145
-            completed += 1
146
-    return total_vols_to_update, completed
147
-
148
-
149 125
 # Decorators for actions
150 126
 def args(*args, **kwargs):
151 127
     def _decorator(func):
@@ -268,17 +244,13 @@ class DbCommands(object):
268 244
     # NOTE: Online migrations cannot depend on having Cinder services running.
269 245
     # Migrations can be called during Fast-Forward Upgrades without having any
270 246
     # Cinder services up.
271
-    online_migrations = (
272
-        # Added in Queens
273
-        db.service_uuids_online_data_migration,
274
-        # Added in Queens
275
-        db.backup_service_online_migration,
276
-        # Added in Queens
277
-        db.volume_service_uuids_online_data_migration,
278
-        # Added in Queens
279
-        shared_targets_online_data_migration,
280
-        # Added in Queens
281
-        db.attachment_specs_online_data_migration
247
+    # NOTE: Online migrations must be removed at the beginning of the next
248
+    # release after the one they've been introduced in.  A comment with the release
249
+    # a migration is introduced and the one where it must be removed must
250
+    # precede any element of the "online_migrations" tuple, like this:
251
+    #    # Added in Queens remove in Rocky
252
+    #    db.service_uuids_online_data_migration,
253
+    online_migrations = tuple(
282 254
     )
283 255
 
284 256
     def __init__(self):

+ 32
- 0
cinder/cmd/status.py View File

@@ -18,6 +18,9 @@
18 18
 import os
19 19
 import sys
20 20
 
21
+from cinder import context
22
+from cinder import db
23
+from cinder import exception
21 24
 from cinder import objects
22 25
 from cinder import service  # noqa
23 26
 from oslo_config import cfg
@@ -60,6 +63,10 @@ def _get_enabled_drivers():
60 63
 class Checks(uc.UpgradeCommands):
61 64
     """Upgrade checks to run."""
62 65
 
66
+    def __init__(self, *args, **kwargs):
67
+        super(Checks, self).__init__(*args, **kwargs)
68
+        self.context = context.get_admin_context()
69
+
63 70
     def _file_exists(self, path):
64 71
         """Helper for mocking check of os.path.exists."""
65 72
         return os.path.exists(path)
@@ -231,6 +238,29 @@ class Checks(uc.UpgradeCommands):
231 238
 
232 239
         return uc.Result(SUCCESS)
233 240
 
241
+    def _check_service_uuid(self):
242
+        try:
243
+            db.service_get_by_uuid(self.context, None)
244
+        except exception.ServiceNotFound:
245
+            volumes = db.volume_get_all(self.context,
246
+                                        limit=1,
247
+                                        filters={'service_uuid': None})
248
+            if not volumes:
249
+                return uc.Result(SUCCESS)
250
+        return uc.Result(
251
+            FAILURE,
252
+            'Services and volumes must have a service UUID. Please fix this '
253
+            'issue by running Queens online data migrations.')
254
+
255
+    def _check_attachment_specs(self):
256
+        if db.attachment_specs_exist(self.context):
257
+            return uc.Result(
258
+                FAILURE,
259
+                'There should be no more AttachmentSpecs in the system. '
260
+                'Please fix this issue by running Queens online data '
261
+                'migrations.')
262
+        return uc.Result(SUCCESS)
263
+
234 264
     _upgrade_checks = (
235 265
         # added in Stein
236 266
         ('Backup Driver Path', _check_backup_module),
@@ -240,6 +270,8 @@ class Checks(uc.UpgradeCommands):
240 270
         # added in Train
241 271
         ('Periodic Interval Use', _check_periodic_interval),
242 272
         ('Use of Nest Quota Driver', _check_nested_quota),
273
+        ('Service UUIDs', _check_service_uuid),
274
+        ('Attachment specs', _check_attachment_specs),
243 275
     )
244 276
 
245 277
 

+ 5
- 15
cinder/db/api.py View File

@@ -97,18 +97,6 @@ def dispose_engine():
97 97
 ###################
98 98
 
99 99
 
100
-def service_uuids_online_data_migration(context, max_count):
101
-    return IMPL.service_uuids_online_data_migration(context, max_count)
102
-
103
-
104
-def backup_service_online_migration(context, max_count):
105
-    return IMPL.backup_service_online_migration(context, max_count)
106
-
107
-
108
-def volume_service_uuids_online_data_migration(context, max_count):
109
-    return IMPL.volume_service_uuids_online_data_migration(context, max_count)
110
-
111
-
112 100
 def service_destroy(context, service_id):
113 101
     """Destroy the service or raise if it does not exist."""
114 102
     return IMPL.service_destroy(context, service_id)
@@ -1827,6 +1815,11 @@ class Condition(object):
1827 1815
 ###################
1828 1816
 
1829 1817
 
1818
+def attachment_specs_exist(context):
1819
+    """Check if there are attachment specs left."""
1820
+    return IMPL.attachment_specs_exist(context)
1821
+
1822
+
1830 1823
 def attachment_specs_get(context, attachment_id):
1831 1824
     """DEPRECATED: Get all specs for an attachment."""
1832 1825
     return IMPL.attachment_specs_get(context, attachment_id)
@@ -1850,9 +1843,6 @@ def attachment_specs_update_or_create(context,
1850 1843
                                                   specs)
1851 1844
 
1852 1845
 
1853
-def attachment_specs_online_data_migration(context, max_count):
1854
-    return IMPL.attachment_specs_online_data_migration(context, max_count)
1855
-
1856 1846
 ###################
1857 1847
 
1858 1848
 

+ 6
- 111
cinder/db/sqlalchemy/api.py View File

@@ -58,7 +58,6 @@ from sqlalchemy.sql import func
58 58
 from sqlalchemy.sql import sqltypes
59 59
 
60 60
 from cinder.api import common
61
-from cinder.common import constants
62 61
 from cinder.common import sqlalchemyutils
63 62
 from cinder import db
64 63
 from cinder.db.sqlalchemy import models
@@ -535,7 +534,7 @@ def service_get_all(context, backend_match_level=None, **filters):
535 534
 
536 535
 @require_admin_context
537 536
 def service_get_by_uuid(context, service_uuid):
538
-    query = model_query(context, models.Service).fitler_by(uuid=service_uuid)
537
+    query = model_query(context, models.Service).filter_by(uuid=service_uuid)
539 538
     result = query.first()
540 539
     if not result:
541 540
         raise exception.ServiceNotFound(service_id=service_uuid)
@@ -570,115 +569,6 @@ def service_update(context, service_id, values):
570 569
         raise exception.ServiceNotFound(service_id=service_id)
571 570
 
572 571
 
573
-@enginefacade.writer
574
-def service_uuids_online_data_migration(context, max_count):
575
-    from cinder.objects import service
576
-
577
-    updated = 0
578
-    total = model_query(context, models.Service).filter_by(uuid=None).count()
579
-    db_services = model_query(context, models.Service).filter_by(
580
-        uuid=None).limit(max_count).all()
581
-    for db_service in db_services:
582
-        # The conversion in the Service object code
583
-        # will generate a UUID and save it for us.
584
-        service_obj = service.Service._from_db_object(
585
-            context, service.Service(), db_service)
586
-        if 'uuid' in service_obj:
587
-            updated += 1
588
-    return total, updated
589
-
590
-
591
-@require_admin_context
592
-def backup_service_online_migration(context, max_count):
593
-    name_rules = {'cinder.backup.drivers.swift':
594
-                  'cinder.backup.drivers.swift.SwiftBackupDriver',
595
-                  'cinder.backup.drivers.ceph':
596
-                  'cinder.backup.drivers.ceph.CephBackupDriver',
597
-                  'cinder.backup.drivers.glusterfs':
598
-                  'cinder.backup.drivers.glusterfs.GlusterfsBackupDriver',
599
-                  'cinder.backup.drivers.google':
600
-                  'cinder.backup.drivers.google.GoogleBackupDriver',
601
-                  'cinder.backup.drivers.nfs':
602
-                  'cinder.backup.drivers.nfs.NFSBackupDriver',
603
-                  'cinder.backup.drivers.tsm':
604
-                  'cinder.backup.drivers.tsm.TSMBackupDriver',
605
-                  'cinder.backup.drivers.posix':
606
-                  'cinder.backup.drivers.posix.PosixBackupDriver'}
607
-    total = 0
608
-    updated = 0
609
-    session = get_session()
610
-    with session.begin():
611
-        total = model_query(
612
-            context, models.Backup, session=session).filter(
613
-            models.Backup.service.in_(name_rules.keys())).count()
614
-        backups = (model_query(
615
-            context, models.Backup, session=session).filter(
616
-            models.Backup.service.in_(
617
-                name_rules.keys())).limit(max_count)).all()
618
-        if len(backups):
619
-            for backup in backups:
620
-                updated += 1
621
-                backup.service = name_rules[backup.service]
622
-
623
-    return total, updated
624
-
625
-
626
-@enginefacade.writer
627
-def volume_service_uuids_online_data_migration(context, max_count):
628
-    """Update volume service_uuid columns."""
629
-
630
-    updated = 0
631
-    query = model_query(context,
632
-                        models.Volume).filter_by(service_uuid=None).\
633
-        filter(models.Volume.host.isnot(None))
634
-    total = query.count()
635
-    vol_refs = query.limit(max_count).all()
636
-
637
-    service_refs = model_query(context, models.Service).filter_by(
638
-        topic=constants.VOLUME_TOPIC).limit(max_count).all()
639
-
640
-    # build a map to access the service uuid by host
641
-    svc_map = {}
642
-    for svc in service_refs:
643
-        svc_map[svc.host] = svc.uuid
644
-
645
-    # update our volumes appropriately
646
-    for v in vol_refs:
647
-        host = v.host.split('#')
648
-        v['service_uuid'] = svc_map[host[0]]
649
-        # re-use the session we already have associated with the
650
-        # volumes here (from the query above)
651
-        session = query.session
652
-        with session.begin():
653
-            v.save(session)
654
-        updated += 1
655
-    return total, updated
656
-
657
-
658
-@enginefacade.writer
659
-def attachment_specs_online_data_migration(context, max_count):
660
-    from cinder.objects import volume_attachment
661
-    # First figure out how many attachments have specs which need to be
662
-    # migrated, grouped by the attachment.id from the specs table.
663
-    session = get_session()
664
-    total = session.query(models.AttachmentSpecs.attachment_id).filter_by(
665
-        deleted=False).group_by(models.AttachmentSpecs.attachment_id).count()
666
-    # Now get the limited distinct set of attachment_ids to start migrating.
667
-    result = session.query(
668
-        models.AttachmentSpecs.attachment_id).filter_by(
669
-        deleted=False).group_by(models.AttachmentSpecs.attachment_id).limit(
670
-        max_count).all()
671
-    migrated = 0
672
-    # result is a list of tuples where the first item is the attachment_id
673
-    for attachment_id in result:
674
-        attachment_id = attachment_id[0]
675
-        # Loading the volume attachment object will migrate it's related
676
-        # attachment specs and delete those attachment specs.
677
-        volume_attachment.VolumeAttachment.get_by_id(context, attachment_id)
678
-        migrated += 1
679
-    return total, migrated
680
-
681
-
682 572
 ###################
683 573
 
684 574
 
@@ -2117,6 +2007,11 @@ def attachment_destroy(context, attachment_id):
2117 2007
     return updated_values
2118 2008
 
2119 2009
 
2010
+def attachment_specs_exist(context):
2011
+    query = model_query(context, models.AttachmentSpecs, read_deleted='no')
2012
+    return bool(query.first())
2013
+
2014
+
2120 2015
 def _attachment_specs_query(context, attachment_id, session=None):
2121 2016
     return model_query(context, models.AttachmentSpecs, session=session,
2122 2017
                        read_deleted="no").\

+ 78
- 1
cinder/tests/unit/cmd/test_status.py View File

@@ -14,15 +14,22 @@
14 14
 
15 15
 import ddt
16 16
 import mock
17
+import uuid
17 18
 
18 19
 from oslo_config import cfg
19 20
 from oslo_upgradecheck import upgradecheck as uc
20 21
 import testtools
21 22
 
23
+import cinder.backup.manager  # noqa
22 24
 from cinder.cmd import status
23
-
25
+from cinder import context
26
+from cinder import db
27
+from cinder.db.sqlalchemy import api as sqla_api
28
+from cinder import exception
29
+from cinder import test
24 30
 import cinder.volume.manager as volume_manager
25 31
 
32
+
26 33
 CONF = cfg.CONF
27 34
 
28 35
 
@@ -30,6 +37,18 @@ CONF = cfg.CONF
30 37
 class TestCinderStatus(testtools.TestCase):
31 38
     """Test cases for the cinder-status upgrade check command."""
32 39
 
40
+    def _setup_database(self):
41
+        CONF.set_default('connection', 'sqlite://', 'database')
42
+        CONF.set_default('sqlite_synchronous', False, 'database')
43
+
44
+        if not test._DB_CACHE:
45
+            test._DB_CACHE = test.Database(
46
+                sqla_api, test.migration,
47
+                sql_connection=CONF.database.connection)
48
+        self.useFixture(test._DB_CACHE)
49
+        sqla_api._GET_METHODS = {}
50
+        self.addCleanup(CONF.reset)
51
+
33 52
     def setUp(self):
34 53
         super(TestCinderStatus, self).setUp()
35 54
         self.checks = status.Checks()
@@ -47,6 +66,9 @@ class TestCinderStatus(testtools.TestCase):
47 66
         self.find_file = patcher.start()
48 67
         self.find_file.return_value = '/etc/cinder/'
49 68
 
69
+        self._setup_database()
70
+        self.context = context.get_admin_context()
71
+
50 72
     def _set_config(self, key, value, group=None):
51 73
         CONF.set_override(key, value, group=group)
52 74
         self.addCleanup(CONF.clear_override, key, group=group)
@@ -209,3 +231,58 @@ class TestCinderStatus(testtools.TestCase):
209 231
         self._set_config('enabled_backends', None)
210 232
         result = self.checks._check_removed_drivers()
211 233
         self.assertEqual(uc.Code.SUCCESS, result.code)
234
+
235
+    @staticmethod
236
+    def uuid():
237
+        return str(uuid.uuid4())
238
+
239
+    def _create_service(self, **values):
240
+        values.setdefault('uuid', self.uuid())
241
+        db.service_create(self.context, values)
242
+
243
+    def _create_volume(self, **values):
244
+        values.setdefault('id', self.uuid())
245
+        values.setdefault('service_uuid', self.uuid())
246
+        try:
247
+            db.volume_create(self.context, values)
248
+        # Support setting deleted on creation
249
+        except exception.VolumeNotFound:
250
+            if values.get('deleted') is not True:
251
+                raise
252
+
253
+    def test__check_service_uuid_ok(self):
254
+        self._create_service()
255
+        self._create_service()
256
+        self._create_volume()
257
+        # Confirm that we ignored deleted entries
258
+        self._create_volume(service_uuid=None, deleted=True)
259
+        result = self.checks._check_service_uuid()
260
+        self.assertEqual(uc.Code.SUCCESS, result.code)
261
+
262
+    def test__check_service_uuid_fail_service(self):
263
+        self._create_service()
264
+        self._create_service(uuid=None)
265
+        self._create_volume()
266
+        result = self.checks._check_service_uuid()
267
+        self.assertEqual(uc.Code.FAILURE, result.code)
268
+
269
+    def test__check_service_uuid_fail_volume(self):
270
+        self._create_service()
271
+        self._create_volume(service_uuid=None)
272
+        result = self.checks._check_service_uuid()
273
+        self.assertEqual(uc.Code.FAILURE, result.code)
274
+
275
+    def test__check_attachment_specs_ok(self):
276
+        attach_uuid = self.uuid()
277
+        # Confirm that we ignore deleted attachment specs
278
+        db.attachment_specs_update_or_create(self.context, attach_uuid,
279
+                                             {'k': 'v'})
280
+        db.attachment_specs_delete(self.context, attach_uuid, 'k')
281
+        result = self.checks._check_attachment_specs()
282
+        self.assertEqual(uc.Code.SUCCESS, result.code)
283
+
284
+    def test__check_attachment_specs_fail(self):
285
+        db.attachment_specs_update_or_create(self.context, self.uuid(),
286
+                                             {'k': 'v', 'k2': 'v2'})
287
+        result = self.checks._check_attachment_specs()
288
+        self.assertEqual(uc.Code.FAILURE, result.code)

+ 0
- 47
cinder/tests/unit/test_cmd.py View File

@@ -2248,50 +2248,3 @@ class TestVolumeSharedTargetsOnlineMigration(test.TestCase):
2248 2248
             'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
2249 2249
         utils.create_service(ctxt, values)
2250 2250
         self.ctxt = ctxt
2251
-
2252
-    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
2253
-                return_value='1.8')
2254
-    def test_shared_targets_migrations(self, mock_version):
2255
-        """Ensure we can update the column."""
2256
-        # Run the migration and verify that we updated 1 entry
2257
-        with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
2258
-                        return_value={'connection_protocol': 'iSCSI',
2259
-                                      'shared_targets': False}):
2260
-            total, updated = (
2261
-                cinder_manage.shared_targets_online_data_migration(
2262
-                    self.ctxt, 10))
2263
-            self.assertEqual(3, total)
2264
-            self.assertEqual(3, updated)
2265
-
2266
-    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
2267
-                return_value='1.8')
2268
-    def test_shared_targets_migrations_non_iscsi(self, mock_version):
2269
-        """Ensure we can update the column."""
2270
-        # Run the migration and verify that we updated 1 entry
2271
-        with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
2272
-                        return_value={'connection_protocol': 'RBD'}):
2273
-            total, updated = (
2274
-                cinder_manage.shared_targets_online_data_migration(
2275
-                    self.ctxt, 10))
2276
-            self.assertEqual(3, total)
2277
-            self.assertEqual(3, updated)
2278
-
2279
-    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
2280
-                return_value='1.8')
2281
-    def test_shared_targets_migrations_with_limit(self, mock_version):
2282
-        """Ensure we update in batches."""
2283
-        # Run the migration and verify that we updated 1 entry
2284
-        with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
2285
-                        return_value={'connection_protocol': 'iSCSI',
2286
-                                      'shared_targets': False}):
2287
-            total, updated = (
2288
-                cinder_manage.shared_targets_online_data_migration(
2289
-                    self.ctxt, 2))
2290
-            self.assertEqual(3, total)
2291
-            self.assertEqual(2, updated)
2292
-
2293
-            total, updated = (
2294
-                cinder_manage.shared_targets_online_data_migration(
2295
-                    self.ctxt, 2))
2296
-            self.assertEqual(1, total)
2297
-            self.assertEqual(1, updated)

+ 0
- 203
cinder/tests/unit/test_db_api.py View File

@@ -27,7 +27,6 @@ import six
27 27
 from sqlalchemy.sql import operators
28 28
 
29 29
 from cinder.api import common
30
-from cinder.common import constants
31 30
 from cinder import context
32 31
 from cinder import db
33 32
 from cinder.db.sqlalchemy import api as sqlalchemy_api
@@ -163,79 +162,6 @@ class DBAPIServiceTestCase(BaseTest):
163 162
 
164 163
     """Unit tests for cinder.db.api.service_*."""
165 164
 
166
-    def test_service_uuid_migrations(self):
167
-        # Force create one entry with no UUID
168
-        sqlalchemy_api.service_create(self.ctxt, {
169
-            'host': 'host1',
170
-            'binary': constants.VOLUME_BINARY,
171
-            'topic': 'volume', })
172
-
173
-        # Create another one with a valid UUID
174
-        sqlalchemy_api.service_create(self.ctxt, {
175
-            'host': 'host2',
176
-            'binary': constants.VOLUME_BINARY,
177
-            'topic': 'volume',
178
-            'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'})
179
-
180
-        # Run the migration and verify that we updated 1 entry
181
-        total, updated = db.service_uuids_online_data_migration(
182
-            self.ctxt, 10)
183
-
184
-        self.assertEqual(1, total)
185
-        self.assertEqual(1, updated)
186
-
187
-    def test_service_uuid_migrations_with_limit(self):
188
-        sqlalchemy_api.service_create(self.ctxt, {
189
-            'host': 'host1',
190
-            'binary': constants.VOLUME_BINARY,
191
-            'topic': 'volume', })
192
-        sqlalchemy_api.service_create(self.ctxt, {
193
-            'host': 'host2',
194
-            'binary': constants.VOLUME_BINARY,
195
-            'topic': 'volume', })
196
-        sqlalchemy_api.service_create(self.ctxt, {
197
-            'host': 'host3',
198
-            'binary': constants.VOLUME_BINARY,
199
-            'topic': 'volume', })
200
-        # Run the migration and verify that we updated 1 entry
201
-        total, updated = db.service_uuids_online_data_migration(
202
-            self.ctxt, 2)
203
-
204
-        self.assertEqual(3, total)
205
-        self.assertEqual(2, updated)
206
-
207
-        # Now get the rest, intentionally setting max > what we should have
208
-        total, updated = db.service_uuids_online_data_migration(
209
-            self.ctxt, 2)
210
-
211
-        self.assertEqual(1, total)
212
-        self.assertEqual(1, updated)
213
-
214
-    @ddt.data({'count': 5, 'total': 3, 'updated': 3},
215
-              {'count': 2, 'total': 3, 'updated': 2})
216
-    @ddt.unpack
217
-    def test_backup_service_online_migration(self, count, total, updated):
218
-        volume = utils.create_volume(self.ctxt)
219
-        sqlalchemy_api.backup_create(self.ctxt, {
220
-            'service': 'cinder.backup.drivers.swift',
221
-            'volume_id': volume.id
222
-        })
223
-        sqlalchemy_api.backup_create(self.ctxt, {
224
-            'service': 'cinder.backup.drivers.ceph',
225
-            'volume_id': volume.id
226
-        })
227
-        sqlalchemy_api.backup_create(self.ctxt, {
228
-            'service': 'cinder.backup.drivers.glusterfs',
229
-            'volume_id': volume.id
230
-        })
231
-        sqlalchemy_api.backup_create(self.ctxt, {
232
-            'service': 'cinder.backup.drivers.fake_backup_service',
233
-            'volume_id': volume.id
234
-        })
235
-        t, u = db.backup_service_online_migration(self.ctxt, count)
236
-        self.assertEqual(total, t)
237
-        self.assertEqual(updated, u)
238
-
239 165
     def test_service_create(self):
240 166
         # Add a cluster value to the service
241 167
         values = {'cluster_name': 'cluster'}
@@ -461,65 +387,6 @@ class DBAPIServiceTestCase(BaseTest):
461 387
         self.assertIsInstance(binary_op, sqlalchemy_api.sql.functions.Function)
462 388
         self.assertEqual('binary', binary_op.name)
463 389
 
464
-    def test_volume_service_uuid_migrations(self):
465
-        # Force create one entry with no UUID
466
-        sqlalchemy_api.volume_create(self.ctxt,
467
-                                     {'host': 'host1@lvm-driver1#lvm-driver1'})
468
-
469
-        # Create another one with a valid UUID
470
-        sqlalchemy_api.volume_create(
471
-            self.ctxt,
472
-            {'host': 'host1@lvm-driver1#lvm-driver1',
473
-             'service_uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'})
474
-
475
-        # Need a service to query
476
-        values = {
477
-            'host': 'host1@lvm-driver1',
478
-            'binary': constants.VOLUME_BINARY,
479
-            'topic': constants.VOLUME_TOPIC}
480
-        utils.create_service(self.ctxt, values)
481
-
482
-        # Run the migration and verify that we updated 1 entry
483
-        total, updated = db.volume_service_uuids_online_data_migration(
484
-            self.ctxt, 10)
485
-
486
-        self.assertEqual(1, total)
487
-        self.assertEqual(1, updated)
488
-
489
-    def test_volume_service_uuid_migrations_with_limit(self):
490
-        """Test db migrate of volumes in batches."""
491
-        db.volume_create(
492
-            self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
493
-        db.volume_create(
494
-            self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
495
-        db.volume_create(
496
-            self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
497
-        # Entries with no host should be skipped
498
-        db.volume_create(self.ctxt, {'host': None})
499
-
500
-        values = {
501
-            'host': 'host1@lvm-driver1',
502
-            'binary': constants.VOLUME_BINARY,
503
-            'topic': constants.VOLUME_TOPIC,
504
-            'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
505
-        utils.create_service(self.ctxt, values)
506
-
507
-        # Run the migration and verify that we updated 2 entries
508
-        total, updated = db.volume_service_uuids_online_data_migration(
509
-            self.ctxt, 2)
510
-
511
-        # total = number of volumes that have hosts and don't have a
512
-        # service_uuid
513
-        self.assertEqual(3, total)
514
-        self.assertEqual(2, updated)
515
-
516
-        # Now get the last one (intentionally setting max > expected)
517
-        total, updated = db.volume_service_uuids_online_data_migration(
518
-            self.ctxt, 99)
519
-
520
-        self.assertEqual(1, total)
521
-        self.assertEqual(1, updated)
522
-
523 390
 
524 391
 @ddt.ddt
525 392
 class DBAPIVolumeTestCase(BaseTest):
@@ -3475,73 +3342,3 @@ class DBAPIGroupTestCase(BaseTest):
3475 3342
             self.assertEqual(
3476 3343
                 new_cluster_name + groups[i].cluster_name[len(cluster_name):],
3477 3344
                 db_groups[i].cluster_name)
3478
-
3479
-
3480
-class DBAPIAttachmentSpecsTestCase(BaseTest):
3481
-    def test_attachment_specs_online_data_migration(self):
3482
-        """Tests the online data migration initiated via cinder-manage"""
3483
-        # Create five attachment records:
3484
-        # 1. first attachment has specs but is deleted so it's ignored
3485
-        # 2. second attachment is already migrated (no attachment_specs
3486
-        #    entries) so it's ignored
3487
-        # 3. the remaining attachments have specs so they are migrated in
3488
-        #    in batches of 2
3489
-
3490
-        # Create an attachment record with specs and delete it.
3491
-        attachment = objects.VolumeAttachment(
3492
-            self.ctxt, attach_status='attaching', volume_id=fake.VOLUME_ID)
3493
-        attachment.create()
3494
-        # Create an attachment_specs entry for attachment.
3495
-        connector = {'host': '127.0.0.1'}
3496
-        db.attachment_specs_update_or_create(
3497
-            self.ctxt, attachment.id, connector)
3498
-        # Now delete the attachment which should also delete the specs.
3499
-        attachment.destroy()
3500
-        # Run the migration routine to see that there is nothing to migrate.
3501
-        total, migrated = db.attachment_specs_online_data_migration(
3502
-            self.ctxt, 50)
3503
-        self.assertEqual(0, total)
3504
-        self.assertEqual(0, migrated)
3505
-
3506
-        # Create a volume attachment with no specs (already migrated).
3507
-        attachment = objects.VolumeAttachment(
3508
-            self.ctxt, attach_status='attaching', volume_id=fake.VOLUME_ID,
3509
-            connector=connector)
3510
-        attachment.create()
3511
-        # Run the migration routine to see that there is nothing to migrate.
3512
-        total, migrated = db.attachment_specs_online_data_migration(
3513
-            self.ctxt, 50)
3514
-        self.assertEqual(0, total)
3515
-        self.assertEqual(0, migrated)
3516
-
3517
-        # We have to create a real volume because of the joinedload in the
3518
-        # DB API query to get the volume attachment.
3519
-        volume = db.volume_create(self.ctxt, {'host': 'host1'})
3520
-
3521
-        # Now create three volume attachments with specs and migrate them
3522
-        # in batches of 2 to show we are enforcing the limit.
3523
-        for x in range(3):
3524
-            attachment = objects.VolumeAttachment(
3525
-                self.ctxt, attach_status='attaching', volume_id=volume['id'])
3526
-            attachment.create()
3527
-            # Create an attachment_specs entry for the attachment.
3528
-            db.attachment_specs_update_or_create(
3529
-                self.ctxt, attachment.id, connector)
3530
-
3531
-        # Migrate 2 at a time.
3532
-        total, migrated = db.attachment_specs_online_data_migration(
3533
-            self.ctxt, 2)
3534
-        self.assertEqual(3, total)
3535
-        self.assertEqual(2, migrated)
3536
-
3537
-        # This should complete the migration.
3538
-        total, migrated = db.attachment_specs_online_data_migration(
3539
-            self.ctxt, 2)
3540
-        self.assertEqual(1, total)
3541
-        self.assertEqual(1, migrated)
3542
-
3543
-        # Run it one more time to make sure there is nothing left.
3544
-        total, migrated = db.attachment_specs_online_data_migration(
3545
-            self.ctxt, 2)
3546
-        self.assertEqual(0, total)
3547
-        self.assertEqual(0, migrated)

+ 5
- 0
releasenotes/notes/online-migration-checks-64b0d1732901e78e.yaml View File

@@ -0,0 +1,5 @@
1
+---
2
+upgrade:
3
+  - |
4
+    Two new checks are added to the ``cinder-status upgrade check`` CLI to
5
+    ensure that online data migrations from Queens onward have been completed.

Loading…
Cancel
Save