Remove newton-compat upgrade code on service startup
This upgrade compat check code was marked for removal
in Ocata. Since we're now in Queens, and this is burning
time on every service start, let's remove it.
Change-Id: I8528752630276bd6e2348f27721238115de504cd
Related-Bug: #1722326
(cherry picked from commit 6c630c5e53)
parent e1864ed8d2
commit d0dc23fb77
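
For context, the removed startup check is sketched below. On every service start, is_svc_upgrading_to_n() asked the service's RPC API class for its object version cap; a cap still pinned to '1.3' meant a Mitaka-to-Newton rolling upgrade was in progress. The sketch is assembled from the deleted hunks that follow; _FakeRPCAPI is a hypothetical stand-in for cinder's scheduler/volume/backup rpcapi classes, whose real determine_obj_version_cap() consults the minimum object version recorded for all services (the per-start cost the commit message refers to; note the tests below mock cinder.objects.Service.get_minimum_obj_version for exactly this reason).

    # Hypothetical stand-in for scheduler_rpcapi.SchedulerAPI,
    # volume_rpcapi.VolumeAPI and backup_rpcapi.BackupAPI; the real
    # classes derive the cap from service records in the database.
    class _FakeRPCAPI(object):
        @classmethod
        def determine_obj_version_cap(cls):
            return '1.7'  # anything but '1.3' means "not upgrading"

    # Body copied from the removed Service.is_svc_upgrading_to_n helper.
    def is_svc_upgrading_to_n(binary):
        """Given an RPC API class determine if the service is upgrading."""
        rpcapis = {'cinder-scheduler': _FakeRPCAPI,
                   'cinder-volume': _FakeRPCAPI,
                   'cinder-backup': _FakeRPCAPI}
        # If we are pinned to 1.3, then we are upgrading from M to N
        return rpcapis[binary].determine_obj_version_cap() == '1.3'

    assert not is_svc_upgrading_to_n('cinder-volume')
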
cinder/service.py
@@ -36,7 +36,6 @@ profiler = importutils.try_import('osprofiler.profiler')
 profiler_opts = importutils.try_import('osprofiler.opts')
 
 
-from cinder.backup import rpcapi as backup_rpcapi
 from cinder.common import constants
 from cinder import context
 from cinder import coordination
@@ -46,9 +45,7 @@ from cinder import objects
 from cinder.objects import base as objects_base
 from cinder.objects import fields
 from cinder import rpc
-from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import version
-from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import utils as vol_utils
 
 
@@ -157,18 +154,11 @@ class Service(service.Service):
         # result in us using None (if it's the first time the service is run)
         # or an old version (if this is a normal upgrade of a single service).
         ctxt = context.get_admin_context()
-        self.is_upgrading_to_n = self.is_svc_upgrading_to_n(binary)
         try:
             service_ref = objects.Service.get_by_args(ctxt, host, binary)
             service_ref.rpc_current_version = manager_class.RPC_API_VERSION
             obj_version = objects_base.OBJ_VERSIONS.get_current()
             service_ref.object_current_version = obj_version
-            # TODO(geguileo): In O we can remove the service upgrading part on
-            # the next equation, because by then all our services will be
-            # properly setting the cluster during volume migrations since
-            # they'll have the new Volume ORM model. But until then we can
-            # only set the cluster in the DB and pass added_to_cluster to
-            # init_host when we have completed the rolling upgrade from M to N.
 
             # added_to_cluster attribute marks when we consider that we have
             # just added a host to a cluster so we can include resources into
@@ -177,12 +167,9 @@ class Service(service.Service):
             # configuration has a cluster value. We don't want to do anything
             # automatic if the cluster is changed, in those cases we'll want
             # to use cinder manage command and to it manually.
-            self.added_to_cluster = (not service_ref.cluster_name and cluster
-                                     and not self.is_upgrading_to_n)
+            self.added_to_cluster = (not service_ref.cluster_name and cluster)
 
-            # TODO(geguileo): In O - Remove self.is_upgrading_to_n part
-            if (service_ref.cluster_name != cluster and
-                    not self.is_upgrading_to_n):
+            if service_ref.cluster_name != cluster:
                 LOG.info('This service has been moved from cluster '
                          '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                          'will %(opt_no)sbe moved to the new cluster',
@@ -201,10 +188,9 @@ class Service(service.Service):
             # We don't want to include cluster information on the service or
             # create the cluster entry if we are upgrading.
             self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
-            # TODO(geguileo): In O set added_to_cluster to True
             # We don't want to include resources in the cluster during the
             # start while we are still doing the rolling upgrade.
-            self.added_to_cluster = not self.is_upgrading_to_n
+            self.added_to_cluster = True
 
         self.report_interval = report_interval
         self.periodic_interval = periodic_interval
@@ -218,17 +204,6 @@ class Service(service.Service):
         self.backend_rpcserver = None
         self.cluster_rpcserver = None
 
-    # TODO(geguileo): Remove method in O since it will no longer be used.
-    @staticmethod
-    def is_svc_upgrading_to_n(binary):
-        """Given an RPC API class determine if the service is upgrading."""
-        rpcapis = {'cinder-scheduler': scheduler_rpcapi.SchedulerAPI,
-                   'cinder-volume': volume_rpcapi.VolumeAPI,
-                   'cinder-backup': backup_rpcapi.BackupAPI}
-        rpc_api = rpcapis[binary]
-        # If we are pinned to 1.3, then we are upgrading from M to N
-        return rpc_api.determine_obj_version_cap() == '1.3'
-
     def start(self):
         version_string = version.version_string()
         LOG.info('Starting %(topic)s node (version %(version_string)s)',
@@ -268,8 +243,7 @@ class Service(service.Service):
                                                 serializer)
             self.backend_rpcserver.start()
 
-        # TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
-        if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
+        if self.cluster:
             LOG.info('Starting %(topic)s cluster %(cluster)s (version '
                      '%(version)s)',
                      {'topic': self.topic, 'version': version_string,
@@ -366,19 +340,15 @@ class Service(service.Service):
             'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION,
             'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
         }
-        # TODO(geguileo): In O unconditionally set cluster_name like above
-        # If we are upgrading we have to ignore the cluster value
-        if not self.is_upgrading_to_n:
-            kwargs['cluster_name'] = self.cluster
+        kwargs['cluster_name'] = self.cluster
         service_ref = objects.Service(context=context, **kwargs)
         service_ref.create()
         Service.service_id = service_ref.id
-        # TODO(geguileo): In O unconditionally ensure that the cluster exists
-        if not self.is_upgrading_to_n:
-            self._ensure_cluster_exists(context, service_ref)
-            # If we have updated the service_ref with replication data from
-            # the cluster it will be saved.
-            service_ref.save()
+        self._ensure_cluster_exists(context, service_ref)
+        # If we have updated the service_ref with replication data from
+        # the cluster it will be saved.
+        service_ref.save()
 
     def __getattr__(self, key):
         manager = self.__dict__.get('manager', None)
cinder/tests/unit/test_service.py
@@ -70,9 +70,7 @@ class ExtendedService(service.Service):
 class ServiceManagerTestCase(test.TestCase):
     """Test cases for Services."""
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
-    def test_message_gets_to_manager(self, is_upgrading_mock):
+    def test_message_gets_to_manager(self):
         serv = service.Service('test',
                                'test',
                                'test',
@@ -80,9 +78,7 @@ class ServiceManagerTestCase(test.TestCase):
         serv.start()
         self.assertEqual('manager', serv.test_method())
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
-    def test_override_manager_method(self, is_upgrading_mock):
+    def test_override_manager_method(self):
         serv = ExtendedService('test',
                                'test',
                                'test',
@@ -90,11 +86,9 @@ class ServiceManagerTestCase(test.TestCase):
         serv.start()
         self.assertEqual('service', serv.test_method())
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
     @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'test': '1.5'})
     @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'test': '1.3'})
-    def test_reset(self, is_upgrading_mock):
+    def test_reset(self):
         serv = service.Service('test',
                                'test',
                                'test',
@@ -106,10 +100,7 @@ class ServiceManagerTestCase(test.TestCase):
 
 
 class ServiceFlagsTestCase(test.TestCase):
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
-    def test_service_enabled_on_create_based_on_flag(self,
-                                                     is_upgrading_mock=False):
+    def test_service_enabled_on_create_based_on_flag(self):
         ctxt = context.get_admin_context()
         self.flags(enable_new_services=True)
         host = 'foo'
@@ -125,9 +116,7 @@ class ServiceFlagsTestCase(test.TestCase):
         self.assertFalse(db_cluster.disabled)
         db.cluster_destroy(ctxt, db_cluster.id)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
-    def test_service_disabled_on_create_based_on_flag(self, is_upgrading_mock):
+    def test_service_disabled_on_create_based_on_flag(self):
         ctxt = context.get_admin_context()
         self.flags(enable_new_services=False)
         host = 'foo'
@@ -162,7 +151,7 @@ class ServiceTestCase(test.TestCase):
         self.ctxt = context.get_admin_context()
 
     def _check_app(self, app, cluster=None, cluster_exists=None,
-                   is_upgrading=False, svc_id=None, added_to_cluster=None):
+                   svc_id=None, added_to_cluster=True):
         """Check that Service instance and DB service and cluster are ok."""
         self.assertIsNotNone(app)
 
@@ -184,9 +173,6 @@ class ServiceTestCase(test.TestCase):
 
         clusters = objects.ClusterList.get_all(self.ctxt)
 
-        if added_to_cluster is None:
-            added_to_cluster = not is_upgrading
-
         if cluster_name:
             # Make sure we have created the cluster in the DB
             self.assertEqual(1, len(clusters))
@@ -199,41 +185,14 @@ class ServiceTestCase(test.TestCase):
 
         self.assertEqual(added_to_cluster, app.added_to_cluster)
 
-    @ddt.data(False, True)
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n')
-    def test_create(self, is_upgrading, is_upgrading_mock):
-        """Test non clustered service creation."""
-        is_upgrading_mock.return_value = is_upgrading
-
-        # NOTE(vish): Create was moved out of mock replay to make sure that
-        # the looping calls are created in StartService.
-        app = service.Service.create(host=self.host,
-                                     binary=self.binary,
-                                     topic=self.topic)
-        self._check_app(app, is_upgrading=is_upgrading)
-
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
-    def test_create_with_cluster_not_upgrading(self, is_upgrading_mock):
+    def test_create_with_cluster_not_upgrading(self):
         """Test DB cluster creation when service is created."""
         cluster_name = 'cluster'
         app = service.Service.create(host=self.host, binary=self.binary,
                                      cluster=cluster_name, topic=self.topic)
         self._check_app(app, cluster_name)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=True)
-    def test_create_with_cluster_upgrading(self, is_upgrading_mock):
-        """Test that we don't create the cluster while we are upgrading."""
-        cluster_name = 'cluster'
-        app = service.Service.create(host=self.host, binary=self.binary,
-                                     cluster=cluster_name, topic=self.topic)
-        self._check_app(app, cluster_name, cluster_exists=False,
-                        is_upgrading=True)
-
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
-    def test_create_svc_exists_upgrade_cluster(self, is_upgrading_mock):
+    def test_create_svc_exists_upgrade_cluster(self):
         """Test that we update cluster_name field when cfg has changed."""
         # Create the service in the DB
         db_svc = db.service_create(context.get_admin_context(),
@@ -243,29 +202,12 @@ class ServiceTestCase(test.TestCase):
         cluster_name = 'cluster'
         app = service.Service.create(host=self.host, binary=self.binary,
                                      cluster=cluster_name, topic=self.topic)
-        self._check_app(app, cluster_name, svc_id=db_svc.id)
+        self._check_app(app, cluster_name, svc_id=db_svc.id,
+                        added_to_cluster=cluster_name)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=True)
-    def test_create_svc_exists_not_upgrade_cluster(self, is_upgrading_mock):
-        """Test we don't update cluster_name on cfg change when upgrading."""
-        # Create the service in the DB
-        db_svc = db.service_create(context.get_admin_context(),
-                                   {'host': self.host, 'binary': self.binary,
-                                    'topic': self.topic,
-                                    'cluster': None})
-        cluster_name = 'cluster'
-        app = service.Service.create(host=self.host, binary=self.binary,
-                                     cluster=cluster_name, topic=self.topic)
-        self._check_app(app, cluster_name, cluster_exists=False,
-                        is_upgrading=True, svc_id=db_svc.id)
-
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
     @mock.patch.object(objects.service.Service, 'get_by_args')
     @mock.patch.object(objects.service.Service, 'get_by_id')
-    def test_report_state_newly_disconnected(self, get_by_id, get_by_args,
-                                             is_upgrading_mock):
+    def test_report_state_newly_disconnected(self, get_by_id, get_by_args):
         get_by_args.side_effect = exception.NotFound()
         get_by_id.side_effect = db_exc.DBConnectionError()
         with mock.patch.object(objects.service, 'db') as mock_db:
@@ -282,12 +224,9 @@ class ServiceTestCase(test.TestCase):
             self.assertTrue(serv.model_disconnected)
             self.assertFalse(mock_db.service_update.called)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
     @mock.patch.object(objects.service.Service, 'get_by_args')
     @mock.patch.object(objects.service.Service, 'get_by_id')
-    def test_report_state_disconnected_DBError(self, get_by_id, get_by_args,
-                                               is_upgrading_mock):
+    def test_report_state_disconnected_DBError(self, get_by_id, get_by_args):
         get_by_args.side_effect = exception.NotFound()
         get_by_id.side_effect = db_exc.DBError()
         with mock.patch.object(objects.service, 'db') as mock_db:
@@ -304,12 +243,9 @@ class ServiceTestCase(test.TestCase):
             self.assertTrue(serv.model_disconnected)
             self.assertFalse(mock_db.service_update.called)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
     @mock.patch('cinder.db.sqlalchemy.api.service_update')
     @mock.patch('cinder.db.sqlalchemy.api.service_get')
-    def test_report_state_newly_connected(self, get_by_id, service_update,
-                                          is_upgrading_mock):
+    def test_report_state_newly_connected(self, get_by_id, service_update):
         get_by_id.return_value = self.service_ref
 
         serv = service.Service(
@@ -325,9 +261,7 @@ class ServiceTestCase(test.TestCase):
         self.assertFalse(serv.model_disconnected)
         self.assertTrue(service_update.called)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
-    def test_report_state_manager_not_working(self, is_upgrading_mock):
+    def test_report_state_manager_not_working(self):
         with mock.patch('cinder.db') as mock_db:
             mock_db.service_get.return_value = self.service_ref
 
@@ -344,9 +278,7 @@ class ServiceTestCase(test.TestCase):
         serv.manager.is_working.assert_called_once_with()
         self.assertFalse(mock_db.service_update.called)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
-    def test_service_with_long_report_interval(self, is_upgrading_mock):
+    def test_service_with_long_report_interval(self):
         self.override_config('service_down_time', 10)
         self.override_config('report_interval', 10)
         service.Service.create(
@@ -354,12 +286,9 @@ class ServiceTestCase(test.TestCase):
             manager="cinder.tests.unit.test_service.FakeManager")
         self.assertEqual(25, CONF.service_down_time)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
     @mock.patch.object(rpc, 'get_server')
     @mock.patch('cinder.db')
-    def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc,
-                                              is_upgrading_mock):
+    def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc):
         serv = service.Service(
             self.host,
             self.binary,
@@ -373,8 +302,6 @@ class ServiceTestCase(test.TestCase):
         serv.rpcserver.stop.assert_called_once_with()
         serv.rpcserver.wait.assert_called_once_with()
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                return_value=False)
     @mock.patch('cinder.service.Service.report_state')
     @mock.patch('cinder.service.Service.periodic_tasks')
     @mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall')
@@ -382,7 +309,7 @@
     @mock.patch('cinder.db')
     def test_service_stop_waits_for_timers(self, mock_db, mock_rpc,
                                            mock_loopcall, mock_periodic,
-                                           mock_report, is_upgrading_mock):
+                                           mock_report):
         """Test that we wait for loopcalls only if stop succeeds."""
         serv = service.Service(
             self.host,
@@ -461,21 +388,16 @@ class ServiceTestCase(test.TestCase):
                                      cluster=None, topic=self.topic)
         self._check_rpc_servers_and_init_host(app, True, None)
 
-    @ddt.data('1.3', '1.7')
     @mock.patch('cinder.objects.Service.get_minimum_obj_version')
-    def test_start_rpc_and_init_host_cluster(self, obj_version,
-                                             get_min_obj_mock):
+    def test_start_rpc_and_init_host_cluster(self, get_min_obj_mock):
         """Test that with cluster we create the rpc service."""
-        get_min_obj_mock.return_value = obj_version
+        get_min_obj_mock.return_value = '1.7'
         cluster = 'cluster@backend#pool'
         self.host = 'host@backend#pool'
         app = service.Service.create(host=self.host, binary='cinder-volume',
                                      cluster=cluster, topic=self.topic)
-        self._check_rpc_servers_and_init_host(app, obj_version != '1.3',
-                                              cluster)
+        self._check_rpc_servers_and_init_host(app, True, cluster)
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                mock.Mock(return_value=False))
     @mock.patch('cinder.objects.Cluster.get_by_id')
     def test_ensure_cluster_exists_no_cluster(self, get_mock):
         app = service.Service.create(host=self.host,
@@ -486,8 +408,6 @@ class ServiceTestCase(test.TestCase):
         get_mock.assert_not_called()
         self.assertEqual({}, svc.cinder_obj_get_changes())
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                mock.Mock(return_value=False))
     @mock.patch('cinder.objects.Cluster.get_by_id')
     def test_ensure_cluster_exists_cluster_exists_non_relicated(self,
                                                                 get_mock):
@@ -506,8 +426,6 @@ class ServiceTestCase(test.TestCase):
                                           binary=app.binary)
         self.assertEqual({}, svc.cinder_obj_get_changes())
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                mock.Mock(return_value=False))
     @mock.patch('cinder.objects.Cluster.get_by_id')
     def test_ensure_cluster_exists_cluster_change(self, get_mock):
         """We copy replication fields from the cluster to the service."""
@@ -527,8 +445,6 @@ class ServiceTestCase(test.TestCase):
                                           binary=app.binary)
         self.assertEqual(changes, svc.cinder_obj_get_changes())
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                mock.Mock(return_value=False))
     @mock.patch('cinder.objects.Cluster.get_by_id')
     def test_ensure_cluster_exists_cluster_no_change(self, get_mock):
         """Don't copy replication fields from cluster if replication error."""
@@ -550,8 +466,6 @@ class ServiceTestCase(test.TestCase):
                                           binary=app.binary)
         self.assertEqual({}, svc.cinder_obj_get_changes())
 
-    @mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
-                mock.Mock(return_value=False))
     def test_ensure_cluster_exists_cluster_create_replicated_and_non(self):
         """We use service replication fields to create the cluster."""
         changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER,