conf: Remove config option compute_manager

The compute_manager config option was deprecated in
13.0.0, so it can be removed now.

Depends-On: I93b62372e357db4f7c39992c244b2ec9e594ad9c

Implements: bp centralize-config-options-ocata

Change-Id: I84503655bd9675ff441073e51059202142db22ab
ChangBo Guo(gcb) 2016-05-21 13:42:28 +08:00
parent 1b785e5f0c
commit d9a7c16c30
19 changed files with 38 additions and 44 deletions
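In practice, every consumer that used to resolve the manager class through the
deprecated option now imports and instantiates it directly. A minimal
before/after sketch of the pattern repeated throughout the hunks below
(CONF is nova.conf.CONF):

    # Before (Mitaka and earlier): indirect lookup via the config option.
    #   from oslo_utils import importutils
    #   compute = importutils.import_object(CONF.compute_manager)

    # After: the single manager implementation is imported directly.
    from nova.compute import manager
    compute = manager.ComputeManager()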

@@ -157,13 +157,6 @@ Possible Values:
 * Any positive integer
 * None (default value)
 """),
-    # NOTE(sdague): Ironic is still using this facility for their HA
-    # manager. Ensure they are sorted before removing this.
-    cfg.StrOpt('compute_manager',
-               default='nova.compute.manager.ComputeManager',
-               deprecated_for_removal=True,
-               deprecated_since='13.0.0',
-               help='Full class name for the Manager for compute'),
     # NOTE(sdague): the network_manager has a bunch of different in
     # tree classes that are still legit options. In Newton we should
     # turn this into a selector.
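For reference, a minimal sketch of what the deleted declaration did, using the
real oslo.config API: deprecated_for_removal=True makes oslo.config log a
removal warning whenever the option is found in a config file, which is how
operators were put on notice from 13.0.0 (Mitaka) onward.

    from oslo_config import cfg

    # Reproduced from the deleted lines above: a StrOpt flagged for
    # removal. oslo.config warns when a deprecated-for-removal option is
    # actually set, so consumers had a full cycle to stop using it.
    compute_manager_opt = cfg.StrOpt(
        'compute_manager',
        default='nova.compute.manager.ComputeManager',
        deprecated_for_removal=True,
        deprecated_since='13.0.0',
        help='Full class name for the Manager for compute')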

@@ -48,6 +48,7 @@ LOG = logging.getLogger(__name__)
 CONF = nova.conf.CONF
 
 SERVICE_MANAGERS = {
+    'nova-compute': 'nova.compute.manager.ComputeManager',
     'nova-console': 'nova.console.manager.ConsoleProxyManager',
     'nova-consoleauth': 'nova.consoleauth.manager.ConsoleAuthManager',
     'nova-cert': 'nova.cert.manager.CertManager',
@@ -208,10 +209,6 @@ class Service(service.Service):
             topic = binary.rpartition('nova-')[2]
         if not manager:
             manager = SERVICE_MANAGERS.get(binary)
-            if manager is None:
-                manager_cls = ('%s_manager' %
-                               binary.rpartition('nova-')[2])
-                manager = CONF.get(manager_cls, None)
         if report_interval is None:
             report_interval = CONF.report_interval
         if periodic_enable is None:
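With the fallback gone, Service.create resolves a binary's manager in exactly
two steps: an explicitly passed manager wins, otherwise SERVICE_MANAGERS must
know the binary. A self-contained sketch of the resulting behaviour
(resolve_manager is illustrative, not a real Nova function):

    SERVICE_MANAGERS = {
        'nova-compute': 'nova.compute.manager.ComputeManager',
        'nova-console': 'nova.console.manager.ConsoleProxyManager',
    }

    def resolve_manager(binary, manager=None):
        # The old third step, a '<topic>_manager' lookup on CONF, is gone;
        # a binary that is neither passed a manager nor listed gets None.
        return manager or SERVICE_MANAGERS.get(binary)

    assert resolve_manager('nova-compute') == (
        'nova.compute.manager.ComputeManager')
    assert resolve_manager('nova-api', manager='x.Y') == 'x.Y'
    assert resolve_manager('nova-unknown') is None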

@@ -98,7 +98,8 @@ class _IntegratedTestBase(test.TestCase):
         self.compute = self._setup_compute_service()
         self.consoleauth = self.start_service('consoleauth')
-        self.network = self.start_service('network')
+        self.network = self.start_service('network',
+                                          manager=CONF.network_manager)
         self.scheduler = self._setup_scheduler_service()
 
     def get_unused_server_name(self):
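The test changes that follow all share one cause: network_manager is still a
live, selectable option (see the NOTE kept in the first hunk), so nova-network
has no SERVICE_MANAGERS entry, and without the removed CONF fallback each test
must name the manager itself. The call shape, mirroring the hunk above
(start_service is the existing helper on nova's test base class):

    # Inside a TestCase.setUp(); CONF is nova.conf.CONF.
    self.network = self.start_service('network',
                                      manager=CONF.network_manager)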

@@ -76,7 +76,7 @@ class NotificationSampleTestBase(test.TestCase,
         self.start_service('conductor')
         self.start_service('scheduler')
-        self.start_service('network')
+        self.start_service('network', manager=CONF.network_manager)
         self.compute = self.start_service('compute')
 
     def _get_notification_sample(self, sample):

@@ -43,7 +43,8 @@ class TestServerGet(test.TestCase):
         self.start_service('conductor')
         self.flags(driver='chance_scheduler', group='scheduler')
         self.start_service('scheduler')
-        self.network = self.start_service('network')
+        self.network = self.start_service('network',
+                                          manager=CONF.network_manager)
         self.compute = self.start_service('compute')
         self.useFixture(cast_as_call.CastAsCall(self.stubs))

@@ -51,7 +51,8 @@ class TestServerGet(test.TestCase):
         self.start_service('conductor')
         self.flags(driver='chance_scheduler', group='scheduler')
         self.start_service('scheduler')
-        self.network = self.start_service('network')
+        self.network = self.start_service('network',
+                                          manager=CONF.network_manager)
         self.compute = self.start_service('compute')
         self.consoleauth = self.start_service('consoleauth')

@@ -70,7 +70,8 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
         self.start_service('conductor')
         self.flags(driver='chance_scheduler', group='scheduler')
         self.start_service('scheduler')
-        self.network = self.start_service('network')
+        self.network = self.start_service('network',
+                                          manager=CONF.network_manager)
         self.compute = self.start_service('compute', host='test_compute1')
         self.consoleauth = self.start_service('consoleauth')

@@ -111,7 +111,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
     def setUp(self):
         super(ServerGroupTestV21, self).setUp()
-        self.start_service('network')
+        self.start_service('network', manager=CONF.network_manager)
         self.compute = self.start_service('compute')
 
         # NOTE(gibi): start a second compute host to be able to test affinity

@@ -32,7 +32,6 @@ from oslo_log import log as logging
 import oslo_messaging as messaging
 from oslo_serialization import jsonutils
 from oslo_utils import fixture as utils_fixture
-from oslo_utils import importutils
 from oslo_utils import timeutils
 from oslo_utils import units
 from oslo_utils import uuidutils
@@ -154,7 +153,7 @@ class BaseTestCase(test.TestCase):
         fake_notifier.stub_notifier(self)
         self.addCleanup(fake_notifier.reset)
 
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = compute_manager.ComputeManager()
         # execute power syncing synchronously for testing:
         self.compute._sync_power_pool = eventlet_utils.SyncPool()

@@ -22,7 +22,6 @@ import mock
 import netaddr
 import oslo_messaging as messaging
 from oslo_serialization import jsonutils
-from oslo_utils import importutils
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
 import six
@@ -73,7 +72,7 @@ CONF = nova.conf.CONF
 class ComputeManagerUnitTestCase(test.NoDBTestCase):
     def setUp(self):
         super(ComputeManagerUnitTestCase, self).setUp()
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         self.context = context.RequestContext(fakes.FAKE_USER_ID,
                                               fakes.FAKE_PROJECT_ID)
@@ -3325,7 +3324,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
 class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
     def setUp(self):
         super(ComputeManagerBuildInstanceTestCase, self).setUp()
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         self.context = context.RequestContext(fakes.FAKE_USER_ID,
                                               fakes.FAKE_PROJECT_ID)
         self.instance = fake_instance.fake_instance_obj(self.context,
@@ -3394,7 +3393,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
     def test_build_and_run_instance_with_unlimited_max_concurrent_builds(self):
         self.flags(max_concurrent_builds=0)
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         self._test_build_and_run_instance()
 
     @mock.patch.object(objects.InstanceActionEvent,
@@ -4563,7 +4562,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
 class ComputeManagerMigrationTestCase(test.NoDBTestCase):
     def setUp(self):
         super(ComputeManagerMigrationTestCase, self).setUp()
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         self.context = context.RequestContext(fakes.FAKE_USER_ID,
                                               fakes.FAKE_PROJECT_ID)
         self.image = {}
@@ -5293,7 +5292,7 @@ class ComputeManagerInstanceUsageAuditTestCase(test.TestCase):
     def test_deleted_instance(self, mock_task_log):
         mock_task_log.get.return_value = None
-        compute = importutils.import_object(CONF.compute_manager)
+        compute = manager.ComputeManager()
         admin_context = context.get_admin_context()
         fake_db_flavor = fake_flavor.fake_db_flavor()

@@ -22,10 +22,10 @@ import uuid
 import mock
 from oslo_serialization import jsonutils
-from oslo_utils import importutils
 import six
 
 from nova.compute import flavors
+from nova.compute import manager
 from nova.compute import power_state
 from nova.compute import task_states
 from nova.compute import utils as compute_utils
@@ -400,7 +400,7 @@ class UsageInfoTestCase(test.TestCase):
         self.flags(compute_driver='fake.FakeDriver',
                    network_manager='nova.network.manager.FlatManager')
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         self.user_id = 'fake'
         self.project_id = 'fake'
         self.context = context.RequestContext(self.user_id, self.project_id)

@@ -13,8 +13,8 @@
 """Tests for expectations of behaviour from the Xen driver."""
 
 import mock
-from oslo_utils import importutils
 
+from nova.compute import manager
 from nova.compute import power_state
 import nova.conf
 from nova import context
@@ -37,7 +37,7 @@ class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):
                    group='xenserver')
         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
 
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         # execute power syncing synchronously for testing:
         self.compute._sync_power_pool = eventlet_utils.SyncPool()

@@ -14,8 +14,7 @@
 # under the License.
 
 """Tests for compute service with multiple compute nodes."""
 
-from oslo_utils import importutils
-
+from nova.compute import manager
 import nova.conf
 from nova import context
 from nova import objects
@@ -65,7 +64,7 @@ class MultiNodeComputeTestCase(BaseTestCase):
     def setUp(self):
         super(MultiNodeComputeTestCase, self).setUp()
         self.flags(compute_driver='fake.FakeDriver')
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
 
         def fake_get_compute_nodes_in_db(context, use_slave=False):
             fake_compute_nodes = [{'local_gb': 259,

@@ -47,7 +47,8 @@ class QuotaIntegrationTestCase(test.TestCase):
                    network_manager='nova.network.manager.FlatDHCPManager')
 
         # Apparently needed by the RPC tests...
-        self.network = self.start_service('network')
+        self.network = self.start_service('network',
+                                          manager=CONF.network_manager)
 
         self.user_id = 'admin'
         self.project_id = 'admin'

@@ -104,7 +104,8 @@ class ServiceTestCase(test.NoDBTestCase):
         # NOTE(vish): Create was moved out of mox replay to make sure that
         #             the looping calls are created in StartService.
         app = service.Service.create(host=self.host, binary=self.binary,
-                                     topic=self.topic)
+                                     topic=self.topic,
+                                     manager='nova.tests.unit.test_service.FakeManager')
 
         self.assertTrue(app)
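The same reasoning applies here: the service test uses a fake binary that
SERVICE_MANAGERS cannot know, so once the CONF fallback is gone, Service.create
would resolve no manager at all. The test therefore pins its test double
explicitly; the new call, restated from the hunk above:

    # 'nova.tests.unit.test_service.FakeManager' is the test's own stub
    # manager class; without the explicit argument it would now be None.
    app = service.Service.create(host=self.host, binary=self.binary,
                                 topic=self.topic,
                                 manager='nova.tests.unit.test_service.'
                                         'FakeManager')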

@@ -47,7 +47,6 @@ from oslo_service import loopingcall
 from oslo_utils import encodeutils
 from oslo_utils import fileutils
 from oslo_utils import fixture as utils_fixture
-from oslo_utils import importutils
 from oslo_utils import units
 from oslo_utils import uuidutils
 from oslo_utils import versionutils
@@ -7551,7 +7550,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
         self.assertEqual(0, mock_get_instance_disk_info.call_count)
 
     def test_live_migration_update_graphics_xml(self):
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         instance_dict = dict(self.test_instance)
         instance_dict.update({'host': 'fake',
                               'power_state': power_state.RUNNING,
@@ -7607,7 +7606,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
                           False, migrate_data, guest, [])
 
     def test_live_migration_update_volume_xml(self):
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         instance_dict = dict(self.test_instance)
         instance_dict.update({'host': 'fake',
                               'power_state': power_state.RUNNING,
@@ -7661,7 +7660,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
                               guest, migrate_data, mock.ANY)
 
     def test_live_migration_with_valid_target_connect_addr(self):
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         instance_dict = dict(self.test_instance)
         instance_dict.update({'host': 'fake',
                               'power_state': power_state.RUNNING,
@@ -7931,7 +7930,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
     @mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
     def test_live_migration_update_serial_console_xml(self, mock_xml,
                                                       mock_migrate, mock_get):
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         instance_ref = self.test_instance
 
         xml_tmpl = ("<domain type='kvm'>"
@@ -7976,7 +7975,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
             dxml=target_xml, flags=mock.ANY, bandwidth=bandwidth)
 
     def test_live_migration_fails_without_serial_console_address(self):
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         self.flags(enabled=True, group='serial_console')
         self.flags(proxyclient_address='', group='serial_console')
         instance_dict = dict(self.test_instance)
@@ -8081,7 +8080,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
     def test_live_migration_raises_exception(self):
         # Confirms recover method is called when exceptions are raised.
         # Preparing data
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         instance_dict = dict(self.test_instance)
         instance_dict.update({'host': 'fake',
                               'power_state': power_state.RUNNING,

@@ -23,9 +23,9 @@ from oslo_concurrency import lockutils
 from oslo_concurrency import processutils
 from oslo_log import formatters
 from oslo_log import log as logging
-from oslo_utils import importutils
 from six.moves import cStringIO
 
+from nova.compute import manager as compute_manager
 import nova.conf
 from nova import context
 from nova import objects
@@ -716,7 +716,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
         self.flags(instances_path=tmpdir)
         ctxt = context.get_admin_context()
         mock_instance_list.return_value = fake_instances(ctxt)
-        compute = importutils.import_object(CONF.compute_manager)
+        compute = compute_manager.ComputeManager()
         compute._run_image_cache_manager_pass(ctxt)
 
         filters = {
             'host': ['fake-mini'],

@@ -34,6 +34,7 @@ import six
 import testtools
 
 from nova.compute import api as compute_api
+from nova.compute import manager
 from nova.compute import power_state
 from nova.compute import task_states
 from nova.compute import utils as compute_utils
@@ -2978,7 +2979,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
         self.context = context.get_admin_context()
         self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-        self.compute = importutils.import_object(CONF.compute_manager)
+        self.compute = manager.ComputeManager()
         self.api = compute_api.AggregateAPI()
         values = {'name': 'test_aggr',
                   'metadata': {'availability_zone': 'test_zone',

@@ -6,6 +6,7 @@ upgrade:
       options are removed.
 
       - ``metadata_manager``
+      - ``compute_manager``
      - ``console_manager``
      - ``consoleauth_manager``
      - ``cert_manager``