Pull singleton config check cruft out of SG API

We were going through a bunch of trouble to make the servicegroup API
object a singleton, and only check configuration options once. This made
the unit tests for the servicegroup drivers more complex than they
needed to be. In this patch, we just change the SG API object's
constructor to check the appropriate servicegroup configuration options
as it needs to, and we adapt the unit tests for all of the SG drivers to
not involve the Service object at all.

The memcache and DB driver unit tests are converted to NoDBTestCase
derivatives, reducing the runtime for the two test cases from around 8
seconds (~1 second per test method) to less than 0.20 seconds.

NOTE: The ZooKeeper driver's unit test never runs at all. It is always
skipped.

NOTE: The next patch in this series refactors the service group API
to make the behaviour of all drivers consistent. Currently, the
ZooKeeper driver has different behaviour from the other drivers in the
get_all() method: it raises exception.ServiceGroupUnavailable() if no
services are found, unlike the DB and MC drivers, which just return an
empty list. Similarly, the ZK driver's join() method returns a
FakeLoopingCall() object, while the other drivers return
None.

Related blueprint: servicegroup-api-is-up-host-topic

Change-Id: Id84ab3fdfcd6b45e1015d6dac9faec5c01fa42ad
This commit is contained in:
Jay Pipes
2015-01-25 12:05:06 -08:00
committed by EdLeafe
parent a4f9c1b9f4
commit f05eecbc3f
6 changed files with 159 additions and 324 deletions

View File

@@ -23,6 +23,12 @@ from nova.i18n import _, _LW
from nova.openstack.common import log as logging from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_driver_name_class_mapping = {
'db': 'nova.servicegroup.drivers.db.DbDriver',
'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver',
'mc': 'nova.servicegroup.drivers.mc.MemcachedDriver'
}
_default_driver = 'db' _default_driver = 'db'
servicegroup_driver_opt = cfg.StrOpt('servicegroup_driver', servicegroup_driver_opt = cfg.StrOpt('servicegroup_driver',
default=_default_driver, default=_default_driver,
@@ -39,42 +45,12 @@ INITIAL_REPORTING_DELAY = 5
class API(object): class API(object):
_driver = None def __init__(self, *args, **kwargs):
_driver_name_class_mapping = {
'db': 'nova.servicegroup.drivers.db.DbDriver',
'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver',
'mc': 'nova.servicegroup.drivers.mc.MemcachedDriver'
}
def __new__(cls, *args, **kwargs):
'''Create an instance of the servicegroup API. '''Create an instance of the servicegroup API.
args and kwargs are passed down to the servicegroup driver when it gets args and kwargs are passed down to the servicegroup driver when it gets
created. No args currently exist, though. Valid kwargs are: created.
db_allowed - Boolean. False if direct db access is not allowed and
alternative data access (conductor) should be used
instead.
''' '''
if not cls._driver:
LOG.debug('ServiceGroup driver defined as an instance of %s',
str(CONF.servicegroup_driver))
driver_name = CONF.servicegroup_driver
try:
driver_class = cls._driver_name_class_mapping[driver_name]
except KeyError:
raise TypeError(_("unknown ServiceGroup driver name: %s")
% driver_name)
cls._driver = importutils.import_object(driver_class,
*args, **kwargs)
return super(API, cls).__new__(cls)
def __init__(self, *args, **kwargs):
self.basic_config_check()
def basic_config_check(self):
"""Perform basic config check."""
# Make sure report interval is less than service down time # Make sure report interval is less than service down time
report_interval = CONF.report_interval report_interval = CONF.report_interval
if CONF.service_down_time <= report_interval: if CONF.service_down_time <= report_interval:
@@ -88,6 +64,16 @@ class API(object):
'report_interval': report_interval, 'report_interval': report_interval,
'new_service_down_time': new_service_down_time}) 'new_service_down_time': new_service_down_time})
CONF.set_override('service_down_time', new_service_down_time) CONF.set_override('service_down_time', new_service_down_time)
LOG.debug('ServiceGroup driver defined as an instance of %s',
str(CONF.servicegroup_driver))
driver_name = CONF.servicegroup_driver
try:
driver_class = _driver_name_class_mapping[driver_name]
except KeyError:
raise TypeError(_("unknown ServiceGroup driver name: %s")
% driver_name)
self._driver = importutils.import_object(driver_class,
*args, **kwargs)
def join(self, member_id, group_id, service=None): def join(self, member_id, group_id, service=None):
"""Add a new member to the ServiceGroup """Add a new member to the ServiceGroup

View File

@@ -34,6 +34,14 @@ LOG = logging.getLogger(__name__)
class DbDriver(base.Driver): class DbDriver(base.Driver):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
"""Creates an instance of the DB-based servicegroup driver.
Valid kwargs are:
db_allowed - Boolean. False if direct db access is not allowed and
alternative data access (conductor) should be used
instead.
"""
self.db_allowed = kwargs.get('db_allowed', True) self.db_allowed = kwargs.get('db_allowed', True)
self.conductor_api = conductor.API(use_local=self.db_allowed) self.conductor_api = conductor.API(use_local=self.db_allowed)
self.service_down_time = CONF.service_down_time self.service_down_time = CONF.service_down_time

View File

@@ -31,7 +31,6 @@ from nova.servicegroup.drivers import base
CONF = cfg.CONF CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service') CONF.import_opt('service_down_time', 'nova.service')
CONF.import_opt('memcached_servers', 'nova.openstack.common.memorycache')
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@@ -40,8 +39,7 @@ LOG = logging.getLogger(__name__)
class MemcachedDriver(base.Driver): class MemcachedDriver(base.Driver):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
test = kwargs.get('test') if not CONF.memcached_servers:
if not CONF.memcached_servers and not test:
raise RuntimeError(_('memcached_servers not defined')) raise RuntimeError(_('memcached_servers not defined'))
self.mc = memorycache.get_client() self.mc = memorycache.get_client()
self.db_allowed = kwargs.get('db_allowed', True) self.db_allowed = kwargs.get('db_allowed', True)

View File

@@ -13,132 +13,95 @@
# under the License. # under the License.
import datetime import datetime
import mock
import fixtures
from oslo_utils import timeutils
from nova import context
from nova import db
from nova import service
from nova import servicegroup from nova import servicegroup
from nova import test from nova import test
class ServiceFixture(fixtures.Fixture): class DBServiceGroupTestCase(test.NoDBTestCase):
def __init__(self, host, binary, topic):
super(ServiceFixture, self).__init__()
self.host = host
self.binary = binary
self.topic = topic
self.serv = None
def setUp(self):
super(ServiceFixture, self).setUp()
self.serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.unit.test_service.FakeManager',
1, 1)
self.addCleanup(self.serv.kill)
class DBServiceGroupTestCase(test.TestCase):
def setUp(self): def setUp(self):
super(DBServiceGroupTestCase, self).setUp() super(DBServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None
self.flags(servicegroup_driver='db')
self.down_time = 15 self.down_time = 15
self.flags(enable_new_services=True) self.flags(service_down_time=self.down_time,
self.flags(service_down_time=self.down_time) servicegroup_driver='db')
self.servicegroup_api = servicegroup.API() self.servicegroup_api = servicegroup.API()
self._host = 'foo'
self._binary = 'nova-fake'
self._topic = 'unittest'
self._ctx = context.get_admin_context()
def test_DB_driver(self): @mock.patch('oslo_utils.timeutils.utcnow')
serv = self.useFixture( def test_is_up(self, now_mock):
ServiceFixture(self._host, self._binary, self._topic)).serv service_ref = {
serv.start() 'host': 'fake-host',
service_ref = db.service_get_by_args(self._ctx, 'topic': 'compute',
self._host, }
self._binary)
self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
self.useFixture(test.TimeOverride())
timeutils.advance_time_seconds(self.down_time + 1)
self.servicegroup_api._driver._report_state(serv)
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
serv.stop()
timeutils.advance_time_seconds(self.down_time + 1)
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
def test_get_all(self):
host1 = self._host + '_1'
host2 = self._host + '_2'
serv1 = self.useFixture(
ServiceFixture(host1, self._binary, self._topic)).serv
serv1.start()
serv2 = self.useFixture(
ServiceFixture(host2, self._binary, self._topic)).serv
serv2.start()
service_ref1 = db.service_get_by_args(self._ctx,
host1,
self._binary)
service_ref2 = db.service_get_by_args(self._ctx,
host2,
self._binary)
services = self.servicegroup_api.get_all(self._topic)
self.assertIn(service_ref1['host'], services)
self.assertIn(service_ref2['host'], services)
service_id = self.servicegroup_api.get_one(self._topic)
self.assertIn(service_id, services)
def test_service_is_up(self):
fts_func = datetime.datetime.fromtimestamp fts_func = datetime.datetime.fromtimestamp
fake_now = 1000 fake_now = 1000
down_time = 15
self.flags(service_down_time=down_time)
self.mox.StubOutWithMock(timeutils, 'utcnow')
self.servicegroup_api = servicegroup.API()
# Up (equal) # Up (equal)
timeutils.utcnow().AndReturn(fts_func(fake_now)) now_mock.return_value = fts_func(fake_now)
service = {'updated_at': fts_func(fake_now - self.down_time), service_ref['updated_at'] = fts_func(fake_now - self.down_time)
'created_at': fts_func(fake_now - self.down_time)} service_ref['created_at'] = fts_func(fake_now - self.down_time)
self.mox.ReplayAll()
result = self.servicegroup_api.service_is_up(service) result = self.servicegroup_api.service_is_up(service_ref)
self.assertTrue(result) self.assertTrue(result)
self.mox.ResetAll()
# Up # Up
timeutils.utcnow().AndReturn(fts_func(fake_now)) service_ref['updated_at'] = fts_func(fake_now - self.down_time + 1)
service = {'updated_at': fts_func(fake_now - self.down_time + 1), service_ref['created_at'] = fts_func(fake_now - self.down_time + 1)
'created_at': fts_func(fake_now - self.down_time + 1)} result = self.servicegroup_api.service_is_up(service_ref)
self.mox.ReplayAll()
result = self.servicegroup_api.service_is_up(service)
self.assertTrue(result) self.assertTrue(result)
self.mox.ResetAll()
# Down # Down
timeutils.utcnow().AndReturn(fts_func(fake_now)) service_ref['updated_at'] = fts_func(fake_now - self.down_time - 3)
service = {'updated_at': fts_func(fake_now - self.down_time - 3), service_ref['created_at'] = fts_func(fake_now - self.down_time - 3)
'created_at': fts_func(fake_now - self.down_time - 3)} result = self.servicegroup_api.service_is_up(service_ref)
self.mox.ReplayAll()
result = self.servicegroup_api.service_is_up(service)
self.assertFalse(result) self.assertFalse(result)
@mock.patch('nova.conductor.api.LocalAPI.service_get_all_by_topic')
def test_get_all(self, ga_mock):
service_refs = [
{
'host': 'fake-host1',
'topic': 'compute'
},
{
'host': 'fake-host2',
'topic': 'compute'
},
{
'host': 'fake-host3',
'topic': 'compute'
},
]
ga_mock.return_value = service_refs
with mock.patch.object(self.servicegroup_api._driver,
'is_up', side_effect=[
None,
True, # fake host 2 is enabled, all others disabled
None
]):
services = self.servicegroup_api.get_all('compute')
self.assertEqual(['fake-host2'], services)
ga_mock.assert_called_once_with(mock.ANY, 'compute')
def test_join(self):
service = mock.MagicMock(report_interval=1)
self.servicegroup_api.join('fake-host', 'fake-topic', service)
fn = self.servicegroup_api._driver._report_state
service.tg.add_timer.assert_called_once_with(1, fn, 5, service)
@mock.patch('nova.conductor.api.LocalAPI.service_update')
def test_report_state(self, upd_mock):
service_ref = {
'host': 'fake-host',
'topic': 'compute',
'report_count': 10
}
service = mock.MagicMock(model_disconnected=False,
service_ref=service_ref)
fn = self.servicegroup_api._driver._report_state
fn(service)
upd_mock.assert_called_once_with(mock.ANY,
service_ref,
dict(report_count=11))

View File

@@ -15,199 +15,79 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import fixtures import mock
from oslo_utils import timeutils
from nova import context
from nova import db
from nova import service
from nova import servicegroup from nova import servicegroup
from nova import test from nova import test
class ServiceFixture(fixtures.Fixture): class MemcachedServiceGroupTestCase(test.NoDBTestCase):
def __init__(self, host, binary, topic): @mock.patch('nova.openstack.common.memorycache.get_client')
super(ServiceFixture, self).__init__() def setUp(self, mgc_mock):
self.host = host
self.binary = binary
self.topic = topic
self.serv = None
def setUp(self):
super(ServiceFixture, self).setUp()
self.serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.unit.test_service.FakeManager',
1, 1)
self.addCleanup(self.serv.kill)
class MemcachedServiceGroupTestCase(test.TestCase):
def setUp(self):
super(MemcachedServiceGroupTestCase, self).setUp() super(MemcachedServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None self.mc_client = mock.MagicMock()
self.flags(servicegroup_driver='mc') mgc_mock.return_value = self.mc_client
self.down_time = 15 self.flags(memcached_servers='ignored',
self.flags(enable_new_services=True) servicegroup_driver='mc')
self.flags(service_down_time=self.down_time)
self.servicegroup_api = servicegroup.API(test=True)
self._host = 'foo'
self._binary = 'nova-fake'
self._topic = 'unittest'
self._ctx = context.get_admin_context()
def test_memcached_driver(self):
serv = self.useFixture(
ServiceFixture(self._host, self._binary, self._topic)).serv
serv.start()
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
hostkey = str("%s:%s" % (self._topic, self._host))
self.servicegroup_api._driver.mc.set(hostkey,
timeutils.utcnow(),
time=self.down_time)
self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
self.useFixture(test.TimeOverride())
timeutils.advance_time_seconds(self.down_time + 1)
self.servicegroup_api._driver._report_state(serv)
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
serv.stop()
timeutils.advance_time_seconds(self.down_time + 1)
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
def test_get_all(self):
host1 = self._host + '_1'
host2 = self._host + '_2'
host3 = self._host + '_3'
serv1 = self.useFixture(
ServiceFixture(host1, self._binary, self._topic)).serv
serv1.start()
serv2 = self.useFixture(
ServiceFixture(host2, self._binary, self._topic)).serv
serv2.start()
serv3 = self.useFixture(
ServiceFixture(host3, self._binary, self._topic)).serv
serv3.start()
db.service_get_by_args(self._ctx, host1, self._binary)
db.service_get_by_args(self._ctx, host2, self._binary)
db.service_get_by_args(self._ctx, host3, self._binary)
host1key = str("%s:%s" % (self._topic, host1))
host2key = str("%s:%s" % (self._topic, host2))
host3key = str("%s:%s" % (self._topic, host3))
self.servicegroup_api._driver.mc.set(host1key,
timeutils.utcnow(),
time=self.down_time)
self.servicegroup_api._driver.mc.set(host2key,
timeutils.utcnow(),
time=self.down_time)
self.servicegroup_api._driver.mc.set(host3key,
timeutils.utcnow(),
time=-1)
services = self.servicegroup_api.get_all(self._topic)
self.assertIn(host1, services)
self.assertIn(host2, services)
self.assertNotIn(host3, services)
service_id = self.servicegroup_api.get_one(self._topic)
self.assertIn(service_id, services)
def test_service_is_up(self):
serv = self.useFixture(
ServiceFixture(self._host, self._binary, self._topic)).serv
serv.start()
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
fake_now = 1000
down_time = 15
self.flags(service_down_time=down_time)
self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
self.servicegroup_api = servicegroup.API() self.servicegroup_api = servicegroup.API()
hostkey = str("%s:%s" % (self._topic, self._host))
# Up (equal) def test_is_up(self):
timeutils.utcnow_ts().AndReturn(fake_now) service_ref = {
timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1) 'host': 'fake-host',
self.mox.ReplayAll() 'topic': 'compute'
self.servicegroup_api._driver.mc.set(hostkey, }
timeutils.utcnow(), self.mc_client.get.return_value = None
time=down_time)
result = self.servicegroup_api.service_is_up(service_ref)
self.assertTrue(result)
self.mox.ResetAll() self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
# Up self.mc_client.get.assert_called_once_with('compute:fake-host')
timeutils.utcnow_ts().AndReturn(fake_now) self.mc_client.reset_mock()
timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2) self.mc_client.get.return_value = True
self.mox.ReplayAll() self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
self.servicegroup_api._driver.mc.set(hostkey, self.mc_client.get.assert_called_once_with('compute:fake-host')
timeutils.utcnow(),
time=down_time)
result = self.servicegroup_api.service_is_up(service_ref)
self.assertTrue(result)
self.mox.ResetAll() @mock.patch('nova.conductor.api.LocalAPI.service_get_all_by_topic')
# Down def test_get_all(self, ga_mock):
timeutils.utcnow_ts().AndReturn(fake_now) service_refs = [
timeutils.utcnow_ts().AndReturn(fake_now + down_time) {
self.mox.ReplayAll() 'host': 'fake-host1',
self.servicegroup_api._driver.mc.set(hostkey, 'topic': 'compute'
timeutils.utcnow(), },
time=down_time) {
result = self.servicegroup_api.service_is_up(service_ref) 'host': 'fake-host2',
self.assertFalse(result) 'topic': 'compute'
},
{
'host': 'fake-host3',
'topic': 'compute'
},
]
ga_mock.return_value = service_refs
self.mc_client.get.side_effect = [
None,
True, # fake host 2 is enabled, all others disabled
None
]
self.mox.ResetAll() services = self.servicegroup_api.get_all('compute')
# Down self.assertEqual(['fake-host2'], services)
timeutils.utcnow_ts().AndReturn(fake_now) ga_mock.assert_called_once_with(mock.ANY, 'compute')
timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
self.mox.ReplayAll()
self.servicegroup_api._driver.mc.set(hostkey,
timeutils.utcnow(),
time=down_time)
result = self.servicegroup_api.service_is_up(service_ref)
self.assertFalse(result)
self.mox.ResetAll() def test_join(self):
service = mock.MagicMock(report_interval=1)
self.servicegroup_api.join('fake-host', 'fake-topic', service)
fn = self.servicegroup_api._driver._report_state
service.tg.add_timer.assert_called_once_with(1, fn, 5, service)
def test_report_state(self): def test_report_state(self):
serv = self.useFixture( service_ref = {
ServiceFixture(self._host, self._binary, self._topic)).serv 'host': 'fake-host',
serv.start() 'topic': 'compute'
db.service_get_by_args(self._ctx, self._host, self._binary) }
self.servicegroup_api = servicegroup.API() service = mock.MagicMock(model_disconnected=False,
service_ref=service_ref)
# updating model_disconnected fn = self.servicegroup_api._driver._report_state
serv.model_disconnected = True fn(service)
self.servicegroup_api._driver._report_state(serv) self.mc_client.set.assert_called_once_with('compute:fake-host',
self.assertFalse(serv.model_disconnected) mock.ANY, time=60)
# handling exception
serv.model_disconnected = True
self.servicegroup_api._driver.mc = None
self.servicegroup_api._driver._report_state(serv)
self.assertTrue(serv.model_disconnected)
delattr(serv, 'model_disconnected')
self.servicegroup_api._driver.mc = None
self.servicegroup_api._driver._report_state(serv)
self.assertTrue(serv.model_disconnected)

View File

@@ -37,7 +37,7 @@ class ZKServiceGroupTestCase(test.NoDBTestCase):
def setUp(self): def setUp(self):
super(ZKServiceGroupTestCase, self).setUp() super(ZKServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None from nova.servicegroup.drivers import zk
self.flags(servicegroup_driver='zk') self.flags(servicegroup_driver='zk')
self.flags(address='localhost:2181', group="zookeeper") self.flags(address='localhost:2181', group="zookeeper")
try: try: