Merge "queues per manager rather than per pipeline"
commit 6d27648c29

Changed paths: ceilometer, releasenotes/notes

@@ -105,7 +105,7 @@ class NotificationService(cotyledon.Service):
     proper active/active HA.
     """

-    NOTIFICATION_NAMESPACE = 'ceilometer.notification'
+    NOTIFICATION_NAMESPACE = 'ceilometer.notification.v2'

     def __init__(self, worker_id, conf, coordination_id=None):
         super(NotificationService, self).__init__(worker_id)

@@ -229,14 +229,10 @@ class NotificationService(cotyledon.Service):
            endpoints.extend(pipe_mgr.get_interim_endpoints())

        targets = []
-       for mgr in self.managers:
-           for pipe_set, pipe in itertools.product(partitioned,
-                                                   mgr.pipelines):
-               LOG.debug('Pipeline endpoint: %s from set: %s',
-                         pipe.name, pipe_set)
-               topic = '%s-%s-%s' % (pipe.NOTIFICATION_IPC,
-                                     pipe.name, pipe_set)
-               targets.append(oslo_messaging.Target(topic=topic))
+       for mgr, hash_id in itertools.product(self.managers, partitioned):
+           topic = '-'.join([mgr.NOTIFICATION_IPC, mgr.pm_type, str(hash_id)])
+           LOG.debug('Listening to queue: %s', topic)
+           targets.append(oslo_messaging.Target(topic=topic))

        if self.pipeline_listener:
            self.pipeline_listener.stop()

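For illustration, a minimal sketch of the topic names the listener now subscribes to, assuming two pipeline managers whose pm_type values are 'sample' and 'event' and a partition set of {4, 9}; the SimpleNamespace objects are hypothetical stand-ins for the real PipelineManager instances:

    import itertools
    from types import SimpleNamespace

    # Hypothetical stand-ins for the sample/event pipeline managers.
    managers = [SimpleNamespace(NOTIFICATION_IPC='ceilometer_ipc', pm_type='sample'),
                SimpleNamespace(NOTIFICATION_IPC='ceilometer_ipc', pm_type='event')]
    partitioned = [4, 9]  # hash buckets this agent owns, per the coordinator

    topics = ['-'.join([mgr.NOTIFICATION_IPC, mgr.pm_type, str(hash_id)])
              for mgr, hash_id in itertools.product(managers, partitioned)]
    print(topics)
    # ['ceilometer_ipc-sample-4', 'ceilometer_ipc-sample-9',
    #  'ceilometer_ipc-event-4', 'ceilometer_ipc-event-9']
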
@@ -48,21 +48,19 @@ class PipelineException(agent.ConfigException):
 class InterimPublishContext(object):
     """Publisher to hash/shard data to pipelines"""

-    def __init__(self, conf, pipelines):
+    def __init__(self, conf, mgr):
         self.conf = conf
-        self.pipe_notifiers = []
-        transport = messaging.get_transport(conf)
-        for pipe in pipelines:
-            self.pipe_notifiers.append(
-                (pipe, self._get_notifiers(transport, pipe)))
+        self.mgr = mgr
+        self.notifiers = self._get_notifiers(messaging.get_transport(conf))

-    def _get_notifiers(self, transport, pipe):
+    def _get_notifiers(self, transport):
         notifiers = []
         for x in range(self.conf.notification.pipeline_processing_queues):
             notifiers.append(oslo_messaging.Notifier(
                 transport,
                 driver=self.conf.publisher_notifier.telemetry_driver,
-                topics=['%s-%s-%s' % (pipe.NOTIFICATION_IPC, pipe.name, x)]))
+                topics=['-'.join(
+                    [self.mgr.NOTIFICATION_IPC, self.mgr.pm_type, str(x)])]))
         return notifiers

     @staticmethod

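The point of moving the notifiers onto the manager is queue count: previously every Pipeline object built its own set of pipeline_processing_queues notifiers (topics named ceilometer-pipe-<pipeline name>-<n>), whereas now each manager type builds one shared set (ceilometer_ipc-<pm_type>-<n>). A rough back-of-the-envelope sketch with hypothetical numbers (10 processing queues, 2 managers, 6 source/sink pipelines):

    # Hypothetical sizing, only to illustrate why per-manager queues are cheaper.
    processing_queues = 10   # notification.pipeline_processing_queues
    num_pipelines = 6        # source/sink combinations in pipeline.yaml (old unit)
    num_managers = 2         # sample + event managers (new unit)

    old_topic_count = num_pipelines * processing_queues  # 60 ceilometer-pipe-* queues
    new_topic_count = num_managers * processing_queues   # 20 ceilometer_ipc-* queues
    print(old_topic_count, new_topic_count)               # 60 20
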
@@ -79,15 +77,14 @@ class InterimPublishContext(object):
         def p(data):
             data = [data] if not isinstance(data, list) else data
             for datapoint in data:
-                for pipe, notifiers in self.pipe_notifiers:
+                for pipe in self.mgr.pipelines:
                     if pipe.supported(datapoint):
                         serialized_data = pipe.serializer(datapoint)
                         key = (self.hash_grouping(serialized_data,
                                                   pipe.get_grouping_key())
-                               % len(notifiers))
-                        notifier = notifiers[key]
-                        notifier.sample({}, event_type=pipe.name,
-                                        payload=[serialized_data])
+                               % len(self.notifiers))
+                        self.notifiers[key].sample({}, event_type=pipe.name,
+                                                   payload=[serialized_data])
         return p

     def __exit__(self, exc_type, exc_value, traceback):

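On the publish side the routing is unchanged in spirit: a datapoint is hashed on its grouping key and the result, modulo the number of shared notifiers, picks the IPC queue, so related samples keep landing on the same queue. A minimal sketch of that modulo routing, using a hypothetical hash_grouping stand-in rather than the real staticmethod referenced above:

    # Sketch of the hash/shard routing; hash_grouping below is a stand-in, not
    # the real InterimPublishContext.hash_grouping.
    def hash_grouping(datapoint, grouping_keys):
        value = ''
        for key in grouping_keys or []:
            value += str(datapoint.get(key, ''))
        return hash(value)

    notifier_count = 10  # len(self.notifiers) == pipeline_processing_queues
    sample = {'resource_id': 'instance-0001', 'counter_name': 'cpu'}

    queue_index = hash_grouping(sample, ['resource_id']) % notifier_count
    print('datapoint routed to IPC queue', queue_index)
    # Within one agent process the same resource_id always maps to the same queue.
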
@@ -221,8 +218,6 @@ class Sink(object):
 class Pipeline(object):
     """Represents a coupling between a sink and a corresponding source."""

-    NOTIFICATION_IPC = 'ceilometer-pipe'
-
     def __init__(self, conf, source, sink):
         self.conf = conf
         self.source = source

@@ -284,6 +279,8 @@ class PipelineManager(agent.ConfigManagerBase):
     Pipeline manager sets up pipelines according to config file
     """

+    NOTIFICATION_IPC = 'ceilometer_ipc'
+
     def __init__(self, conf, cfg_file, transformer_manager, partition):
         """Setup the pipelines according to config.

@@ -408,7 +405,7 @@ class PipelineManager(agent.ConfigManagerBase):

     def interim_publisher(self):
         """Build publishing context for IPC."""
-        return InterimPublishContext(self.conf, self.pipelines)
+        return InterimPublishContext(self.conf, self)

     def get_main_publisher(self):
         """Return the publishing context to use"""

@@ -300,10 +300,8 @@ class TestRealNotificationHA(BaseRealNotification):
        topics = [target.topic for target in mock_listener.call_args[0][1]]
        self.assertEqual(4, len(topics))
        self.assertEqual(
-           {'ceilometer-pipe-test_pipeline:test_sink-4',
-            'ceilometer-pipe-event:test_event:test_sink-4',
-            'ceilometer-pipe-event:test_event:test_sink-9',
-            'ceilometer-pipe-test_pipeline:test_sink-9'},
+           {'ceilometer_ipc-sample-4', 'ceilometer_ipc-sample-9',
+            'ceilometer_ipc-event-4', 'ceilometer_ipc-event-9'},
            set(topics))

    @mock.patch('oslo_messaging.get_batch_notification_listener')

@@ -0,0 +1,17 @@
+---
+features:
+  - |
+    Workload partitioning of notification agent is now split into queues
+    based on pipeline type (sample, event, etc...) rather than per individual
+    pipeline. This will save some memory usage specifically for pipeline
+    definitions with many source/sink combinations.
+upgrade:
+  - |
+    If workload partitioning of the notification agent is enabled, the
+    notification agent should not run alongside pre-Queens agents. Doing so
+    may result in missed samples when leveraging transformations. To upgrade
+    without loss of data, set `notification_control_exchanges` option to
+    empty so only existing `ceilometer-pipe-*` queues are processed. Once
+    cleared, reset `notification_control_exchanges` option and launch the new
+    notification agent(s). If `workload_partitioning` is not enabled, no
+    special steps are required.