Browse Source

Fix for WBE sporadic timeout of tasks

This fixes the sporadic timeout of tasks that would happen
under certain circumstances. What happened was that
a new worker notification would be sent to a callback
while at the same time a task submission would come in
and there would be a small race period where the task
would insert itself into the requests cache while the
callback was processing.

So to work around this the whole concept of a requests
cache was revamped and now the WBE executor just maintains
its own local dictionary of ongoing requests and accesses
it safely.

During the on_wait function that is periodically called
by kombu, the expiry of requests still happens as before,
but now any requests that are pending are also matched to
any new workers that may have appeared.

This avoids the race (and ensures that even if a new
worker is found while a submission is in progress, the
delay before that submission is published lasts only
until the next on_wait call happens).

Related-Bug: #1431097

Change-Id: I98b0caeedc77ab2f7214847763ae1eb0433d4a78
tags/1.28.0
Joshua Harlow 3 years ago
parent
commit
cea71f2799

+ 0
- 5
doc/source/types.rst View File

@@ -12,11 +12,6 @@ Types
12 12
     into *isolated* libraries (as using these types in this manner is not
13 13
     the expected and/or desired usage).
14 14
 
15
-Cache
16
-=====
17
-
18
-.. automodule:: taskflow.types.cache
19
-
20 15
 Entity
21 16
 ======
22 17
 

+ 63
- 46
taskflow/engines/worker_based/executor.py View File

@@ -15,9 +15,11 @@
15 15
 #    under the License.
16 16
 
17 17
 import functools
18
+import threading
18 19
 
19 20
 from futurist import periodics
20 21
 from oslo_utils import timeutils
22
+import six
21 23
 
22 24
 from taskflow.engines.action_engine import executor
23 25
 from taskflow.engines.worker_based import dispatcher
@@ -26,7 +28,7 @@ from taskflow.engines.worker_based import proxy
26 28
 from taskflow.engines.worker_based import types as wt
27 29
 from taskflow import exceptions as exc
28 30
 from taskflow import logging
29
-from taskflow import task as task_atom
31
+from taskflow.task import EVENT_UPDATE_PROGRESS  # noqa
30 32
 from taskflow.utils import kombu_utils as ku
31 33
 from taskflow.utils import misc
32 34
 from taskflow.utils import threading_utils as tu
@@ -42,7 +44,8 @@ class WorkerTaskExecutor(executor.TaskExecutor):
42 44
                  url=None, transport=None, transport_options=None,
43 45
                  retry_options=None):
44 46
         self._uuid = uuid
45
-        self._requests_cache = wt.RequestsCache()
47
+        self._ongoing_requests = {}
48
+        self._ongoing_requests_lock = threading.RLock()
46 49
         self._transition_timeout = transition_timeout
47 50
         type_handlers = {
48 51
             pr.RESPONSE: dispatcher.Handler(self._process_response,
@@ -61,8 +64,6 @@ class WorkerTaskExecutor(executor.TaskExecutor):
61 64
         # pre-existing knowledge of the topics those workers are on to gather
62 65
         # and update this information).
63 66
         self._finder = wt.ProxyWorkerFinder(uuid, self._proxy, topics)
64
-        self._finder.notifier.register(wt.WorkerFinder.WORKER_ARRIVED,
65
-                                       self._on_worker)
66 67
         self._helpers = tu.ThreadBundle()
67 68
         self._helpers.bind(lambda: tu.daemon_thread(self._proxy.start),
68 69
                            after_start=lambda t: self._proxy.wait(),
@@ -74,25 +75,18 @@ class WorkerTaskExecutor(executor.TaskExecutor):
74 75
                                after_join=lambda t: p_worker.reset(),
75 76
                                before_start=lambda t: p_worker.reset())
76 77
 
77
-    def _on_worker(self, event_type, details):
78
-        """Process new worker that has arrived (and fire off any work)."""
79
-        worker = details['worker']
80
-        for request in self._requests_cache.get_waiting_requests(worker):
81
-            if request.transition_and_log_error(pr.PENDING, logger=LOG):
82
-                self._publish_request(request, worker)
83
-
84 78
     def _process_response(self, response, message):
85 79
         """Process response from remote side."""
86 80
         LOG.debug("Started processing response message '%s'",
87 81
                   ku.DelayedPretty(message))
88 82
         try:
89
-            task_uuid = message.properties['correlation_id']
83
+            request_uuid = message.properties['correlation_id']
90 84
         except KeyError:
91 85
             LOG.warning("The 'correlation_id' message property is"
92 86
                         " missing in message '%s'",
93 87
                         ku.DelayedPretty(message))
94 88
         else:
95
-            request = self._requests_cache.get(task_uuid)
89
+            request = self._ongoing_requests.get(request_uuid)
96 90
             if request is not None:
97 91
                 response = pr.Response.from_dict(response)
98 92
                 LOG.debug("Extracted response '%s' and matched it to"
@@ -105,35 +99,31 @@ class WorkerTaskExecutor(executor.TaskExecutor):
105 99
                     details = response.data['details']
106 100
                     request.notifier.notify(event_type, details)
107 101
                 elif response.state in (pr.FAILURE, pr.SUCCESS):
108
-                    moved = request.transition_and_log_error(response.state,
109
-                                                             logger=LOG)
110
-                    if moved:
111
-                        # NOTE(imelnikov): request should not be in the
112
-                        # cache when another thread can see its result and
113
-                        # schedule another request with the same uuid; so
114
-                        # we remove it, then set the result...
115
-                        del self._requests_cache[request.uuid]
102
+                    if request.transition_and_log_error(response.state,
103
+                                                        logger=LOG):
104
+                        with self._ongoing_requests_lock:
105
+                            del self._ongoing_requests[request.uuid]
116 106
                         request.set_result(**response.data)
117 107
                 else:
118 108
                     LOG.warning("Unexpected response status '%s'",
119 109
                                 response.state)
120 110
             else:
121
-                LOG.debug("Request with id='%s' not found", task_uuid)
111
+                LOG.debug("Request with id='%s' not found", request_uuid)
122 112
 
123 113
     @staticmethod
124 114
     def _handle_expired_request(request):
125
-        """Handle expired request.
115
+        """Handle a expired request.
126 116
 
127
-        When request has expired it is removed from the requests cache and
128
-        the `RequestTimeout` exception is set as a request result.
117
+        When a request has expired it is removed from the ongoing requests
118
+        dictionary and a ``RequestTimeout`` exception is set as a
119
+        request result.
129 120
         """
130 121
         if request.transition_and_log_error(pr.FAILURE, logger=LOG):
131 122
             # Raise an exception (and then catch it) so we get a nice
132 123
             # traceback that the request will get instead of it getting
133 124
             # just an exception with no traceback...
134 125
             try:
135
-                request_age = timeutils.delta_seconds(request.created_on,
136
-                                                      timeutils.utcnow())
126
+                request_age = timeutils.now() - request.created_on
137 127
                 raise exc.RequestTimeout(
138 128
                     "Request '%s' has expired after waiting for %0.2f"
139 129
                     " seconds for it to transition out of (%s) states"
@@ -142,51 +132,74 @@ class WorkerTaskExecutor(executor.TaskExecutor):
142 132
                 with misc.capture_failure() as failure:
143 133
                     LOG.debug(failure.exception_str)
144 134
                     request.set_result(failure)
135
+            return True
136
+        return False
145 137
 
146 138
     def _on_wait(self):
147 139
         """This function is called cyclically between draining events."""
148
-        self._requests_cache.cleanup(self._handle_expired_request)
140
+        with self._ongoing_requests_lock:
141
+            ongoing_requests_uuids = set(six.iterkeys(self._ongoing_requests))
142
+        waiting_requests = {}
143
+        expired_requests = {}
144
+        for request_uuid in ongoing_requests_uuids:
145
+            try:
146
+                request = self._ongoing_requests[request_uuid]
147
+            except KeyError:
148
+                # Guess it got removed before we got to it...
149
+                pass
150
+            else:
151
+                if request.expired:
152
+                    expired_requests[request_uuid] = request
153
+                elif request.state == pr.WAITING:
154
+                    worker = self._finder.get_worker_for_task(request.task)
155
+                    if worker is not None:
156
+                        waiting_requests[request_uuid] = (request, worker)
157
+        if expired_requests:
158
+            with self._ongoing_requests_lock:
159
+                while expired_requests:
160
+                    request_uuid, request = expired_requests.popitem()
161
+                    if self._handle_expired_request(request):
162
+                        del self._ongoing_requests[request_uuid]
163
+        if waiting_requests:
164
+            while waiting_requests:
165
+                request_uuid, (request, worker) = waiting_requests.popitem()
166
+                if request.transition_and_log_error(pr.PENDING, logger=LOG):
167
+                    self._publish_request(request, worker)
149 168
 
150 169
     def _submit_task(self, task, task_uuid, action, arguments,
151 170
                      progress_callback=None, **kwargs):
152 171
         """Submit task request to a worker."""
153 172
         request = pr.Request(task, task_uuid, action, arguments,
154 173
                              self._transition_timeout, **kwargs)
155
-
156 174
         # Register the callback, so that we can proxy the progress correctly.
157 175
         if (progress_callback is not None and
158
-                request.notifier.can_be_registered(
159
-                    task_atom.EVENT_UPDATE_PROGRESS)):
160
-            request.notifier.register(task_atom.EVENT_UPDATE_PROGRESS,
161
-                                      progress_callback)
176
+                request.notifier.can_be_registered(EVENT_UPDATE_PROGRESS)):
177
+            request.notifier.register(EVENT_UPDATE_PROGRESS, progress_callback)
162 178
             cleaner = functools.partial(request.notifier.deregister,
163
-                                        task_atom.EVENT_UPDATE_PROGRESS,
179
+                                        EVENT_UPDATE_PROGRESS,
164 180
                                         progress_callback)
165 181
             request.result.add_done_callback(lambda fut: cleaner())
166
-
167 182
         # Get task's worker and publish request if worker was found.
168 183
         worker = self._finder.get_worker_for_task(task)
169 184
         if worker is not None:
170
-            # NOTE(skudriashev): Make sure request is set to the PENDING state
171
-            # before putting it into the requests cache to prevent the notify
172
-            # processing thread get list of waiting requests and publish it
173
-            # before it is published here, so it wouldn't be published twice.
174 185
             if request.transition_and_log_error(pr.PENDING, logger=LOG):
175
-                self._requests_cache[request.uuid] = request
186
+                with self._ongoing_requests_lock:
187
+                    self._ongoing_requests[request.uuid] = request
176 188
                 self._publish_request(request, worker)
177 189
         else:
178 190
             LOG.debug("Delaying submission of '%s', no currently known"
179 191
                       " worker/s available to process it", request)
180
-            self._requests_cache[request.uuid] = request
181
-
192
+            with self._ongoing_requests_lock:
193
+                self._ongoing_requests[request.uuid] = request
182 194
         return request.result
183 195
 
184 196
     def _publish_request(self, request, worker):
185 197
         """Publish request to a given topic."""
186 198
         LOG.debug("Submitting execution of '%s' to worker '%s' (expecting"
187 199
                   " response identified by reply_to=%s and"
188
-                  " correlation_id=%s)", request, worker, self._uuid,
189
-                  request.uuid)
200
+                  " correlation_id=%s) - waited %0.3f seconds to"
201
+                  " get published", request, worker, self._uuid,
202
+                  request.uuid, timeutils.now() - request.created_on)
190 203
         try:
191 204
             self._proxy.publish(request, worker.topic,
192 205
                                 reply_to=self._uuid,
@@ -196,7 +209,8 @@ class WorkerTaskExecutor(executor.TaskExecutor):
196 209
                 LOG.critical("Failed to submit '%s' (transitioning it to"
197 210
                              " %s)", request, pr.FAILURE, exc_info=True)
198 211
                 if request.transition_and_log_error(pr.FAILURE, logger=LOG):
199
-                    del self._requests_cache[request.uuid]
212
+                    with self._ongoing_requests_lock:
213
+                        del self._ongoing_requests[request.uuid]
200 214
                     request.set_result(failure)
201 215
 
202 216
     def execute_task(self, task, task_uuid, arguments,
@@ -229,5 +243,8 @@ class WorkerTaskExecutor(executor.TaskExecutor):
229 243
     def stop(self):
230 244
         """Stops proxy thread and associated topic notification thread."""
231 245
         self._helpers.stop()
232
-        self._requests_cache.clear(self._handle_expired_request)
246
+        with self._ongoing_requests_lock:
247
+            while self._ongoing_requests:
248
+                _request_uuid, request = self._ongoing_requests.popitem()
249
+                self._handle_expired_request(request)
233 250
         self._finder.clear()

+ 1
- 1
taskflow/engines/worker_based/protocol.py View File

@@ -262,7 +262,7 @@ class Request(Message):
262 262
         self._watch = timeutils.StopWatch(duration=timeout).start()
263 263
         self._state = WAITING
264 264
         self._lock = threading.Lock()
265
-        self._created_on = timeutils.utcnow()
265
+        self._created_on = timeutils.now()
266 266
         self._result = futurist.Future()
267 267
         self._result.atom = task
268 268
         self._notifier = task.notifier

+ 0
- 22
taskflow/engines/worker_based/types.py View File

@@ -28,27 +28,11 @@ import six
28 28
 from taskflow.engines.worker_based import dispatcher
29 29
 from taskflow.engines.worker_based import protocol as pr
30 30
 from taskflow import logging
31
-from taskflow.types import cache as base
32
-from taskflow.types import notifier
33 31
 from taskflow.utils import kombu_utils as ku
34 32
 
35 33
 LOG = logging.getLogger(__name__)
36 34
 
37 35
 
38
-class RequestsCache(base.ExpiringCache):
39
-    """Represents a thread-safe requests cache."""
40
-
41
-    def get_waiting_requests(self, worker):
42
-        """Get list of waiting requests that the given worker can satisfy."""
43
-        waiting_requests = []
44
-        with self._lock:
45
-            for request in six.itervalues(self._data):
46
-                if request.state == pr.WAITING \
47
-                   and worker.performs(request.task):
48
-                    waiting_requests.append(request)
49
-        return waiting_requests
50
-
51
-
52 36
 # TODO(harlowja): this needs to be made better, once
53 37
 # https://blueprints.launchpad.net/taskflow/+spec/wbe-worker-info is finally
54 38
 # implemented we can go about using that instead.
@@ -101,12 +85,8 @@ class TopicWorker(object):
101 85
 class WorkerFinder(object):
102 86
     """Base class for worker finders..."""
103 87
 
104
-    #: Event type emitted when a new worker arrives.
105
-    WORKER_ARRIVED = 'worker_arrived'
106
-
107 88
     def __init__(self):
108 89
         self._cond = threading.Condition()
109
-        self.notifier = notifier.RestrictedNotifier([self.WORKER_ARRIVED])
110 90
 
111 91
     @abc.abstractmethod
112 92
     def _total_workers(self):
@@ -219,8 +199,6 @@ class ProxyWorkerFinder(WorkerFinder):
219 199
                 LOG.debug("Updated worker '%s' (%s total workers are"
220 200
                           " currently known)", worker, self._total_workers())
221 201
                 self._cond.notify_all()
222
-        if new_or_updated:
223
-            self.notifier.notify(self.WORKER_ARRIVED, {'worker': worker})
224 202
 
225 203
     def clear(self):
226 204
         with self._cond:

+ 19
- 37
taskflow/tests/unit/worker_based/test_executor.py View File

@@ -17,9 +17,6 @@
17 17
 import threading
18 18
 import time
19 19
 
20
-from oslo_utils import fixture
21
-from oslo_utils import timeutils
22
-
23 20
 from taskflow.engines.worker_based import executor
24 21
 from taskflow.engines.worker_based import protocol as pr
25 22
 from taskflow import task as task_atom
@@ -56,6 +53,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
56 53
         self.proxy_inst_mock.stop.side_effect = self._fake_proxy_stop
57 54
         self.request_inst_mock.uuid = self.task_uuid
58 55
         self.request_inst_mock.expired = False
56
+        self.request_inst_mock.created_on = 0
59 57
         self.request_inst_mock.task_cls = self.task.name
60 58
         self.message_mock = mock.MagicMock(name='message')
61 59
         self.message_mock.properties = {'correlation_id': self.task_uuid,
@@ -96,7 +94,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
96 94
     def test_on_message_response_state_running(self):
97 95
         response = pr.Response(pr.RUNNING)
98 96
         ex = self.executor()
99
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
97
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
100 98
         ex._process_response(response.to_dict(), self.message_mock)
101 99
 
102 100
         expected_calls = [
@@ -109,7 +107,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
109 107
                                event_type=task_atom.EVENT_UPDATE_PROGRESS,
110 108
                                details={'progress': 1.0})
111 109
         ex = self.executor()
112
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
110
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
113 111
         ex._process_response(response.to_dict(), self.message_mock)
114 112
 
115 113
         expected_calls = [
@@ -123,10 +121,10 @@ class TestWorkerTaskExecutor(test.MockTestCase):
123 121
         failure_dict = a_failure.to_dict()
124 122
         response = pr.Response(pr.FAILURE, result=failure_dict)
125 123
         ex = self.executor()
126
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
124
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
127 125
         ex._process_response(response.to_dict(), self.message_mock)
128 126
 
129
-        self.assertEqual(0, len(ex._requests_cache))
127
+        self.assertEqual(0, len(ex._ongoing_requests))
130 128
         expected_calls = [
131 129
             mock.call.transition_and_log_error(pr.FAILURE, logger=mock.ANY),
132 130
             mock.call.set_result(result=test_utils.FailureMatcher(a_failure))
@@ -137,7 +135,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
137 135
         response = pr.Response(pr.SUCCESS, result=self.task_result,
138 136
                                event='executed')
139 137
         ex = self.executor()
140
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
138
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
141 139
         ex._process_response(response.to_dict(), self.message_mock)
142 140
 
143 141
         expected_calls = [
@@ -149,7 +147,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
149 147
     def test_on_message_response_unknown_state(self):
150 148
         response = pr.Response(state='<unknown>')
151 149
         ex = self.executor()
152
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
150
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
153 151
         ex._process_response(response.to_dict(), self.message_mock)
154 152
 
155 153
         self.assertEqual([], self.request_inst_mock.mock_calls)
@@ -158,7 +156,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
158 156
         self.message_mock.properties['correlation_id'] = '<unknown>'
159 157
         response = pr.Response(pr.RUNNING)
160 158
         ex = self.executor()
161
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
159
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
162 160
         ex._process_response(response.to_dict(), self.message_mock)
163 161
 
164 162
         self.assertEqual([], self.request_inst_mock.mock_calls)
@@ -167,48 +165,32 @@ class TestWorkerTaskExecutor(test.MockTestCase):
167 165
         self.message_mock.properties = {'type': pr.RESPONSE}
168 166
         response = pr.Response(pr.RUNNING)
169 167
         ex = self.executor()
170
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
168
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
171 169
         ex._process_response(response.to_dict(), self.message_mock)
172 170
 
173 171
         self.assertEqual([], self.request_inst_mock.mock_calls)
174 172
 
175 173
     def test_on_wait_task_not_expired(self):
176 174
         ex = self.executor()
177
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
175
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
178 176
 
179
-        self.assertEqual(1, len(ex._requests_cache))
177
+        self.assertEqual(1, len(ex._ongoing_requests))
180 178
         ex._on_wait()
181
-        self.assertEqual(1, len(ex._requests_cache))
179
+        self.assertEqual(1, len(ex._ongoing_requests))
182 180
 
183
-    def test_on_wait_task_expired(self):
184
-        now = timeutils.utcnow()
185
-        f = self.useFixture(fixture.TimeFixture(override_time=now))
181
+    @mock.patch('oslo_utils.timeutils.now')
182
+    def test_on_wait_task_expired(self, mock_now):
183
+        mock_now.side_effect = [0, 120]
186 184
 
187 185
         self.request_inst_mock.expired = True
188
-        self.request_inst_mock.created_on = now
186
+        self.request_inst_mock.created_on = 0
189 187
 
190
-        f.advance_time_seconds(120)
191 188
         ex = self.executor()
192
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
189
+        ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
190
+        self.assertEqual(1, len(ex._ongoing_requests))
193 191
 
194
-        self.assertEqual(1, len(ex._requests_cache))
195 192
         ex._on_wait()
196
-        self.assertEqual(0, len(ex._requests_cache))
197
-
198
-    def test_remove_task_non_existent(self):
199
-        ex = self.executor()
200
-        ex._requests_cache[self.task_uuid] = self.request_inst_mock
201
-
202
-        self.assertEqual(1, len(ex._requests_cache))
203
-        del ex._requests_cache[self.task_uuid]
204
-        self.assertEqual(0, len(ex._requests_cache))
205
-
206
-        # delete non-existent
207
-        try:
208
-            del ex._requests_cache[self.task_uuid]
209
-        except KeyError:
210
-            pass
211
-        self.assertEqual(0, len(ex._requests_cache))
193
+        self.assertEqual(0, len(ex._ongoing_requests))
212 194
 
213 195
     def test_execute_task(self):
214 196
         ex = self.executor()

+ 0
- 51
taskflow/tests/unit/worker_based/test_types.py View File

@@ -16,63 +16,12 @@
16 16
 
17 17
 from oslo_utils import reflection
18 18
 
19
-from taskflow.engines.worker_based import protocol as pr
20 19
 from taskflow.engines.worker_based import types as worker_types
21 20
 from taskflow import test
22 21
 from taskflow.test import mock
23 22
 from taskflow.tests import utils
24 23
 
25 24
 
26
-class TestRequestCache(test.TestCase):
27
-
28
-    def setUp(self):
29
-        super(TestRequestCache, self).setUp()
30
-        self.task = utils.DummyTask()
31
-        self.task_uuid = 'task-uuid'
32
-        self.task_action = 'execute'
33
-        self.task_args = {'a': 'a'}
34
-        self.timeout = 60
35
-
36
-    def request(self, **kwargs):
37
-        request_kwargs = dict(task=self.task,
38
-                              uuid=self.task_uuid,
39
-                              action=self.task_action,
40
-                              arguments=self.task_args,
41
-                              progress_callback=None,
42
-                              timeout=self.timeout)
43
-        request_kwargs.update(kwargs)
44
-        return pr.Request(**request_kwargs)
45
-
46
-    @mock.patch('oslo_utils.timeutils.now')
47
-    def test_requests_cache_expiry(self, now):
48
-        # Mock out the calls the underlying objects will soon use to return
49
-        # times that we can control more easily...
50
-        overrides = [
51
-            0,
52
-            1,
53
-            self.timeout + 1,
54
-        ]
55
-        now.side_effect = overrides
56
-
57
-        cache = worker_types.RequestsCache()
58
-        cache[self.task_uuid] = self.request()
59
-        cache.cleanup()
60
-        self.assertEqual(1, len(cache))
61
-        cache.cleanup()
62
-        self.assertEqual(0, len(cache))
63
-
64
-    def test_requests_cache_match(self):
65
-        cache = worker_types.RequestsCache()
66
-        cache[self.task_uuid] = self.request()
67
-        cache['task-uuid-2'] = self.request(task=utils.NastyTask(),
68
-                                            uuid='task-uuid-2')
69
-        worker = worker_types.TopicWorker("dummy-topic", [utils.DummyTask],
70
-                                          identity="dummy")
71
-        matches = cache.get_waiting_requests(worker)
72
-        self.assertEqual(1, len(matches))
73
-        self.assertEqual(2, len(cache))
74
-
75
-
76 25
 class TestTopicWorker(test.TestCase):
77 26
     def test_topic_worker(self):
78 27
         worker = worker_types.TopicWorker("dummy-topic",

+ 0
- 85
taskflow/types/cache.py View File

@@ -1,85 +0,0 @@
1
-# -*- coding: utf-8 -*-
2
-
3
-#    Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
4
-#
5
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
6
-#    not use this file except in compliance with the License. You may obtain
7
-#    a copy of the License at
8
-#
9
-#         http://www.apache.org/licenses/LICENSE-2.0
10
-#
11
-#    Unless required by applicable law or agreed to in writing, software
12
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
-#    License for the specific language governing permissions and limitations
15
-#    under the License.
16
-
17
-import threading
18
-
19
-from oslo_utils import reflection
20
-import six
21
-
22
-
23
-class ExpiringCache(object):
24
-    """Represents a thread-safe time-based expiring cache.
25
-
26
-    NOTE(harlowja): the values in this cache must have a expired attribute that
27
-    can be used to determine if the key and associated value has expired or if
28
-    it has not.
29
-    """
30
-
31
-    def __init__(self):
32
-        self._data = {}
33
-        self._lock = threading.Lock()
34
-
35
-    def __setitem__(self, key, value):
36
-        """Set a value in the cache."""
37
-        with self._lock:
38
-            self._data[key] = value
39
-
40
-    def __len__(self):
41
-        """Returns how many items are in this cache."""
42
-        return len(self._data)
43
-
44
-    def get(self, key, default=None):
45
-        """Retrieve a value from the cache (returns default if not found)."""
46
-        return self._data.get(key, default)
47
-
48
-    def __getitem__(self, key):
49
-        """Retrieve a value from the cache."""
50
-        return self._data[key]
51
-
52
-    def __delitem__(self, key):
53
-        """Delete a key & value from the cache."""
54
-        with self._lock:
55
-            del self._data[key]
56
-
57
-    def clear(self, on_cleared_callback=None):
58
-        """Removes all keys & values from the cache."""
59
-        cleared_items = []
60
-        with self._lock:
61
-            if on_cleared_callback is not None:
62
-                cleared_items.extend(six.iteritems(self._data))
63
-            self._data.clear()
64
-        if on_cleared_callback is not None:
65
-            arg_c = len(reflection.get_callable_args(on_cleared_callback))
66
-            for (k, v) in cleared_items:
67
-                if arg_c == 2:
68
-                    on_cleared_callback(k, v)
69
-                else:
70
-                    on_cleared_callback(v)
71
-
72
-    def cleanup(self, on_expired_callback=None):
73
-        """Delete out-dated keys & values from the cache."""
74
-        with self._lock:
75
-            expired_values = [(k, v) for k, v in six.iteritems(self._data)
76
-                              if v.expired]
77
-            for (k, _v) in expired_values:
78
-                del self._data[k]
79
-        if on_expired_callback is not None:
80
-            arg_c = len(reflection.get_callable_args(on_expired_callback))
81
-            for (k, v) in expired_values:
82
-                if arg_c == 2:
83
-                    on_expired_callback(k, v)
84
-                else:
85
-                    on_expired_callback(v)

Loading…
Cancel
Save