Merge branch 'iteration4'

Serg Melikyan committed 243f51e673, 6 years ago
100 changed files with 4785 additions and 2289 deletions
  1. conductor/.gitignore (+20 -0)
  2. conductor/babel.cfg (+1 -0)
  3. conductor/bin/app.py (+0 -3)
  4. conductor/bin/conductor (+16 -16)
  5. conductor/conductor/app.py (+90 -65)
  6. conductor/conductor/cloud_formation.py (+98 -38)
  7. conductor/conductor/commands/cloud_formation.py (+169 -74)
  8. conductor/conductor/commands/command.py (+1 -1)
  9. conductor/conductor/commands/dispatcher.py (+33 -44)
  10. conductor/conductor/commands/windows_agent.py (+32 -36)
  11. conductor/conductor/config.py (+213 -19)
  12. conductor/conductor/openstack/__init__.py (+0 -0)
  13. conductor/conductor/openstack/common/__init__.py (+0 -0)
  14. conductor/conductor/openstack/common/eventlet_backdoor.py (+87 -0)
  15. conductor/conductor/openstack/common/exception.py (+10 -15)
  16. conductor/conductor/openstack/common/gettextutils.py (+17 -8)
  17. conductor/conductor/openstack/common/importutils.py (+67 -0)
  18. conductor/conductor/openstack/common/jsonutils.py (+141 -0)
  19. conductor/conductor/openstack/common/local.py (+48 -0)
  20. conductor/conductor/openstack/common/log.py (+543 -0)
  21. conductor/conductor/openstack/common/loopingcall.py (+95 -0)
  22. conductor/conductor/openstack/common/notifier/__init__.py (+1 -3)
  23. conductor/conductor/openstack/common/notifier/api.py (+182 -0)
  24. conductor/conductor/openstack/common/notifier/log_notifier.py (+35 -0)
  25. conductor/conductor/openstack/common/notifier/no_op_notifier.py (+4 -4)
  26. conductor/conductor/openstack/common/notifier/rpc_notifier.py (+46 -0)
  27. conductor/conductor/openstack/common/notifier/rpc_notifier2.py (+52 -0)
  28. conductor/conductor/openstack/common/notifier/test_notifier.py (+8 -5)
  29. conductor/conductor/openstack/common/service.py (+332 -0)
  30. conductor/conductor/openstack/common/setup.py (+367 -0)
  31. conductor/conductor/openstack/common/sslutils.py (+80 -0)
  32. conductor/conductor/openstack/common/threadgroup.py (+114 -0)
  33. conductor/conductor/openstack/common/timeutils.py (+186 -0)
  34. conductor/conductor/openstack/common/uuidutils.py (+17 -15)
  35. conductor/conductor/openstack/common/version.py (+94 -0)
  36. conductor/conductor/openstack/common/wsgi.py (+128 -48)
  37. conductor/conductor/openstack/common/xmlutils.py (+74 -0)
  38. conductor/conductor/rabbitmq.py (+127 -70)
  39. conductor/conductor/reporting.py (+11 -4)
  40. conductor/conductor/version.py (+5 -4)
  41. conductor/conductor/windows_agent.py (+29 -27)
  42. conductor/conductor/workflow.py (+18 -17)
  43. conductor/data/init.ps1 (+12 -15)
  44. conductor/data/templates/agent-config/Default.template (+2 -1)
  45. conductor/data/workflows/AD.xml (+217 -198)
  46. conductor/data/workflows/Common.xml (+19 -0)
  47. conductor/data/workflows/IIS.xml (+64 -59)
  48. conductor/doc/source/_static/basic.css (+0 -0)
  49. conductor/doc/source/_static/default.css (+0 -0)
  50. conductor/doc/source/_static/header-line.gif (BIN)
  51. conductor/doc/source/_static/header_bg.jpg (BIN)
  52. conductor/doc/source/_static/jquery.tweet.js (+3 -3)
  53. conductor/doc/source/_static/nature.css (+245 -0)
  54. conductor/doc/source/_static/openstack_logo.png (BIN)
  55. conductor/doc/source/_static/tweaks.css (+94 -0)
  56. conductor/doc/source/_templates/.placeholder (+0 -0)
  57. conductor/doc/source/_theme/layout.html (+18 -21)
  58. conductor/doc/source/_theme/theme.conf (+4 -0)
  59. conductor/doc/source/conf.py (+29 -39)
  60. conductor/doc/source/index.rst (+4 -37)
  61. conductor/etc/app.config (+0 -5)
  62. conductor/etc/conductor-paste.ini (+0 -0)
  63. conductor/etc/conductor.conf (+14 -0)
  64. conductor/logs/.gitignore (+4 -0)
  65. conductor/openstack-common.conf (+7 -0)
  66. conductor/run_tests.sh (+49 -0)
  67. conductor/setup.cfg (+33 -0)
  68. conductor/setup.py (+49 -0)
  69. conductor/test.json (+2 -1)
  70. conductor/tests/__init__.py (+0 -0)
  71. conductor/tests/conductor/__init__.py (+0 -0)
  72. conductor/tests/conductor/test_methods.py (+13 -0)
  73. conductor/tests/conductor/test_with_fake_service.py (+11 -0)
  74. conductor/tools/install_venv_common.py (+220 -0)
  75. conductor/tools/pip-requires (+10 -3)
  76. dashboard/ReadMe.txt (+0 -31)
  77. dashboard/api/windc.py (+0 -126)
  78. dashboard/windc/templates/windc/_services.html (+0 -3)
  79. dashboard/windcclient/common/base.py (+0 -137)
  80. dashboard/windcclient/common/client.py (+0 -151)
  81. dashboard/windcclient/common/exceptions.py (+0 -140)
  82. dashboard/windcclient/common/service_catalog.py (+0 -62)
  83. dashboard/windcclient/common/utils.py (+0 -291)
  84. dashboard/windcclient/shell.py (+0 -285)
  85. dashboard/windcclient/v1/client.py (+0 -29)
  86. dashboard/windcclient/v1/datacenters.py (+0 -43)
  87. dashboard/windcclient/v1/services.py (+0 -48)
  88. portas/etc/portas-api.conf (+2 -3)
  89. portas/portas/api/middleware/context.py (+2 -1)
  90. portas/portas/api/v1/__init__.py (+8 -8)
  91. portas/portas/api/v1/environments.py (+1 -1)
  92. portas/portas/api/v1/sessions.py (+41 -17)
  93. portas/portas/api/v1/webservers.py (+1 -1)
  94. portas/portas/common/config.py (+1 -1)
  95. portas/portas/common/service.py (+2 -2)
  96. portas/portas/context.py (+3 -3)
  97. portas/portas/tests/api/simple_test.py (+1 -1)
  98. python-portasclient/portasclient/v1/services.py (+3 -3)
  99. python-portasclient/portasclient/v1/sessions.py (+6 -4)
  100. python-portasclient/tests/portasclient/.project (+0 -0)

conductor/.gitignore (+20 -0)

@@ -0,0 +1,20 @@
+#IntelJ Idea
+.idea/
+
+#virtualenv
+.venv/
+
+#Build results
+build/
+dist/
+*.egg-info/
+
+#Python
+*.pyc
+
+#Translation build
+*.mo
+*.pot
+
+#SQLite Database files
+*.sqlite

conductor/babel.cfg (+1 -0)

@@ -0,0 +1 @@
+[python: **.py]

conductor/bin/app.py (+0 -3)

@@ -1,3 +0,0 @@
-#!/usr/bin/env python
-
-from conductor import app

windc/windc/core/commands.py → conductor/bin/conductor (+16 -16)

@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2011 OpenStack LLC.
@@ -15,22 +16,21 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-TEMPLATE_DEPLOYMENT_COMMAND = "Template"
-EXECUTION_PLAN_DEPLOYMENT_COMMAND = "EPlan"
-CHEF_COMMAND = "Chef"
-CHEF_OP_CREATE_ENV = "Env"
-CHEF_OP_CREATE_ROLE = "Role"
-CHEF_OP_ASSIGN_ROLE = "AssignRole"
-CHEF_OP_CREATE_NODE = "CRNode"
+import sys
 
-class Command(object):
-    type = "Empty"
-    context = None
-
-
-    def __init__(self, type="Empty", context=None, data=None):
-        self.type = type
-        self.context = context
-        self.data = data
 
+from conductor import config
+from conductor.openstack.common import log
+from conductor.openstack.common import service
+from conductor.app import ConductorWorkflowService
 
+if __name__ == '__main__':
+    try:
+        config.parse_args()
+        log.setup('conductor')
+        launcher = service.ServiceLauncher()
+        launcher.launch_service(ConductorWorkflowService())
+        launcher.wait()
+    except RuntimeError, e:
+        sys.stderr.write("ERROR: %s\n" % e)
+        sys.exit(1)

conductor/conductor/app.py (+90 -65)

@@ -1,65 +1,90 @@
-import datetime
-import glob
-import json
-import time
-import sys
-import tornado.ioloop
-
-import rabbitmq
-from workflow import Workflow
-import cloud_formation
-import windows_agent
-from commands.dispatcher import CommandDispatcher
-from config import Config
-import reporting
-
-config = Config(sys.argv[1] if len(sys.argv) > 1 else None)
-
-rmqclient = rabbitmq.RabbitMqClient(
-    virtual_host=config.get_setting('rabbitmq', 'vhost', '/'),
-    login=config.get_setting('rabbitmq', 'login', 'guest'),
-    password=config.get_setting('rabbitmq', 'password', 'guest'),
-    host=config.get_setting('rabbitmq', 'host', 'localhost'))
-
-
-def schedule(callback, *args, **kwargs):
-    tornado.ioloop.IOLoop.instance().add_timeout(time.time() + 0.1,
-        lambda args=args, kwargs=kwargs: callback(*args, **kwargs))
-
-
-def task_received(task, message_id):
-    print 'Starting at', datetime.datetime.now()
-    reporter = reporting.Reporter(rmqclient, message_id, task['id'])
-
-    command_dispatcher = CommandDispatcher(task['name'], rmqclient)
-    workflows = []
-    for path in glob.glob("data/workflows/*.xml"):
-        print "loading", path
-        workflow = Workflow(path, task, command_dispatcher, config, reporter)
-        workflows.append(workflow)
-
-    def loop(callback):
-        for workflow in workflows:
-            workflow.execute()
-        func = lambda: schedule(loop, callback)
-        if not command_dispatcher.execute_pending(func):
-            callback()
-
-    def shutdown():
-        command_dispatcher.close()
-        rmqclient.send('task-results', json.dumps(task),
-                       message_id=message_id)
-        print 'Finished at', datetime.datetime.now()
-
-    loop(shutdown)
-
-
-def message_received(body, message_id, **kwargs):
-    task_received(json.loads(body), message_id)
-
-
-def start():
-    rmqclient.subscribe("tasks", message_received)
-
-rmqclient.start(start)
-tornado.ioloop.IOLoop.instance().start()
+import datetime
+import glob
+import sys
+import traceback
+
+import anyjson
+from conductor.openstack.common import service
+from workflow import Workflow
+from commands.dispatcher import CommandDispatcher
+from openstack.common import log as logging
+from config import Config
+import reporting
+import rabbitmq
+
+import windows_agent
+import cloud_formation
+
+config = Config(sys.argv[1] if len(sys.argv) > 1 else None)
+
+log = logging.getLogger(__name__)
+
+
+def task_received(task, message_id):
+    with rabbitmq.RmqClient() as rmqclient:
+        try:
+            log.info('Starting processing task {0}: {1}'.format(
+                message_id, anyjson.dumps(task)))
+            reporter = reporting.Reporter(rmqclient, message_id, task['id'])
+
+            command_dispatcher = CommandDispatcher(
+                task['name'], rmqclient, task['token'], task['tenant_id'])
+            workflows = []
+            for path in glob.glob("data/workflows/*.xml"):
+                log.debug('Loading XML {0}'.format(path))
+                workflow = Workflow(path, task, command_dispatcher, config,
+                                    reporter)
+                workflows.append(workflow)
+
+            while True:
+                try:
+                    while True:
+                        result = False
+                        for workflow in workflows:
+                            if workflow.execute():
+                                result = True
+                        if not result:
+                            break
+                    if not command_dispatcher.execute_pending():
+                        break
+                except Exception as ex:
+                    log.exception(ex)
+                    break
+
+            command_dispatcher.close()
+        finally:
+            del task['token']
+            result_msg = rabbitmq.Message()
+            result_msg.body = task
+            result_msg.id = message_id
+
+            rmqclient.send(message=result_msg, key='task-results')
+    log.info('Finished processing task {0}. Result = {1}'.format(
+        message_id, anyjson.dumps(task)))
+
+
+class ConductorWorkflowService(service.Service):
+    def __init__(self):
+        super(ConductorWorkflowService, self).__init__()
+
+    def start(self):
+        super(ConductorWorkflowService, self).start()
+        self.tg.add_thread(self._start_rabbitmq)
+
+    def stop(self):
+        super(ConductorWorkflowService, self).stop()
+
+    def _start_rabbitmq(self):
+        while True:
+            try:
+                with rabbitmq.RmqClient() as rmq:
+                    rmq.declare('tasks', 'tasks')
+                    rmq.declare('task-results')
+                    with rmq.open('tasks') as subscription:
+                        while True:
+                            msg = subscription.get_message()
+                            self.tg.add_thread(
+                                task_received, msg.body, msg.id)
+            except Exception as ex:
+                log.exception(ex)
+
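Note: the reworked task_received() only relies on four keys of the incoming task document ('id', 'name', 'token', 'tenant_id'); the same document, minus the token, is sent back on the task-results queue. A purely illustrative sketch of such a payload (values are made up, and any extra keys the XML workflows reference are omitted):

# Hypothetical task shape inferred from the fields read above; not from the commit.
task = {
    'id': '0a1b2c3d-aaaa-bbbb-cccc-1234567890ab',   # reporting correlation id
    'name': 'my-environment',                       # Heat stack / queue name prefix
    'token': '<keystone token>',                    # deleted before the result is sent
    'tenant_id': '<tenant id>',
}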

conductor/conductor/cloud_formation.py (+98 -38)

@@ -1,38 +1,98 @@
-import base64
-
-import xml_code_engine
-
-
-def update_cf_stack(engine, context, body, template,
-                    mappings, arguments, **kwargs):
-    command_dispatcher = context['/commandDispatcher']
-    print "update-cf", template
-
-    callback = lambda result: engine.evaluate_content(
-        body.find('success'), context)
-
-    command_dispatcher.execute(
-        name='cf', template=template, mappings=mappings,
-        arguments=arguments, callback=callback)
-
-
-def prepare_user_data(context, template='Default', **kwargs):
-    config = context['/config']
-    with open('data/init.ps1') as init_script_file:
-        with open('data/templates/agent-config/%s.template'
-                % template) as template_file:
-            init_script = init_script_file.read()
-            template_data = template_file.read().replace(
-                '%RABBITMQ_HOST%',
-                config.get_setting('rabbitmq', 'host') or 'localhost')
-
-            return init_script.replace(
-                '%WINDOWS_AGENT_CONFIG_BASE64%',
-                base64.b64encode(template_data))
-
-
-xml_code_engine.XmlCodeEngine.register_function(
-    update_cf_stack, "update-cf-stack")
-
-xml_code_engine.XmlCodeEngine.register_function(
-    prepare_user_data, "prepare_user_data")
+import base64
+
+import xml_code_engine
+import config
+from random import choice
+import time
+import string
+
+
+def update_cf_stack(engine, context, body, template,
+                    mappings, arguments, **kwargs):
+    command_dispatcher = context['/commandDispatcher']
+
+    callback = lambda result: engine.evaluate_content(
+        body.find('success'), context)
+
+    command_dispatcher.execute(
+        name='cf', command='CreateOrUpdate', template=template,
+        mappings=mappings, arguments=arguments, callback=callback)
+
+
+def delete_cf_stack(engine, context, body, **kwargs):
+    command_dispatcher = context['/commandDispatcher']
+
+    callback = lambda result: engine.evaluate_content(
+        body.find('success'), context)
+
+    command_dispatcher.execute(
+        name='cf', command='Delete', callback=callback)
+
+
+def prepare_user_data(context, hostname, service, unit, template='Default', **kwargs):
+    settings = config.CONF.rabbitmq
+
+    with open('data/init.ps1') as init_script_file:
+        with open('data/templates/agent-config/{0}.template'.format(
+                template)) as template_file:
+            init_script = init_script_file.read()
+            template_data = template_file.read()
+            template_data = template_data.replace(
+                '%RABBITMQ_HOST%', settings.host)
+            template_data = template_data.replace(
+                '%RABBITMQ_INPUT_QUEUE%',
+                '-'.join([str(context['/dataSource']['name']),
+                         str(service), str(unit)]).lower()
+            )
+            template_data = template_data.replace(
+                '%RESULT_QUEUE%',
+                '-execution-results-{0}'.format(
+                    str(context['/dataSource']['name'])).lower())
+
+            init_script = init_script.replace(
+                '%WINDOWS_AGENT_CONFIG_BASE64%',
+                base64.b64encode(template_data))
+
+            init_script = init_script.replace('%INTERNAL_HOSTNAME%', hostname)
+
+            return init_script
+
+counter = 0
+
+
+def int2base(x, base):
+    digs = string.digits + string.lowercase
+    if x < 0: sign = -1
+    elif x==0: return '0'
+    else: sign = 1
+    x *= sign
+    digits = []
+    while x:
+        digits.append(digs[x % base])
+        x /= base
+    if sign < 0:
+        digits.append('-')
+    digits.reverse()
+    return ''.join(digits)
+
+
+def generate_hostname(**kwargs):
+    global counter
+    prefix = ''.join(choice(string.lowercase) for _ in range(5))
+    timestamp = int2base(int(time.time() * 1000), 36)[:8]
+    suffix = int2base(counter, 36)
+    counter = (counter + 1) % 1296
+    return prefix + timestamp + suffix
+
+
+xml_code_engine.XmlCodeEngine.register_function(
+    update_cf_stack, "update-cf-stack")
+
+xml_code_engine.XmlCodeEngine.register_function(
+    delete_cf_stack, "delete-cf-stack")
+
+xml_code_engine.XmlCodeEngine.register_function(
+    prepare_user_data, "prepare-user-data")
+
+xml_code_engine.XmlCodeEngine.register_function(
+    generate_hostname, "generate-hostname")
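Note: as a quick worked example of the new hostname helpers (not part of the commit): int2base() renders a non-negative integer with digits 0-9a-z, and generate_hostname() glues together five random lowercase letters, the first eight base-36 digits of the current time in milliseconds, and a base-36 counter that wraps at 1296.

# Illustrative only; generate_hostname() output depends on the clock and RNG.
print int2base(255, 16)    # 'ff'
print int2base(1295, 36)   # 'zz' -- the largest suffix before the counter wraps
print generate_hostname()  # e.g. 'kqzrw' + 8 base-36 time digits + '0'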

conductor/conductor/commands/cloud_formation.py (+169 -74)

@@ -1,74 +1,169 @@
-import json
-import os
-import uuid
-
-import conductor.helpers
-from command import CommandBase
-from subprocess import call
-
-
-class HeatExecutor(CommandBase):
-    def __init__(self, stack):
-        self._pending_list = []
-        self._stack = stack
-
-    def execute(self, template, mappings, arguments, callback):
-        with open('data/templates/cf/%s.template' % template) as template_file:
-            template_data = template_file.read()
-
-        template_data = conductor.helpers.transform_json(
-            json.loads(template_data), mappings)
-
-        self._pending_list.append({
-            'template': template_data,
-            'arguments': arguments,
-            'callback': callback
-        })
-
-    def has_pending_commands(self):
-        return len(self._pending_list) > 0
-
-    def execute_pending(self, callback):
-        if not self.has_pending_commands():
-            return False
-
-        template = {}
-        arguments = {}
-        for t in self._pending_list:
-            template = conductor.helpers.merge_dicts(
-                template, t['template'], max_levels=2)
-            arguments = conductor.helpers.merge_dicts(
-                arguments, t['arguments'], max_levels=1)
-
-        print 'Executing heat template', json.dumps(template), \
-            'with arguments', arguments, 'on stack', self._stack
-
-        if not os.path.exists("tmp"):
-            os.mkdir("tmp")
-        file_name = "tmp/" + str(uuid.uuid4())
-        print "Saving template to", file_name
-        with open(file_name, "w") as f:
-            f.write(json.dumps(template))
-
-        arguments_str = ';'.join(['%s=%s' % (key, value)
-                                  for (key, value) in arguments.items()])
-        call([
-            "./heat_run", "stack-create",
-            "-f" + file_name,
-            "-P" + arguments_str,
-            self._stack
-        ])
-
-        callbacks = []
-        for t in self._pending_list:
-            if t['callback']:
-                callbacks.append(t['callback'])
-
-        self._pending_list = []
-
-        for cb in callbacks:
-            cb(True)
-
-        callback()
-
-        return True
+import anyjson
+import eventlet
+
+import jsonpath
+from conductor.openstack.common import log as logging
+import conductor.helpers
+from command import CommandBase
+import conductor.config
+from heatclient.client import Client
+import heatclient.exc
+from keystoneclient.v2_0 import client as ksclient
+import types
+
+log = logging.getLogger(__name__)
+
+
+class HeatExecutor(CommandBase):
+    def __init__(self, stack, token, tenant_id):
+        self._update_pending_list = []
+        self._delete_pending_list = []
+        self._stack = stack
+        settings = conductor.config.CONF.heat
+
+        client = ksclient.Client(endpoint=settings.auth_url)
+        auth_data = client.tokens.authenticate(
+            tenant_id=tenant_id,
+            token=token)
+
+        scoped_token = auth_data.id
+
+        heat_url = jsonpath.jsonpath(auth_data.serviceCatalog,
+            "$[?(@.name == 'heat')].endpoints[0].publicURL")[0]
+
+        self._heat_client = Client('1', heat_url,
+            token_only=True, token=scoped_token)
+
+    def execute(self, command, callback, **kwargs):
+        log.debug('Got command {0} on stack {1}'.format(command, self._stack))
+
+        if command == 'CreateOrUpdate':
+            return self._execute_create_update(
+                kwargs['template'],
+                kwargs['mappings'],
+                kwargs['arguments'],
+                callback)
+        elif command == 'Delete':
+            return self._execute_delete(callback)
+
+    def _execute_create_update(self, template, mappings, arguments, callback):
+        with open('data/templates/cf/%s.template' % template) as template_file:
+            template_data = template_file.read()
+
+        template_data = conductor.helpers.transform_json(
+            anyjson.loads(template_data), mappings)
+
+        self._update_pending_list.append({
+            'template': template_data,
+            'arguments': arguments,
+            'callback': callback
+        })
+
+    def _execute_delete(self, callback):
+        self._delete_pending_list.append({
+            'callback': callback
+        })
+
+    def has_pending_commands(self):
+        return len(self._update_pending_list) + \
+            len(self._delete_pending_list) > 0
+
+    def execute_pending(self):
+        r1 = self._execute_pending_updates()
+        r2 = self._execute_pending_deletes()
+        return r1 or r2
+
+    def _execute_pending_updates(self):
+        if not len(self._update_pending_list):
+            return False
+
+        template, arguments = self._get_current_template()
+        stack_exists = (template != {})
+
+        for t in self._update_pending_list:
+            template = conductor.helpers.merge_dicts(
+                template, t['template'], max_levels=2)
+            arguments = conductor.helpers.merge_dicts(
+                arguments, t['arguments'], max_levels=1)
+
+        log.info(
+            'Executing heat template {0} with arguments {1} on stack {2}'
+            .format(anyjson.dumps(template), arguments, self._stack))
+
+        if stack_exists:
+            self._heat_client.stacks.update(
+                stack_id=self._stack,
+                parameters=arguments,
+                template=template)
+            log.debug(
+                'Waiting for the stack {0} to be update'.format(self._stack))
+            self._wait_state('UPDATE_COMPLETE')
+            log.info('Stack {0} updated'.format(self._stack))
+        else:
+            self._heat_client.stacks.create(
+                stack_name=self._stack,
+                parameters=arguments,
+                template=template)
+            log.debug('Waiting for the stack {0} to be create'.format(
+                self._stack))
+            self._wait_state('CREATE_COMPLETE')
+            log.info('Stack {0} created'.format(self._stack))
+
+        pending_list = self._update_pending_list
+        self._update_pending_list = []
+
+        for item in pending_list:
+            item['callback'](True)
+
+        return True
+
+    def _execute_pending_deletes(self):
+        if not len(self._delete_pending_list):
+            return False
+
+        log.debug('Deleting stack {0}'.format(self._stack))
+        try:
+            self._heat_client.stacks.delete(
+                stack_id=self._stack)
+            log.debug(
+                'Waiting for the stack {0} to be deleted'.format(self._stack))
+            self._wait_state(['DELETE_COMPLETE', ''])
+            log.info('Stack {0} deleted'.format(self._stack))
+        except Exception as ex:
+            log.exception(ex)
+
+        pending_list = self._delete_pending_list
+        self._delete_pending_list = []
+
+        for item in pending_list:
+            item['callback'](True)
+        return True
+
+    def _get_current_template(self):
+        try:
+            stack_info = self._heat_client.stacks.get(stack_id=self._stack)
+            template = self._heat_client.stacks.template(
+                stack_id='{0}/{1}'.format(stack_info.stack_name, stack_info.id))
+            return template, stack_info.parameters
+        except heatclient.exc.HTTPNotFound:
+            return {}, {}
+
+    def _wait_state(self, state):
+        if isinstance(state, types.ListType):
+            states = state
+        else:
+            states = [state]
+
+        while True:
+            try:
+                status = self._heat_client.stacks.get(
+                    stack_id=self._stack).stack_status
+            except heatclient.exc.HTTPNotFound:
+                status = ''
+
+            if 'IN_PROGRESS' in status:
+                eventlet.sleep(1)
+                continue
+            if status not in states:
+                raise EnvironmentError()
+            return
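Note: the new executor is deliberately two-phase: execute() only queues a command, and nothing talks to Heat until execute_pending() merges everything queued into a single stack create/update (or delete) and waits for the stack to settle. A hedged sketch of the calling convention (stack, template and argument names are placeholders, and a reachable Keystone/Heat endpoint is assumed):

# Hypothetical call sequence; not taken from the commit.
executor = HeatExecutor('my-environment', '<unscoped token>', '<tenant id>')
executor.execute(command='CreateOrUpdate', template='SomeTemplate',
                 mappings={}, arguments={'KeyName': 'my-keypair'},
                 callback=lambda result: None)   # only queued at this point
executor.execute_pending()                       # one stacks.create/update + wait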

conductor/conductor/commands/command.py (+1 -1)

@@ -2,7 +2,7 @@ class CommandBase(object):
     def execute(self, **kwargs):
         pass
 
-    def execute_pending(self, callback):
+    def execute_pending(self):
         return False
 
     def has_pending_commands(self):

conductor/conductor/commands/dispatcher.py (+33 -44)

@@ -1,44 +1,33 @@
-import command
-import cloud_formation
-import windows_agent
-
-
-class CommandDispatcher(command.CommandBase):
-    def __init__(self, environment_name, rmqclient):
-        self._command_map = {
-            'cf': cloud_formation.HeatExecutor(environment_name),
-            'agent': windows_agent.WindowsAgentExecutor(
-                environment_name, rmqclient)
-        }
-
-    def execute(self, name, **kwargs):
-        self._command_map[name].execute(**kwargs)
-
-    def execute_pending(self, callback):
-        result = 0
-        count = [0]
-
-        def on_result():
-            count[0] -= 1
-            if not count[0]:
-                callback()
-
-        for command in self._command_map.values():
-            count[0] += 1
-            result += 1
-            if not command.execute_pending(on_result):
-                count[0] -= 1
-                result -= 1
-
-        return result > 0
-
-    def has_pending_commands(self):
-        result = False
-        for command in self._command_map.values():
-            result |= command.has_pending_commands()
-
-        return result
-
-    def close(self):
-        for t in self._command_map.values():
-            t.close()
+import command
+import cloud_formation
+import windows_agent
+
+
+class CommandDispatcher(command.CommandBase):
+    def __init__(self, environment_id, rmqclient, token, tenant_id):
+        self._command_map = {
+            'cf': cloud_formation.HeatExecutor(environment_id, token, tenant_id),
+            'agent': windows_agent.WindowsAgentExecutor(
+                environment_id, rmqclient)
+        }
+
+    def execute(self, name, **kwargs):
+        self._command_map[name].execute(**kwargs)
+
+    def execute_pending(self):
+        result = False
+        for command in self._command_map.values():
+            result |= command.execute_pending()
+
+        return result
+
+    def has_pending_commands(self):
+        result = False
+        for command in self._command_map.values():
+            result |= command.has_pending_commands()
+
+        return result
+
+    def close(self):
+        for t in self._command_map.values():
+            t.close()
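Note: the dispatcher now simply fans a step out to both executors and reports whether either of them still had work; callers keep invoking execute_pending() until it returns False (see the loop in app.py above). A hedged sketch of that contract (the agent template name and queue arguments are illustrative):

# Illustrative only; rmqclient, token and tenant id come from the task context.
dispatcher = CommandDispatcher('my-environment', rmqclient,
                               '<token>', '<tenant id>')
dispatcher.execute(name='agent', template='SomeAgentTemplate', mappings={},
                   host='unit1', service='ad', callback=lambda body: None)
while dispatcher.execute_pending():   # False once both executors have drained
    pass
dispatcher.close()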

conductor/conductor/commands/windows_agent.py (+32 -36)

@@ -1,65 +1,61 @@
 import json
 import uuid
 
+from conductor.openstack.common import log as logging
+from conductor.rabbitmq import Message
 import conductor.helpers
 from command import CommandBase
 
+log = logging.getLogger(__name__)
+
 
 class WindowsAgentExecutor(CommandBase):
     def __init__(self, stack, rmqclient):
         self._stack = stack
         self._rmqclient = rmqclient
-        self._callback = None
         self._pending_list = []
-        self._current_pending_list = []
-        rmqclient.subscribe('-execution-results', self._on_message)
+        self._results_queue = '-execution-results-%s' % str(stack).lower()
+        rmqclient.declare(self._results_queue)
 
-    def execute(self, template, mappings, host, callback):
-        with open('data/templates/agent/%s.template' %
-                  template) as template_file:
-            template_data = template_file.read()
+    def execute(self, template, mappings, host, service, callback):
+        with open('data/templates/agent/%s.template' % template) as file:
+            template_data = file.read()
 
-        template_data = json.dumps(conductor.helpers.transform_json(
-            json.loads(template_data), mappings))
+        template_data = conductor.helpers.transform_json(
+            json.loads(template_data), mappings)
 
+        id = str(uuid.uuid4()).lower()
+        host = ('%s-%s-%s' % (self._stack, service, host)).lower()
         self._pending_list.append({
-            'id': str(uuid.uuid4()).lower(),
-            'template': template_data,
-            'host': ('%s-%s' % (self._stack, host)).lower().replace(' ', '-'),
+            'id': id,
            'callback': callback
        })
 
-    def _on_message(self, body, message_id, **kwargs):
-        msg_id = message_id.lower()
-        item, index = conductor.helpers.find(lambda t: t['id'] == msg_id,
-                                             self._current_pending_list)
-        if item:
-            self._current_pending_list.pop(index)
-            item['callback'](json.loads(body))
-            if self._callback and not self._current_pending_list:
-                cb = self._callback
-                self._callback = None
-                cb()
+        msg = Message()
+        msg.body = template_data
+        msg.id = id
+        self._rmqclient.declare(host)
+        self._rmqclient.send(message=msg, key=host)
+        log.info('Sending RMQ message {0} to {1} with id {2}'.format(
+            template_data, host, id))
 
     def has_pending_commands(self):
         return len(self._pending_list) > 0
 
-    def execute_pending(self, callback):
+    def execute_pending(self):
        if not self.has_pending_commands():
            return False
 
-        self._current_pending_list = self._pending_list
-        self._pending_list = []
-
-        self._callback = callback
-
-        for rec in self._current_pending_list:
-            self._rmqclient.send(
-                queue=rec['host'], data=rec['template'], message_id=rec['id'])
-            print 'Sending RMQ message %s to %s' % (
-                rec['template'], rec['host'])
+        with self._rmqclient.open(self._results_queue) as subscription:
+            while self.has_pending_commands():
+                msg = subscription.get_message()
+                msg_id = msg.id.lower()
+                item, index = conductor.helpers.find(
+                    lambda t: t['id'] == msg_id, self._pending_list)
+                if item:
+                    self._pending_list.pop(index)
+                    item['callback'](msg.body)
 
        return True
 
-    def close(self):
-        self._rmqclient.unsubscribe('-execution-results')
+
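Note: queue names are now derived rather than configured. A small sketch of the convention implied by the code above and by prepare_user_data() (values are illustrative):

# For stack 'my-env', service 'ad' and unit/host 'unit1':
stack, service, host = 'my-env', 'ad', 'unit1'
input_queue = ('%s-%s-%s' % (stack, service, host)).lower()    # 'my-env-ad-unit1'
results_queue = '-execution-results-%s' % str(stack).lower()   # '-execution-results-my-env'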

conductor/conductor/config.py (+213 -19)

@@ -1,19 +1,213 @@
-from ConfigParser import SafeConfigParser
-
-
-class Config(object):
-    CONFIG_PATH = './etc/app.config'
-
-    def __init__(self, filename=None):
-        self.config = SafeConfigParser()
-        self.config.read(filename or self.CONFIG_PATH)
-
-    def get_setting(self, section, name, default=None):
-        if not self.config.has_option(section, name):
-            return default
-        return self.config.get(section, name)
-
-    def __getitem__(self, item):
-        parts = item.rsplit('.', 1)
-        return self.get_setting(
-            parts[0] if len(parts) == 2 else 'DEFAULT', parts[-1])
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Routines for configuring Glance
+"""
+
+import logging
+import logging.config
+import logging.handlers
+import os
+import sys
+
+from oslo.config import cfg
+from paste import deploy
+
+from conductor.version import version_info as version
+from ConfigParser import SafeConfigParser
+
+paste_deploy_opts = [
+    cfg.StrOpt('flavor'),
+    cfg.StrOpt('config_file'),
+]
+
+rabbit_opts = [
+    cfg.StrOpt('host', default='localhost'),
+    cfg.IntOpt('port', default=5672),
+    cfg.StrOpt('login', default='guest'),
+    cfg.StrOpt('password', default='guest'),
+    cfg.StrOpt('virtual_host', default='/'),
+]
+
+heat_opts = [
+    cfg.StrOpt('auth_url'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(paste_deploy_opts, group='paste_deploy')
+CONF.register_opts(rabbit_opts, group='rabbitmq')
+CONF.register_opts(heat_opts, group='heat')
+
+
+CONF.import_opt('verbose', 'conductor.openstack.common.log')
+CONF.import_opt('debug', 'conductor.openstack.common.log')
+CONF.import_opt('log_dir', 'conductor.openstack.common.log')
+CONF.import_opt('log_file', 'conductor.openstack.common.log')
+CONF.import_opt('log_config', 'conductor.openstack.common.log')
+CONF.import_opt('log_format', 'conductor.openstack.common.log')
+CONF.import_opt('log_date_format', 'conductor.openstack.common.log')
+CONF.import_opt('use_syslog', 'conductor.openstack.common.log')
+CONF.import_opt('syslog_log_facility', 'conductor.openstack.common.log')
+
+
+def parse_args(args=None, usage=None, default_config_files=None):
+    CONF(args=args,
+         project='conductor',
+         version=version.cached_version_string(),
+         usage=usage,
+         default_config_files=default_config_files)
+
+
+def setup_logging():
+    """
+    Sets up the logging options for a log with supplied name
+    """
+
+    if CONF.log_config:
+        # Use a logging configuration file for all settings...
+        if os.path.exists(CONF.log_config):
+            logging.config.fileConfig(CONF.log_config)
+            return
+        else:
+            raise RuntimeError("Unable to locate specified logging "
+                               "config file: %s" % CONF.log_config)
+
+    root_logger = logging.root
+    if CONF.debug:
+        root_logger.setLevel(logging.DEBUG)
+    elif CONF.verbose:
+        root_logger.setLevel(logging.INFO)
+    else:
+        root_logger.setLevel(logging.WARNING)
+
+    formatter = logging.Formatter(CONF.log_format, CONF.log_date_format)
+
+    if CONF.use_syslog:
+        try:
+            facility = getattr(logging.handlers.SysLogHandler,
+                               CONF.syslog_log_facility)
+        except AttributeError:
+            raise ValueError(_("Invalid syslog facility"))
+
+        handler = logging.handlers.SysLogHandler(address='/dev/log',
+                                                 facility=facility)
+    elif CONF.log_file:
+        logfile = CONF.log_file
+        if CONF.log_dir:
+            logfile = os.path.join(CONF.log_dir, logfile)
+        handler = logging.handlers.WatchedFileHandler(logfile)
+    else:
+        handler = logging.StreamHandler(sys.stdout)
+
+    handler.setFormatter(formatter)
+    root_logger.addHandler(handler)
+
+
+def _get_deployment_flavor():
+    """
+    Retrieve the paste_deploy.flavor config item, formatted appropriately
+    for appending to the application name.
+    """
+    flavor = CONF.paste_deploy.flavor
+    return '' if not flavor else ('-' + flavor)
+
+
+def _get_paste_config_path():
+    paste_suffix = '-paste.ini'
+    conf_suffix = '.conf'
+    if CONF.config_file:
+        # Assume paste config is in a paste.ini file corresponding
+        # to the last config file
+        path = CONF.config_file[-1].replace(conf_suffix, paste_suffix)
+    else:
+        path = CONF.prog + '-paste.ini'
+    return CONF.find_file(os.path.basename(path))
+
+
+def _get_deployment_config_file():
+    """
+    Retrieve the deployment_config_file config item, formatted as an
+    absolute pathname.
+    """
+    path = CONF.paste_deploy.config_file
+    if not path:
+        path = _get_paste_config_path()
+    if not path:
+        msg = "Unable to locate paste config file for %s." % CONF.prog
+        raise RuntimeError(msg)
+    return os.path.abspath(path)
+
+
+def load_paste_app(app_name=None):
+    """
+    Builds and returns a WSGI app from a paste config file.
+
+    We assume the last config file specified in the supplied ConfigOpts
+    object is the paste config file.
+
+    :param app_name: name of the application to load
+
+    :raises RuntimeError when config file cannot be located or application
+            cannot be loaded from config file
+    """
+    if app_name is None:
+        app_name = CONF.prog
+
+    # append the deployment flavor to the application name,
+    # in order to identify the appropriate paste pipeline
+    app_name += _get_deployment_flavor()
+
+    conf_file = _get_deployment_config_file()
+
+    try:
+        logger = logging.getLogger(__name__)
+        logger.debug(_("Loading %(app_name)s from %(conf_file)s"),
+                     {'conf_file': conf_file, 'app_name': app_name})
+
+        app = deploy.loadapp("config:%s" % conf_file, name=app_name)
+
+        # Log the options used when starting if we're in debug mode...
+        if CONF.debug:
+            CONF.log_opt_values(logger, logging.DEBUG)
+
+        return app
+    except (LookupError, ImportError), e:
+        msg = _("Unable to load %(app_name)s from "
+                "configuration file %(conf_file)s."
+                "\nGot: %(e)r") % locals()
+        logger.error(msg)
+        raise RuntimeError(msg)
+
+
+class Config(object):
+    CONFIG_PATH = './etc/app.config'
+
+    def __init__(self, filename=None):
+        self.config = SafeConfigParser()
+        self.config.read(filename or self.CONFIG_PATH)
+
+    def get_setting(self, section, name, default=None):
+        if not self.config.has_option(section, name):
+            return default
+        return self.config.get(section, name)
+
+    def __getitem__(self, item):
+        parts = item.rsplit('.', 1)
+        return self.get_setting(
+            parts[0] if len(parts) == 2 else 'DEFAULT', parts[-1])
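Note: the options registered above map onto group sections of an oslo.config file such as the conductor/etc/conductor.conf added elsewhere in this commit. A hedged sketch of how the new entry point reads them (the config values in the comment are illustrative, not the shipped defaults):

# Assuming a config file on oslo.config's search path containing e.g.:
#   [rabbitmq]
#   host = rabbit.example.com
#   [heat]
#   auth_url = http://keystone.example.com:5000/v2.0
from conductor import config

config.parse_args(args=[])
print config.CONF.rabbitmq.host, config.CONF.rabbitmq.port  # localhost / 5672 unless overridden
print config.CONF.heat.auth_url                             # None unless set in [heat]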

dashboard/windc/__init__.py → conductor/conductor/openstack/__init__.py (renamed)

dashboard/windcclient/__init__.py → conductor/conductor/openstack/common/__init__.py (renamed)

conductor/conductor/openstack/common/eventlet_backdoor.py (+87 -0)

@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack Foundation.
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import gc
+import pprint
+import sys
+import traceback
+
+import eventlet
+import eventlet.backdoor
+import greenlet
+from oslo.config import cfg
+
+eventlet_backdoor_opts = [
+    cfg.IntOpt('backdoor_port',
+               default=None,
+               help='port for eventlet backdoor to listen')
+]
+
+CONF = cfg.CONF
+CONF.register_opts(eventlet_backdoor_opts)
+
+
+def _dont_use_this():
+    print "Don't use this, just disconnect instead"
+
+
+def _find_objects(t):
+    return filter(lambda o: isinstance(o, t), gc.get_objects())
+
+
+def _print_greenthreads():
+    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
+        print i, gt
+        traceback.print_stack(gt.gr_frame)
+        print
+
+
+def _print_nativethreads():
+    for threadId, stack in sys._current_frames().items():
+        print threadId
+        traceback.print_stack(stack)
+        print
+
+
+def initialize_if_enabled():
+    backdoor_locals = {
+        'exit': _dont_use_this,      # So we don't exit the entire process
+        'quit': _dont_use_this,      # So we don't exit the entire process
+        'fo': _find_objects,
+        'pgt': _print_greenthreads,
+        'pnt': _print_nativethreads,
+    }
+
+    if CONF.backdoor_port is None:
+        return None
+
+    # NOTE(johannes): The standard sys.displayhook will print the value of
+    # the last expression and set it to __builtin__._, which overwrites
+    # the __builtin__._ that gettext sets. Let's switch to using pprint
+    # since it won't interact poorly with gettext, and it's easier to
+    # read the output too.
+    def displayhook(val):
+        if val is not None:
+            pprint.pprint(val)
+    sys.displayhook = displayhook
+
+    sock = eventlet.listen(('localhost', CONF.backdoor_port))
+    port = sock.getsockname()[1]
+    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
+                     locals=backdoor_locals)
+    return port

windc/openstack/oldcommon/exception.py → conductor/conductor/openstack/common/exception.py (+10 -15)

@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,17 +21,9 @@ Exceptions common to OpenStack projects
 
 import logging
 
+from conductor.openstack.common.gettextutils import _
 
-class ProcessExecutionError(IOError):
-    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
-                 description=None):
-        if description is None:
-            description = "Unexpected error while running command."
-        if exit_code is None:
-            exit_code = '-'
-        message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % (
-                  description, cmd, exit_code, stdout, stderr)
-        IOError.__init__(self, message)
+_FATAL_EXCEPTION_FORMAT_ERRORS = False
 
 
 class Error(Exception):
@@ -109,7 +101,7 @@ def wrap_exception(f):
         except Exception, e:
             if not isinstance(e, Error):
                 #exc_type, exc_value, exc_traceback = sys.exc_info()
-                logging.exception('Uncaught exception')
+                logging.exception(_('Uncaught exception'))
                 #logging.error(traceback.extract_stack(exc_traceback))
                 raise Error(str(e))
             raise
@@ -131,9 +123,12 @@ class OpenstackException(Exception):
         try:
             self._error_string = self.message % kwargs
 
-        except Exception:
-            # at least get the core message out if something happened
-            self._error_string = self.message
+        except Exception as e:
+            if _FATAL_EXCEPTION_FORMAT_ERRORS:
+                raise e
+            else:
+                # at least get the core message out if something happened
+                self._error_string = self.message
 
     def __str__(self):
         return self._error_string

windc/openstack/__init__.py → conductor/conductor/openstack/common/gettextutils.py (+17 -8)

@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2011 OpenStack LLC.
+# Copyright 2012 Red Hat, Inc.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,10 +15,19 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-# This ensures the openstack namespace is defined
-try:
-    import pkg_resources
-    pkg_resources.declare_namespace(__name__)
-except ImportError:
-    import pkgutil
-    __path__ = pkgutil.extend_path(__path__, __name__)
+"""
+gettext for openstack-common modules.
+
+Usual usage in an openstack.common module:
+
+    from conductor.openstack.common.gettextutils import _
+"""
+
+import gettext
+
+
+t = gettext.translation('conductor', 'locale', fallback=True)
+
+
+def _(msg):
+    return t.ugettext(msg)

conductor/conductor/openstack/common/importutils.py (+67 -0)

@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Import related utilities and helper functions.
+"""
+
+import sys
+import traceback
+
+
+def import_class(import_str):
+    """Returns a class from a string including module and class"""
+    mod_str, _sep, class_str = import_str.rpartition('.')
+    try:
+        __import__(mod_str)
+        return getattr(sys.modules[mod_str], class_str)
+    except (ValueError, AttributeError):
+        raise ImportError('Class %s cannot be found (%s)' %
+                          (class_str,
+                           traceback.format_exception(*sys.exc_info())))
+
+
+def import_object(import_str, *args, **kwargs):
+    """Import a class and return an instance of it."""
+    return import_class(import_str)(*args, **kwargs)
+
+
+def import_object_ns(name_space, import_str, *args, **kwargs):
+    """
+    Import a class and return an instance of it, first by trying
+    to find the class in a default namespace, then failing back to
+    a full path if not found in the default namespace.
+    """
+    import_value = "%s.%s" % (name_space, import_str)
+    try:
+        return import_class(import_value)(*args, **kwargs)
+    except ImportError:
+        return import_class(import_str)(*args, **kwargs)
+
+
+def import_module(import_str):
+    """Import a module."""
+    __import__(import_str)
+    return sys.modules[import_str]
+
+
+def try_import(import_str, default=None):
+    """Try to import a module and if it fails return default."""
+    try:
+        return import_module(import_str)
+    except ImportError:
+        return default
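Note: a brief, standard-library-only illustration of the helpers above (not part of the commit):

# import_class resolves a dotted path; import_object instantiates the result.
encoder_cls = import_class('json.JSONEncoder')
encoder = import_object('json.JSONEncoder', sort_keys=True)
missing = try_import('no_such_module')          # returns None instead of raising
print encoder_cls.__name__, type(encoder).__name__, missing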

+ 141
- 0
conductor/conductor/openstack/common/jsonutils.py View File

@@ -0,0 +1,141 @@
1
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
+
3
+# Copyright 2010 United States Government as represented by the
4
+# Administrator of the National Aeronautics and Space Administration.
5
+# Copyright 2011 Justin Santa Barbara
6
+# All Rights Reserved.
7
+#
8
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
9
+#    not use this file except in compliance with the License. You may obtain
10
+#    a copy of the License at
11
+#
12
+#         http://www.apache.org/licenses/LICENSE-2.0
13
+#
14
+#    Unless required by applicable law or agreed to in writing, software
15
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17
+#    License for the specific language governing permissions and limitations
18
+#    under the License.
19
+
20
+'''
21
+JSON related utilities.
22
+
23
+This module provides a few things:
24
+
25
+    1) A handy function for getting an object down to something that can be
26
+    JSON serialized.  See to_primitive().
27
+
28
+    2) Wrappers around loads() and dumps().  The dumps() wrapper will
29
+    automatically use to_primitive() for you if needed.
30
+
31
+    3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
32
+    is available.
33
+'''
34
+
35
+
36
+import datetime
37
+import functools
38
+import inspect
39
+import itertools
40
+import json
41
+import xmlrpclib
42
+
43
+from conductor.openstack.common import timeutils
44
+
45
+
46
+def to_primitive(value, convert_instances=False, convert_datetime=True,
47
+                 level=0, max_depth=3):
48
+    """Convert a complex object into primitives.
49
+
50
+    Handy for JSON serialization. We can optionally handle instances,
51
+    but since this is a recursive function, we could have cyclical
52
+    data structures.
53
+
54
+    To handle cyclical data structures we could track the actual objects
55
+    visited in a set, but not all objects are hashable. Instead we just
56
+    track the depth of the object inspections and don't go too deep.
57
+
58
+    Therefore, convert_instances=True is lossy ... be aware.
59
+
60
+    """
61
+    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
62
+             inspect.isfunction, inspect.isgeneratorfunction,
63
+             inspect.isgenerator, inspect.istraceback, inspect.isframe,
64
+             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
65
+             inspect.isabstract]
66
+    for test in nasty:
67
+        if test(value):
68
+            return unicode(value)
69
+
70
+    # value of itertools.count doesn't get caught by inspects
71
+    # above and results in infinite loop when list(value) is called.
72
+    if type(value) == itertools.count:
73
+        return unicode(value)
74
+
75
+    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
76
+    #              tests that raise an exception in a mocked method that
77
+    #              has a @wrap_exception with a notifier will fail. If
78
+    #              we up the dependency to 0.5.4 (when it is released) we
79
+    #              can remove this workaround.
80
+    if getattr(value, '__module__', None) == 'mox':
81
+        return 'mock'
82
+
83
+    if level > max_depth:
84
+        return '?'
85
+
86
+    # The try block may not be necessary after the class check above,
87
+    # but just in case ...
88
+    try:
89
+        recursive = functools.partial(to_primitive,
90
+                                      convert_instances=convert_instances,
91
+                                      convert_datetime=convert_datetime,
92
+                                      level=level,
93
+                                      max_depth=max_depth)
94
+        # It's not clear why xmlrpclib created their own DateTime type, but
95
+        # for our purposes, make it a datetime type which is explicitly
96
+        # handled
97
+        if isinstance(value, xmlrpclib.DateTime):
98
+            value = datetime.datetime(*tuple(value.timetuple())[:6])
99
+
100
+        if isinstance(value, (list, tuple)):
101
+            return [recursive(v) for v in value]
102
+        elif isinstance(value, dict):
103
+            return dict((k, recursive(v)) for k, v in value.iteritems())
104
+        elif convert_datetime and isinstance(value, datetime.datetime):
105
+            return timeutils.strtime(value)
106
+        elif hasattr(value, 'iteritems'):
107
+            return recursive(dict(value.iteritems()), level=level + 1)
108
+        elif hasattr(value, '__iter__'):
109
+            return recursive(list(value))
110
+        elif convert_instances and hasattr(value, '__dict__'):
111
+            # Likely an instance of something. Watch for cycles.
112
+            # Ignore class member vars.
113
+            return recursive(value.__dict__, level=level + 1)
114
+        else:
115
+            return value
116
+    except TypeError:
117
+        # Class objects are tricky since they may define something like
118
+        # __iter__, but it isn't callable as list().
119
+        return unicode(value)
120
+
121
+
122
+def dumps(value, default=to_primitive, **kwargs):
123
+    return json.dumps(value, default=default, **kwargs)
124
+
125
+
126
+def loads(s):
127
+    return json.loads(s)
128
+
129
+
130
+def load(s):
131
+    return json.load(s)
132
+
133
+
134
+try:
135
+    import anyjson
136
+except ImportError:
137
+    pass
138
+else:
139
+    anyjson._modules.append((__name__, 'dumps', TypeError,
140
+                                       'loads', ValueError, 'load'))
141
+    anyjson.force_implementation(__name__)
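
A minimal usage sketch of the helpers above (Python 2, matching this tree; the Job class and timestamp are made-up examples). Plain instances pass through untouched unless convert_instances=True, and dumps() routes anything json cannot serialize through to_primitive().

import datetime

from conductor.openstack.common import jsonutils


class Job(object):
    def __init__(self):
        self.name = 'deploy-ad'
        self.started = datetime.datetime(2013, 3, 1, 12, 0, 0)


job = Job()

# Returned unchanged: instances are not descended into by default.
print jsonutils.to_primitive(job)

# Flattened to its __dict__ (lossy and depth-limited, as the docstring warns).
print jsonutils.to_primitive(job, convert_instances=True)

# Datetimes are converted via timeutils.strtime() before JSON encoding.
print jsonutils.dumps({'started': job.started})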

+ 48
- 0
conductor/conductor/openstack/common/local.py View File

@@ -0,0 +1,48 @@
1
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
+
3
+# Copyright 2011 OpenStack Foundation.
4
+# All Rights Reserved.
5
+#
6
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
7
+#    not use this file except in compliance with the License. You may obtain
8
+#    a copy of the License at
9
+#
10
+#         http://www.apache.org/licenses/LICENSE-2.0
11
+#
12
+#    Unless required by applicable law or agreed to in writing, software
13
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15
+#    License for the specific language governing permissions and limitations
16
+#    under the License.
17
+
18
+"""Greenthread local storage of variables using weak references"""
19
+
20
+import weakref
21
+
22
+from eventlet import corolocal
23
+
24
+
25
+class WeakLocal(corolocal.local):
26
+    def __getattribute__(self, attr):
27
+        rval = corolocal.local.__getattribute__(self, attr)
28
+        if rval:
29
+            # NOTE(mikal): this bit is confusing. What is stored is a weak
30
+            # reference, not the value itself. We therefore need to look up
31
+            # the weak reference and return the inner value here.
32
+            rval = rval()
33
+        return rval
34
+
35
+    def __setattr__(self, attr, value):
36
+        value = weakref.ref(value)
37
+        return corolocal.local.__setattr__(self, attr, value)
38
+
39
+
40
+# NOTE(mikal): the name "store" should be deprecated in the future
41
+store = WeakLocal()
42
+
43
+# A "weak" store uses weak references and allows an object to fall out of scope
44
+# when it falls out of scope in the code that uses the thread local storage. A
45
+# "strong" store will hold a reference to the object so that it never falls out
46
+# of scope.
47
+weak_store = WeakLocal()
48
+strong_store = corolocal.local()
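
A short sketch of the weak-store semantics (Python 2, assumes eventlet is installed; RequestContext is just an illustrative class, and the immediate clearing relies on CPython reference counting).

from conductor.openstack.common import local


class RequestContext(object):
    def __init__(self, request_id):
        self.request_id = request_id


ctx = RequestContext('req-42')
local.store.context = ctx              # only a weak reference is stored
print local.store.context.request_id   # -> req-42 while ctx is alive

del ctx                                # last strong reference dropped
print local.store.context              # -> None, the weak reference was cleared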

+ 543
- 0
conductor/conductor/openstack/common/log.py View File

@@ -0,0 +1,543 @@
1
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
+
3
+# Copyright 2011 OpenStack Foundation.
4
+# Copyright 2010 United States Government as represented by the
5
+# Administrator of the National Aeronautics and Space Administration.
6
+# All Rights Reserved.
7
+#
8
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
9
+#    not use this file except in compliance with the License. You may obtain
10
+#    a copy of the License at
11
+#
12
+#         http://www.apache.org/licenses/LICENSE-2.0
13
+#
14
+#    Unless required by applicable law or agreed to in writing, software
15
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17
+#    License for the specific language governing permissions and limitations
18
+#    under the License.
19
+
20
+"""Openstack logging handler.
21
+
22
+This module adds to logging functionality by adding the option to specify
23
+a context object when calling the various log methods.  If the context object
24
+is not specified, default formatting is used. Additionally, an instance uuid
25
+may be passed as part of the log message, which is intended to make it easier
26
+for admins to find messages related to a specific instance.
27
+
28
+It also allows setting of formatting information through conf.
29
+
30
+"""
31
+
32
+import ConfigParser
33
+import cStringIO
34
+import inspect
35
+import itertools
36
+import logging
37
+import logging.config
38
+import logging.handlers
39
+import os
40
+import stat
41
+import sys
42
+import traceback
43
+
44
+from oslo.config import cfg
45
+
46
+from conductor.openstack.common.gettextutils import _
47
+from conductor.openstack.common import jsonutils
48
+from conductor.openstack.common import local
49
+from conductor.openstack.common import notifier
50
+
51
+
52
+_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
53
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
54
+
55
+common_cli_opts = [
56
+    cfg.BoolOpt('debug',
57
+                short='d',
58
+                default=False,
59
+                help='Print debugging output (set logging level to '
60
+                     'DEBUG instead of default WARNING level).'),
61
+    cfg.BoolOpt('verbose',
62
+                short='v',
63
+                default=False,
64
+                help='Print more verbose output (set logging level to '
65
+                     'INFO instead of default WARNING level).'),
66
+]
67
+
68
+logging_cli_opts = [
69
+    cfg.StrOpt('log-config',
70
+               metavar='PATH',
71
+               help='If this option is specified, the logging configuration '
72
+                    'file specified is used and overrides any other logging '
73
+                    'options specified. Please see the Python logging module '
74
+                    'documentation for details on logging configuration '
75
+                    'files.'),
76
+    cfg.StrOpt('log-format',
77
+               default=_DEFAULT_LOG_FORMAT,
78
+               metavar='FORMAT',
79
+               help='A logging.Formatter log message format string which may '
80
+                    'use any of the available logging.LogRecord attributes. '
81
+                    'Default: %(default)s'),
82
+    cfg.StrOpt('log-date-format',
83
+               default=_DEFAULT_LOG_DATE_FORMAT,
84
+               metavar='DATE_FORMAT',
85
+               help='Format string for %%(asctime)s in log records. '
86
+                    'Default: %(default)s'),
87
+    cfg.StrOpt('log-file',
88
+               metavar='PATH',
89
+               deprecated_name='logfile',
90
+               help='(Optional) Name of log file to output to. '
91
+                    'If no default is set, logging will go to stdout.'),
92
+    cfg.StrOpt('log-dir',
93
+               deprecated_name='logdir',
94
+               help='(Optional) The base directory used for relative '
95
+                    '--log-file paths'),
96
+    cfg.BoolOpt('use-syslog',
97
+                default=False,
98
+                help='Use syslog for logging.'),
99
+    cfg.StrOpt('syslog-log-facility',
100
+               default='LOG_USER',
101
+               help='syslog facility to receive log lines')
102
+]
103
+
104
+generic_log_opts = [
105
+    cfg.BoolOpt('use_stderr',
106
+                default=True,
107
+                help='Log output to standard error'),
108
+    cfg.StrOpt('logfile_mode',
109
+               default='0644',
110
+               help='Default file mode used when creating log files'),
111
+]
112
+
113
+log_opts = [
114
+    cfg.StrOpt('logging_context_format_string',
115
+               default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
116
+                       '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
117
+                       '%(message)s',
118
+               help='format string to use for log messages with context'),
119
+    cfg.StrOpt('logging_default_format_string',
120
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
121
+                       '%(name)s [-] %(instance)s%(message)s',
122
+               help='format string to use for log messages without context'),
123
+    cfg.StrOpt('logging_debug_format_suffix',
124
+               default='%(funcName)s %(pathname)s:%(lineno)d',
125
+               help='data to append to log format when level is DEBUG'),
126
+    cfg.StrOpt('logging_exception_prefix',
127
+               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
128
+               '%(instance)s',
129
+               help='prefix each line of exception output with this format'),
130
+    cfg.ListOpt('default_log_levels',
131
+                default=[
132
+                    'amqplib=WARN',
133
+                    'sqlalchemy=WARN',
134
+                    'boto=WARN',
135
+                    'suds=INFO',
136
+                    'keystone=INFO',
137
+                    'eventlet.wsgi.server=WARN'
138
+                ],
139
+                help='list of logger=LEVEL pairs'),
140
+    cfg.BoolOpt('publish_errors',
141
+                default=False,
142
+                help='publish error events'),
143
+    cfg.BoolOpt('fatal_deprecations',
144
+                default=False,
145
+                help='make deprecations fatal'),
146
+
147
+    # NOTE(mikal): there are two options here because sometimes we are handed
148
+    # a full instance (and could include more information), and other times we
149
+    # are just handed a UUID for the instance.
150
+    cfg.StrOpt('instance_format',
151
+               default='[instance: %(uuid)s] ',
152
+               help='If an instance is passed with the log message, format '
153
+                    'it like this'),
154
+    cfg.StrOpt('instance_uuid_format',
155
+               default='[instance: %(uuid)s] ',
156
+               help='If an instance UUID is passed with the log message, '
157
+                    'format it like this'),
158
+]
159
+
160
+CONF = cfg.CONF
161
+CONF.register_cli_opts(common_cli_opts)
162
+CONF.register_cli_opts(logging_cli_opts)
163
+CONF.register_opts(generic_log_opts)
164
+CONF.register_opts(log_opts)
165
+
166
+# our new audit level
167
+# NOTE(jkoelker) Since we synthesized an audit level, make the logging
168
+#                module aware of it so it acts like other levels.
169
+logging.AUDIT = logging.INFO + 1
170
+logging.addLevelName(logging.AUDIT, 'AUDIT')
171
+
172
+
173
+try:
174
+    NullHandler = logging.NullHandler
175
+except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
176
+    class NullHandler(logging.Handler):
177
+        def handle(self, record):
178
+            pass
179
+
180
+        def emit(self, record):
181
+            pass
182
+
183
+        def createLock(self):
184
+            self.lock = None
185
+
186
+
187
+def _dictify_context(context):
188
+    if context is None:
189
+        return None
190
+    if not isinstance(context, dict) and getattr(context, 'to_dict', None):
191
+        context = context.to_dict()
192
+    return context
193
+
194
+
195
+def _get_binary_name():
196
+    return os.path.basename(inspect.stack()[-1][1])
197
+
198
+
199
+def _get_log_file_path(binary=None):
200
+    logfile = CONF.log_file
201
+    logdir = CONF.log_dir
202
+
203
+    if logfile and not logdir:
204
+        return logfile
205
+
206
+    if logfile and logdir:
207
+        return os.path.join(logdir, logfile)
208
+
209
+    if logdir:
210
+        binary = binary or _get_binary_name()
211
+        return '%s.log' % (os.path.join(logdir, binary),)
212
+
213
+
214
+class ContextAdapter(logging.LoggerAdapter):
215
+    warn = logging.LoggerAdapter.warning
216
+
217
+    def __init__(self, logger, project_name, version_string):
218
+        self.logger = logger
219
+        self.project = project_name
220
+        self.version = version_string
221
+
222
+    def audit(self, msg, *args, **kwargs):
223
+        self.log(logging.AUDIT, msg, *args, **kwargs)
224
+
225
+    def deprecated(self, msg, *args, **kwargs):
226
+        stdmsg = _("Deprecated: %s") % msg
227
+        if CONF.fatal_deprecations:
228
+            self.critical(stdmsg, *args, **kwargs)
229
+            raise DeprecatedConfig(msg=stdmsg)
230
+        else:
231
+            self.warn(stdmsg, *args, **kwargs)
232
+
233
+    def process(self, msg, kwargs):
234
+        if 'extra' not in kwargs:
235
+            kwargs['extra'] = {}
236
+        extra = kwargs['extra']
237
+
238
+        context = kwargs.pop('context', None)
239
+        if not context:
240
+            context = getattr(local.store, 'context', None)
241
+        if context:
242
+            extra.update(_dictify_context(context))
243
+
244
+        instance = kwargs.pop('instance', None)
245
+        instance_extra = ''
246
+        if instance:
247
+            instance_extra = CONF.instance_format % instance
248
+        else:
249
+            instance_uuid = kwargs.pop('instance_uuid', None)
250
+            if instance_uuid:
251
+                instance_extra = (CONF.instance_uuid_format
252
+                                  % {'uuid': instance_uuid})
253
+        extra.update({'instance': instance_extra})
254
+
255
+        extra.update({"project": self.project})
256
+        extra.update({"version": self.version})
257
+        extra['extra'] = extra.copy()
258
+        return msg, kwargs
259
+
260
+
261
+class JSONFormatter(logging.Formatter):
262
+    def __init__(self, fmt=None, datefmt=None):
263
+        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
264
+        #                since logging.config.fileConfig passes it.
265
+        self.datefmt = datefmt
266
+
267
+    def formatException(self, ei, strip_newlines=True):
268
+        lines = traceback.format_exception(*ei)
269
+        if strip_newlines:
270
+            lines = [itertools.ifilter(
271
+                lambda x: x,
272
+                line.rstrip().splitlines()) for line in lines]
273
+            lines = list(itertools.chain(*lines))
274
+        return lines
275
+
276
+    def format(self, record):
277
+        message = {'message': record.getMessage(),
278
+                   'asctime': self.formatTime(record, self.datefmt),
279
+                   'name': record.name,
280
+                   'msg': record.msg,
281
+                   'args': record.args,
282
+                   'levelname': record.levelname,
283
+                   'levelno': record.levelno,
284
+                   'pathname': record.pathname,
285
+                   'filename': record.filename,
286
+                   'module': record.module,
287
+                   'lineno': record.lineno,
288
+                   'funcname': record.funcName,
289
+                   'created': record.created,
290
+                   'msecs': record.msecs,
291
+                   'relative_created': record.relativeCreated,
292
+                   'thread': record.thread,
293
+                   'thread_name': record.threadName,
294
+                   'process_name': record.processName,
295
+                   'process': record.process,
296
+                   'traceback': None}
297
+
298
+        if hasattr(record, 'extra'):
299
+            message['extra'] = record.extra
300
+
301
+        if record.exc_info:
302
+            message['traceback'] = self.formatException(record.exc_info)
303
+
304
+        return jsonutils.dumps(message)
305
+
306
+
307
+class PublishErrorsHandler(logging.Handler):
308
+    def emit(self, record):
309
+        if ('conductor.openstack.common.notifier.log_notifier' in
310
+                CONF.notification_driver):
311
+            return
312
+        notifier.api.notify(None, 'error.publisher',
313
+                            'error_notification',
314
+                            notifier.api.ERROR,
315
+                            dict(error=record.msg))
316
+
317
+
318
+def _create_logging_excepthook(product_name):
319
+    def logging_excepthook(type, value, tb):
320
+        extra = {}
321
+        if CONF.verbose:
322
+            extra['exc_info'] = (type, value, tb)
323
+        getLogger(product_name).critical(str(value), **extra)
324
+    return logging_excepthook
325
+
326
+
327
+class LogConfigError(Exception):
328
+
329
+    message = _('Error loading logging config %(log_config)s: %(err_msg)s')
330
+
331
+    def __init__(self, log_config, err_msg):
332
+        self.log_config = log_config
333
+        self.err_msg = err_msg
334
+
335
+    def __str__(self):
336
+        return self.message % dict(log_config=self.log_config,
337
+                                   err_msg=self.err_msg)
338
+
339
+
340
+def _load_log_config(log_config):
341
+    try:
342
+        logging.config.fileConfig(log_config)
343
+    except ConfigParser.Error, exc:
344
+        raise LogConfigError(log_config, str(exc))
345
+
346
+
347
+def setup(product_name):
348
+    """Setup logging."""
349
+    if CONF.log_config:
350
+        _load_log_config(CONF.log_config)
351
+    else:
352
+        _setup_logging_from_conf()
353
+    sys.excepthook = _create_logging_excepthook(product_name)
354
+
355
+
356
+def set_defaults(logging_context_format_string):
357
+    cfg.set_defaults(log_opts,
358
+                     logging_context_format_string=
359
+                     logging_context_format_string)
360
+
361
+
362
+def _find_facility_from_conf():
363
+    facility_names = logging.handlers.SysLogHandler.facility_names
364
+    facility = getattr(logging.handlers.SysLogHandler,
365
+                       CONF.syslog_log_facility,
366
+                       None)
367
+
368
+    if facility is None and CONF.syslog_log_facility in facility_names:
369
+        facility = facility_names.get(CONF.syslog_log_facility)
370
+
371
+    if facility is None:
372
+        valid_facilities = facility_names.keys()
373
+        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
374
+                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
375
+                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
376
+                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
377
+                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
378
+        valid_facilities.extend(consts)
379
+        raise TypeError(_('syslog facility must be one of: %s') %
380
+                        ', '.join("'%s'" % fac
381
+                                  for fac in valid_facilities))
382
+
383
+    return facility
384
+
385
+
386
+def _setup_logging_from_conf():
387
+    log_root = getLogger(None).logger
388
+    for handler in log_root.handlers:
389
+        log_root.removeHandler(handler)
390
+
391
+    if CONF.use_syslog:
392
+        facility = _find_facility_from_conf()
393
+        syslog = logging.handlers.SysLogHandler(address='/dev/log',
394
+                                                facility=facility)
395
+        log_root.addHandler(syslog)
396
+
397
+    logpath = _get_log_file_path()
398
+    if logpath:
399
+        filelog = logging.handlers.WatchedFileHandler(logpath)
400
+        log_root.addHandler(filelog)
401
+
402
+        mode = int(CONF.logfile_mode, 8)
403
+        st = os.stat(logpath)
404
+        if st.st_mode != (stat.S_IFREG | mode):
405
+            os.chmod(logpath, mode)
406
+
407
+    if CONF.use_stderr:
408
+        streamlog = ColorHandler()
409
+        log_root.addHandler(streamlog)
410
+
411
+    elif not CONF.log_file:
412
+        # pass sys.stdout as a positional argument
413
+        # python2.6 calls the argument strm, in 2.7 it's stream
414
+        streamlog = logging.StreamHandler(sys.stdout)
415
+        log_root.addHandler(streamlog)
416
+
417
+    if CONF.publish_errors:
418
+        log_root.addHandler(PublishErrorsHandler(logging.ERROR))
419
+
420
+    for handler in log_root.handlers:
421
+        datefmt = CONF.log_date_format
422
+        if CONF.log_format:
423
+            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
424
+                                                   datefmt=datefmt))
425
+        else:
426
+            handler.setFormatter(LegacyFormatter(datefmt=datefmt))
427
+
428
+    if CONF.debug:
429
+        log_root.setLevel(logging.DEBUG)
430
+    elif CONF.verbose:
431
+        log_root.setLevel(logging.INFO)
432
+    else:
433
+        log_root.setLevel(logging.WARNING)
434
+
435
+    level = logging.NOTSET
436
+    for pair in CONF.default_log_levels:
437
+        mod, _sep, level_name = pair.partition('=')
438
+        level = logging.getLevelName(level_name)
439
+        logger = logging.getLogger(mod)
440
+        logger.setLevel(level)
441
+        for handler in log_root.handlers:
442
+            logger.addHandler(handler)
443
+
444
+_loggers = {}
445
+
446
+
447
+def getLogger(name='unknown', version='unknown'):
448
+    if name not in _loggers:
449
+        _loggers[name] = ContextAdapter(logging.getLogger(name),
450
+                                        name,
451
+                                        version)
452
+    return _loggers[name]
453
+
454
+
455
+class WritableLogger(object):
456
+    """A thin wrapper that responds to `write` and logs."""
457
+
458
+    def __init__(self, logger, level=logging.INFO):
459
+        self.logger = logger
460
+        self.level = level
461
+
462
+    def write(self, msg):
463
+        self.logger.log(self.level, msg)
464
+
465
+
466
+class LegacyFormatter(logging.Formatter):
467
+    """A context.RequestContext aware formatter configured through flags.
468
+
469
+    The flags used to set format strings are: logging_context_format_string
470
+    and logging_default_format_string.  You can also specify
471
+    logging_debug_format_suffix to append extra formatting if the log level is
472
+    debug.
473
+
474
+    For information about what variables are available for the formatter see:
475
+    http://docs.python.org/library/logging.html#formatter
476
+
477
+    """
478
+
479
+    def format(self, record):
480
+        """Uses contextstring if request_id is set, otherwise default."""
481
+        # NOTE(sdague): default the fancier formatting params
482
+        # to an empty string so we don't throw an exception if
483
+        # they get used
484
+        for key in ('instance', 'color'):
485
+            if key not in record.__dict__:
486
+                record.__dict__[key] = ''
487
+
488
+        if record.__dict__.get('request_id', None):
489
+            self._fmt = CONF.logging_context_format_string
490
+        else:
491
+            self._fmt = CONF.logging_default_format_string
492
+
493
+        if (record.levelno == logging.DEBUG and
494
+                CONF.logging_debug_format_suffix):
495
+            self._fmt += " " + CONF.logging_debug_format_suffix
496
+
497
+        # Cache this on the record, Logger will respect our formatted copy
498
+        if record.exc_info:
499
+            record.exc_text = self.formatException(record.exc_info, record)
500
+        return logging.Formatter.format(self, record)
501
+
502
+    def formatException(self, exc_info, record=None):
503
+        """Format exception output with CONF.logging_exception_prefix."""
504
+        if not record:
505
+            return logging.Formatter.formatException(self, exc_info)
506
+
507
+        stringbuffer = cStringIO.StringIO()
508
+        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
509
+                                  None, stringbuffer)
510
+        lines = stringbuffer.getvalue().split('\n')
511
+        stringbuffer.close()
512
+
513
+        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
514
+            record.asctime = self.formatTime(record, self.datefmt)
515
+
516
+        formatted_lines = []
517
+        for line in lines:
518
+            pl = CONF.logging_exception_prefix % record.__dict__
519
+            fl = '%s%s' % (pl, line)
520
+            formatted_lines.append(fl)
521
+        return '\n'.join(formatted_lines)
522
+
523
+
524
+class ColorHandler(logging.StreamHandler):
525
+    LEVEL_COLORS = {
526
+        logging.DEBUG: '\033[00;32m',  # GREEN
527
+        logging.INFO: '\033[00;36m',  # CYAN
528
+        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
529
+        logging.WARN: '\033[01;33m',  # BOLD YELLOW
530
+        logging.ERROR: '\033[01;31m',  # BOLD RED
531
+        logging.CRITICAL: '\033[01;31m',  # BOLD RED
532
+    }
533
+
534
+    def format(self, record):
535
+        record.color = self.LEVEL_COLORS[record.levelno]
536
+        return logging.StreamHandler.format(self, record)
537
+
538
+
539
+class DeprecatedConfig(Exception):
540
+    message = _("Fatal call to deprecated config: %(msg)s")
541
+
542
+    def __init__(self, msg):
543
+        super(Exception, self).__init__(self.message % dict(msg=msg))
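
A minimal wiring sketch for a service entry point (Python 2, assuming oslo.config is installed; the project name 'conductor' and the messages are illustrative). CONF has to be populated before setup() so the CLI options registered above are available.

import sys

from oslo.config import cfg

from conductor.openstack.common import log as logging

LOG = logging.getLogger(__name__)

if __name__ == '__main__':
    cfg.CONF(sys.argv[1:], project='conductor')  # parses --debug, --log-file, --use-syslog, ...
    logging.setup('conductor')                   # installs handlers according to CONF
    LOG.audit('conductor started')               # AUDIT (INFO + 1) is visible with -v or -d
    LOG.debug('per-request details')             # emitted only when --debug is set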

+ 95
- 0
conductor/conductor/openstack/common/loopingcall.py View File

@@ -0,0 +1,95 @@
1
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
+
3
+# Copyright 2010 United States Government as represented by the
4
+# Administrator of the National Aeronautics and Space Administration.
5
+# Copyright 2011 Justin Santa Barbara
6
+# All Rights Reserved.
7
+#
8
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
9
+#    not use this file except in compliance with the License. You may obtain
10
+#    a copy of the License at
11
+#
12
+#         http://www.apache.org/licenses/LICENSE-2.0
13
+#
14
+#    Unless required by applicable law or agreed to in writing, software
15
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17
+#    License for the specific language governing permissions and limitations
18
+#    under the License.
19
+
20
+import sys
21
+
22
+from eventlet import event
23
+from eventlet import greenthread
24
+
25
+from conductor.openstack.common.gettextutils import _
26
+from conductor.openstack.common import log as logging
27
+from conductor.openstack.common import timeutils
28
+
29
+LOG = logging.getLogger(__name__)
30
+
31
+
32
+class LoopingCallDone(Exception):
33
+    """Exception to break out and stop a LoopingCall.
34
+
35
+    The poll-function passed to LoopingCall can raise this exception to
36
+    break out of the loop normally. This is somewhat analogous to
37
+    StopIteration.
38
+
39
+    An optional return-value can be included as the argument to the exception;
40
+    this return-value will be returned by LoopingCall.wait()
41
+
42
+    """
43
+
44
+    def __init__(self, retvalue=True):
45
+        """:param retvalue: Value that LoopingCall.wait() should return."""
46
+        self.retvalue = retvalue
47
+
48
+
49
+class LoopingCall(object):
50
+    def __init__(self, f=None, *args, **kw):
51
+        self.args = args
52
+        self.kw = kw
53
+        self.f = f
54
+        self._running = False
55
+
56
+    def start(self, interval, initial_delay=None):
57
+        self._running = True
58
+        done = event.Event()
59
+
60
+        def _inner():
61
+            if initial_delay:
62
+                greenthread.sleep(initial_delay)
63
+
64
+            try:
65
+                while self._running:
66
+                    start = timeutils.utcnow()
67
+                    self.f(*self.args, **self.kw)
68
+                    end = timeutils.utcnow()
69
+                    if not self._running:
70
+                        break
71
+                    delay = interval - timeutils.delta_seconds(start, end)
72
+                    if delay <= 0:
73
+                        LOG.warn(_('task run outlasted interval by %s sec') %
74
+                                 -delay)
75
+                    greenthread.sleep(delay if delay > 0 else 0)
76
+            except LoopingCallDone, e:
77
+                self.stop()
78
+                done.send(e.retvalue)
79
+            except Exception:
80
+                LOG.exception(_('in looping call'))
81
+                done.send_exception(*sys.exc_info())
82
+                return
83
+            else:
84
+                done.send(True)
85
+
86
+        self.done = done
87
+
88
+        greenthread.spawn_n(_inner)
89
+        return self.done
90
+
91
+    def stop(self):
92
+        self._running = False
93
+
94
+    def wait(self):
95
+        return self.done.wait()
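
A short sketch of driving LoopingCall (assumes eventlet; the heartbeat function, shared state and interval are illustrative). Raising LoopingCallDone ends the loop and its argument becomes the result of wait().

from conductor.openstack.common import loopingcall


def _heartbeat(state):
    state['beats'] += 1
    if state['beats'] >= 3:
        # Stop the loop; retvalue is handed back to wait().
        raise loopingcall.LoopingCallDone(retvalue=state['beats'])


state = {'beats': 0}
timer = loopingcall.LoopingCall(_heartbeat, state)
timer.start(interval=0.1)
print timer.wait()   # blocks on the greenthread and prints 3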

windc/windc/common/__init__.py → conductor/conductor/openstack/common/notifier/__init__.py View File

@@ -1,6 +1,4 @@
1
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
-
3
-# Copyright 2010-2011 OpenStack LLC.
1
+# Copyright 2011 OpenStack Foundation.
4 2
 # All Rights Reserved.
5 3
 #
6 4
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may

+ 182
- 0
conductor/conductor/openstack/common/notifier/api.py View File

@@ -0,0 +1,182 @@
1
+# Copyright 2011 OpenStack Foundation.
2
+# All Rights Reserved.
3
+#
4
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
5
+#    not use this file except in compliance with the License. You may obtain
6
+#    a copy of the License at
7
+#
8
+#         http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+#    Unless required by applicable law or agreed to in writing, software
11
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13
+#    License for the specific language governing permissions and limitations
14
+#    under the License.
15
+
16
+import uuid
17
+
18
+from oslo.config import cfg
19
+
20
+from conductor.openstack.common import context
21
+from conductor.openstack.common.gettextutils import _
22
+from conductor.openstack.common import importutils
23
+from conductor.openstack.common import jsonutils
24
+from conductor.openstack.common import log as logging
25
+from conductor.openstack.common import timeutils
26
+
27
+
28
+LOG = logging.getLogger(__name__)
29
+
30
+notifier_opts = [
31
+    cfg.MultiStrOpt('notification_driver',
32
+                    default=[],
33
+                    help='Driver or drivers to handle sending notifications'),
34
+    cfg.StrOpt('default_notification_level',
35
+               default='INFO',
36
+               help='Default notification level for outgoing notifications'),
37
+    cfg.StrOpt('default_publisher_id',
38
+               default='$host',
39
+               help='Default publisher_id for outgoing notifications'),
40
+]
41
+
42
+CONF = cfg.CONF
43
+CONF.register_opts(notifier_opts)
44
+
45
+WARN = 'WARN'
46
+INFO = 'INFO'
47
+ERROR = 'ERROR'
48
+CRITICAL = 'CRITICAL'
49
+DEBUG = 'DEBUG'
50
+
51
+log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
52
+
53
+
54
+class BadPriorityException(Exception):
55
+    pass
56
+
57
+
58
+def notify_decorator(name, fn):
59
+    """ decorator for notify which is used from utils.monkey_patch()
60
+
61
+        :param name: name of the function
62
+        :param fn: the function object being decorated
63
+        :returns: function -- decorated function
64
+
65
+    """
66
+    def wrapped_func(*args, **kwarg):
67
+        body = {}
68
+        body['args'] = []
69
+        body['kwarg'] = {}
70
+        for arg in args:
71
+            body['args'].append(arg)
72
+        for key in kwarg:
73
+            body['kwarg'][key] = kwarg[key]
74
+
75
+        ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
76
+        notify(ctxt,
77
+               CONF.default_publisher_id,
78
+               name,
79
+               CONF.default_notification_level,
80
+               body)
81
+        return fn(*args, **kwarg)
82
+    return wrapped_func
83
+
84
+
85
+def publisher_id(service, host=None):
86
+    if not host:
87
+        host = CONF.host
88
+    return "%s.%s" % (service, host)
89
+
90
+
91
+def notify(context, publisher_id, event_type, priority, payload):
92
+    """Sends a notification using the specified driver
93
+
94
+    :param publisher_id: the source worker_type.host of the message
95
+    :param event_type:   the literal type of event (ex. Instance Creation)
96
+    :param priority:     patterned after the enumeration of Python logging
97
+                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
98
+    :param payload:       A python dictionary of attributes
99
+
100
+    Outgoing message format includes the above parameters, and appends the
101
+    following:
102
+
103
+    message_id
104
+      a UUID representing the id for this notification
105
+
106
+    timestamp
107
+      the GMT timestamp the notification was sent at
108
+
109
+    The composite message will be constructed as a dictionary of the above
110
+    attributes, which will then be sent via the transport mechanism defined
111
+    by the driver.
112
+
113
+    Message example::
114
+
115
+        {'message_id': str(uuid.uuid4()),
116
+         'publisher_id': 'compute.host1',
117
+         'timestamp': timeutils.utcnow(),
118
+         'priority': 'WARN',
119
+         'event_type': 'compute.create_instance',
120
+         'payload': {'instance_id': 12, ... }}
121
+
122
+    """
123
+    if priority not in log_levels:
124
+        raise BadPriorityException(
125
+            _('%s not in valid priorities') % priority)
126
+
127
+    # Ensure everything is JSON serializable.
128
+    payload = jsonutils.to_primitive(payload, convert_instances=True)
129
+
130
+    msg = dict(message_id=str(uuid.uuid4()),
131
+               publisher_id=publisher_id,
132
+               event_type=event_type,
133
+               priority=priority,
134
+               payload=payload,
135
+               timestamp=str(timeutils.utcnow()))
136
+
137
+    for driver in _get_drivers():
138
+        try:
139
+            driver.notify(context, msg)
140
+        except Exception as e:
141
+            LOG.exception(_("Problem '%(e)s' attempting to "
142
+                            "send to notification system. "
143
+                            "Payload=%(payload)s")
144
+                          % dict(e=e, payload=payload))
145
+
146
+
147
+_drivers = None
148
+
149
+
150
+def _get_drivers():
151
+    """Instantiate, cache, and return drivers based on the CONF."""
152
+    global _drivers
153
+    if _drivers is None:
154
+        _drivers = {}
155
+        for notification_driver in CONF.notification_driver:
156
+            add_driver(notification_driver)
157
+
158
+    return _drivers.values()
159
+
160
+
161
+def add_driver(notification_driver):
162
+    """Add a notification driver at runtime."""
163
+    # Make sure the driver list is initialized.
164
+    _get_drivers()
165
+    if isinstance(notification_driver, basestring):
166
+        # Load and add
167
+        try:
168
+            driver = importutils.import_module(notification_driver)
169
+            _drivers[notification_driver] = driver
170
+        except ImportError:
171
+            LOG.exception(_("Failed to load notifier %s. "
172
+                            "These notifications will not be sent.") %
173
+                          notification_driver)
174
+    else:
175
+        # Driver is already loaded; just add the object.
176
+        _drivers[notification_driver] = notification_driver
177
+
178
+
179
+def _reset_drivers():
180
+    """Used by unit tests to reset the drivers."""
181
+    global _drivers
182
+    _drivers = None
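
A sketch of emitting an event through the driver chain above (Python 2, matching this tree). The driver path points at the log_notifier module that ships next to this API; the project name, host, event type and payload are made-up examples.

from oslo.config import cfg

from conductor.openstack.common.notifier import api as notifier_api

cfg.CONF([], project='conductor')   # load option defaults; no CLI args in this sketch
cfg.CONF.set_override(
    'notification_driver',
    ['conductor.openstack.common.notifier.log_notifier'])

notifier_api.notify(None,                                       # context
                    notifier_api.publisher_id('conductor',
                                               host='node-1'),  # -> 'conductor.node-1'
                    'environment.deploy.finished',              # event_type
                    notifier_api.INFO,
                    {'environment_id': 'env-1'})                # JSON-serializable payload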

+ 35
- 0
conductor/conductor/openstack/common/notifier/log_notifier.py View File

@@ -0,0 +1,35 @@
1
+# Copyright 2011 OpenStack Foundation.
2
+# All Rights Reserved.
3
+#
4
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
5
+#    not use this file except in compliance with the License. You may obtain
6
+#    a copy of the License at
7
+#
8
+#         http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+#    Unless required by applicable law or agreed to in writing, software
11
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13
+#    License for the specific language governing permissions and limitations
14
+#    under the License.
15
+
16
+from oslo.config import cfg
17
+
18
+from conductor.openstack.common import jsonutils
19
+from conductor.openstack.common import log as logging
20
+
21
+
22
+CONF = cfg.CONF
23
+
24
+
25
+def notify(_context, message):
26
+    """Notifies the recipient of the desired event given the model.
27
+    Log notifications using OpenStack's default logging system."""
28
+
29
+    priority = message.get('priority',
30
+                           CONF.default_notification_level)
31
+    priority = priority.lower()
32
+    logger = logging.getLogger(
33
+        'conductor.openstack.common.notification.%s' %
34
+        message['event_type'])
35
+    getattr(logger, priority)(jsonutils.dumps(message))
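
Calling the driver directly looks like the sketch below. Importing notifier.api first matters because it registers the default_notification_level option that the priority lookup above references; the message fields are illustrative.

from oslo.config import cfg

from conductor.openstack.common.notifier import api  # noqa: registers notifier options
from conductor.openstack.common.notifier import log_notifier

cfg.CONF([], project='conductor')   # make the registered defaults readable

log_notifier.notify(None, {'event_type': 'image.upload',
                           'priority': 'INFO',
                           'payload': {'size_mb': 12}})
# The JSON-serialized message is logged on
# 'conductor.openstack.common.notification.image.upload' at INFO level.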

windc/windc/adapters/openstack.py → conductor/conductor/openstack/common/notifier/no_op_notifier.py View File

@@ -1,6 +1,4 @@
1
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
-
3
-# Copyright 2011 OpenStack LLC.
1
+# Copyright 2011 OpenStack Foundation.
4 2
 # All Rights Reserved.
5 3
 #
6 4
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,5 +13,7 @@
15 13
 #    License for the specific language governing permissions and limitations
16 14
 #    under the License.
17 15
 
18
-from heatclient import Client
19 16
 
17
+def notify(_context, message):
18
+    """Notifies the recipient of the desired event given the model"""
19
+    pass

+ 46
- 0
conductor/conductor/openstack/common/notifier/rpc_notifier.py View File

@@ -0,0 +1,46 @@
1
+# Copyright 2011 OpenStack Foundation.
2
+# All Rights Reserved.
3
+#
4
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
5
+#    not use this file except in compliance with the License. You may obtain
6
+#    a copy of the License at
7
+#
8
+#         http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+#    Unless required by applicable law or agreed to in writing, software
11
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13
+#    License for the specific language governing permissions and limitations
14
+#    under the License.
15
+
16
+from oslo.config import cfg
17
+
18
+from conductor.openstack.common import context as req_context
19
+from conductor.openstack.common.gettextutils import _
20
+from conductor.openstack.common import log as logging
21
+from conductor.openstack.common import rpc
22
+
23
+LOG = logging.getLogger(__name__)
24
+
25
+notification_topic_opt = cfg.ListOpt(
26
+    'notification_topics', default=['notifications', ],
27
+    help='AMQP topic used for openstack notifications')
28
+
29
+CONF = cfg.CONF
30
+CONF.register_opt(notification_topic_opt)
31
+
32
+
33
+def notify(context, message):
34
+    """Sends a notification via RPC"""
35
+    if not context:
36
+        context = req_context.get_admin_context()
37
+    priority = message.get('priority',
38
+                           CONF.default_notification_level)
39
+    priority = priority.lower()
40
+    for topic in CONF.notification_topics:
41
+        topic = '%s.%s' % (topic, priority)
42
+        try:
43
+            rpc.notify(context, topic, message)
44
+        except Exception:
45
+            LOG.exception(_("Could not send notification to %(topic)s. "
46
+                            "Payload=%(message)s"), locals())

+ 52
- 0
conductor/conductor/openstack/common/notifier/rpc_notifier2.py View File

@@ -0,0 +1,52 @@
1
+# Copyright 2011 OpenStack Foundation.
2
+# All Rights Reserved.
3
+#
4
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
5
+#    not use this file except in compliance with the License. You may obtain
6
+#    a copy of the License at
7
+#
8
+#         http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+#    Unless required by applicable law or agreed to in writing, software
11
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13
+#    License for the specific language governing permissions and limitations
14
+#    under the License.
15
+
16
+'''messaging based notification driver, with message envelopes'''
17
+
18
+from oslo.config import cfg
19
+
20
+from conductor.openstack.common import context as req_context
21
+from conductor.openstack.common.gettextutils import _
22
+from conductor.openstack.common import log as logging
23
+from conductor.openstack.common import rpc
24
+
25
+LOG = logging.getLogger(__name__)
26
+
27
+notification_topic_opt = cfg.ListOpt(
28
+    'topics', default=['notifications', ],
29
+    help='AMQP topic(s) used for openstack notifications')
30
+
31
+opt_group = cfg.OptGroup(name='rpc_notifier2',
32
+                         title='Options for rpc_notifier2')
33
+
34
+CONF = cfg.CONF
35
+CONF.register_group(opt_group)
36
+CONF.register_opt(notification_topic_opt, opt_group)
37
+
38
+
39
+def notify(context, message):
40
+    """Sends a notification via RPC"""
41
+    if not context:
42
+        context = req_context.get_admin_context()
43
+    priority = message.get('priority',
44
+                           CONF.default_notification_level)
45
+    priority = priority.lower()
46
+    for topic in CONF.rpc_notifier2.topics:
47
+        topic = '%s.%s' % (topic, priority)
48
+        try:
49
+            rpc.notify(context, topic, message, envelope=True)
50
+        except Exception:
51
+            LOG.exception(_("Could not send notification to %(topic)s. "
52
+                            "Payload=%(message)s"), locals())

windc/openstack/common/__init__.py → conductor/conductor/openstack/common/notifier/test_notifier.py View File

@@ -1,6 +1,4 @@
1
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
-
3
-# Copyright 2011 OpenStack LLC.
1
+# Copyright 2011 OpenStack Foundation.
4 2
 # All Rights Reserved.
5 3
 #
6 4
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,5 +13,10 @@
15 13
 #    License for the specific language governing permissions and limitations
16 14
 #    under the License.
17 15
 
18
-# TODO(jaypipes) Code in this module is intended to be ported to the eventual 
19
-#                openstack-common library
16
+
17
+NOTIFICATIONS = []
18
+
19
+
20
+def notify(_context, message):
21
+    """Test notifier, stores notifications in memory for unittests."""
22
+    NOTIFICATIONS.append(message)
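
A sketch of the assertion pattern this enables in unit tests; add_driver() accepts the module object directly, and the event name and payload are made up.

from oslo.config import cfg

from conductor.openstack.common.notifier import api as notifier_api
from conductor.openstack.common.notifier import test_notifier

cfg.CONF([], project='conductor')        # load option defaults for the sketch

notifier_api._reset_drivers()
notifier_api.add_driver(test_notifier)   # register the in-memory driver

notifier_api.notify(None, 'compute.host1', 'test.event',
                    notifier_api.DEBUG, {'answer': 42})

assert len(test_notifier.NOTIFICATIONS) == 1
assert test_notifier.NOTIFICATIONS[0]['event_type'] == 'test.event'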

+ 332