Browse Source

re-factored the processor

Change-Id: I4d06b563b5170e72cb5f691fb569c4f77d74539c
changes/71/307071/1
Kanagaraj Manickam 3 years ago
parent
commit
e8f5479949

+ 51
- 0
README.rst View File

@@ -12,3 +12,54 @@ Features
12 12
 --------
13 13
 
14 14
 * Automatic discovery of OpenStack deployment architecture
15
+
16
+How to setup db
17
+----------------
18
+* create the 'namos' db using below command
19
+
20
+  `create database namos`
21
+
22
+* update database.connection in /etc/namos/namos.conf with db username and
23
+  password
24
+
25
+* Run the below command to sync the namos schema
26
+
27
+  `namos-manage create_schema`
28
+
29
+How to setup namos
30
+------------------
31
+* Assume, namos is cloned at /opt/stack/namos, then run below command to
32
+  install namos from this directory.
33
+
34
+  `sudo python setup.py install`
35
+
36
+How to run namos
37
+-----------------
38
+* namos-api - Namos API starts to listen on port 9999. Now it does have support
39
+  for keystone authentication
40
+
41
+  `namos-api`
42
+
43
+* namos-manager - Namos backend service, to configure the number of workers,
44
+  update os_manager->workers
45
+
46
+ `namos-manager --config-file=/etc/namos/namos.conf`
47
+
48
+NOTE: Before running the namos-manager, please add os-namos agent in the
49
+console scripts of respective service components.
50
+
51
+To find the 360 view of OpenStack deployment
52
+--------------------------------------------
53
+Run http://localhost:8888/v1/view_360
54
+
55
+It provides 360 degree view under region->service_node in the response. In
56
+addition, gives the current live status of each service component.
57
+
58
+To find the status of components
59
+--------------------------------
60
+Run the below command
61
+
62
+`namos-manage status`
63
+
64
+NOTE: This command supports to query status based on given node name, node type
65
+, service and component. To find more details run this command with --help

+ 1
- 1
namos/cmd/conductor.py View File

@@ -16,7 +16,7 @@
16 16
 #    under the License.
17 17
 
18 18
 """
19
-The Namos Infra Management Service
19
+The Namos Manager
20 20
 """
21 21
 
22 22
 import eventlet

+ 1
- 1
namos/cmd/manage.py View File

@@ -21,7 +21,6 @@ from namos.common import exception
21 21
 from namos.common import utils
22 22
 
23 23
 from namos.db import api
24
-from namos.db import sample
25 24
 from namos.db.sqlalchemy import migration
26 25
 
27 26
 
@@ -154,6 +153,7 @@ class DBCommand(object):
154 153
         migration.history()
155 154
 
156 155
     def demo_data(self):
156
+        from namos.db import sample
157 157
         if CONF.command.purge:
158 158
             sample.purge_demo_data()
159 159
         else:

namos/common/generator.py.bkup → namos/common/generator.py View File

@@ -405,7 +405,7 @@ def _append_opts_json(f, group, namespaces):
405 405
             f[group][namespace][opt.name]['deprecated'] = []
406 406
             for d in opt.deprecated_opts:
407 407
                 f[group][namespace][opt.name]['deprecated'].append(
408
-                 (d.group or 'DEFAULT', d.name or opt.dest))
408
+                    (d.group or 'DEFAULT', d.name or opt.dest))
409 409
 
410 410
             f[group][namespace][opt.name][
411 411
                 'deprecated_for_removal'] = opt.deprecated_for_removal

+ 256
- 0
namos/conductor/config_processor.py View File

@@ -0,0 +1,256 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+# not use this file except in compliance with the License. You may obtain
5
+# a copy of the License at
6
+#
7
+#      http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+# License for the specific language governing permissions and limitations
13
+# under the License.
14
+
15
+from oslo_config import cfg
16
+from oslo_log import log
17
+
18
+from namos.common import exception
19
+from namos.common import utils
20
+from namos.db import api as db_api
21
+
22
+LOG = log.getLogger(__name__)
23
+
24
+CONF = cfg.CONF
25
+
26
+
27
+class ConfigProcessor(object):
28
+    def __init__(self, context, manager, registration_info, service_worker_id):
29
+        self.context = context
30
+        self.manager = manager
31
+        self.registration_info = registration_info
32
+        self.service_worker_id = service_worker_id
33
+        self.service_component_id = db_api.service_worker_get(
34
+            self.context,
35
+            self.service_worker_id).service_component_id
36
+        sc = db_api.service_component_get(
37
+            self.context,
38
+            self.service_component_id
39
+        )
40
+        self.service_node_id = sc.node_id
41
+        self.project = db_api.service_get(self.context, sc.service_id).name
42
+
43
+    def file_to_configs(self, file_content):
44
+        import uuid
45
+        tmp_file_path = '/tmp/%s.conf' % str(uuid.uuid4())
46
+        with open(tmp_file_path, 'w') as file:
47
+            file.write(file_content)
48
+
49
+        conf_dict = utils.file_to_configs(tmp_file_path)
50
+
51
+        import os
52
+        os.remove(tmp_file_path)
53
+
54
+        return conf_dict
55
+
56
+    def _form_config_name(self, group, key):
57
+        return '%s.%s' % (group, key)
58
+
59
+    def process_config_files(self):
60
+        # config file
61
+        conf_name_to_file_id = dict()
62
+        for cfg_f in self.registration_info['config_file_dict'].keys():
63
+            try:
64
+                config_file = db_api.config_file_create(
65
+                    self.context,
66
+                    dict(name=cfg_f,
67
+                         file=self.registration_info[
68
+                             'config_file_dict'][cfg_f],
69
+                         service_node_id=self.service_node_id))
70
+                LOG.info('Oslo config file %s is created' % config_file)
71
+            except exception.AlreadyExist:
72
+                config_files = \
73
+                    db_api.config_file_get_by_name_for_service_node(
74
+                        self.context,
75
+                        service_node_id=self.service_node_id,
76
+                        name=cfg_f
77
+                    )
78
+                if len(config_files) == 1:
79
+                    config_file = \
80
+                        db_api.config_file_update(
81
+                            self.context,
82
+                            config_files[0].id,
83
+                            dict(file=self.registration_info[
84
+                                'config_file_dict'][cfg_f]))
85
+                    LOG.info('Oslo config file %s is existing and is updated'
86
+                             % config_file)
87
+
88
+            config_dict = self.file_to_configs(
89
+                config_file.file
90
+            )
91
+
92
+            # config file entry
93
+            for grp, keys in config_dict.items():
94
+                for key, value in keys.items():
95
+                    # find config schema
96
+                    cfg_schs = db_api.config_schema_get_by(
97
+                        context=self.context,
98
+                        group=grp,
99
+                        name=key,
100
+                        project=self.project
101
+                    )
102
+
103
+                    cfg_sche = None
104
+                    if len(cfg_schs) == 0:
105
+                        LOG.debug("[%s] No Config Schema is existing, so "
106
+                                  "no schema is associated for Config Entry "
107
+                                  "%s::%s" %
108
+                                  (self.service_component_id,
109
+                                   grp,
110
+                                   key))
111
+                    elif len(cfg_schs) > 1:
112
+                        LOG.debug("[%s] More than one Config Schema is "
113
+                                  "existing, so no schema is associated for "
114
+                                  "Config Entry %s::%s" %
115
+                                  (self.service_component_id,
116
+                                   grp,
117
+                                   key))
118
+                    else:
119
+                        cfg_sche = cfg_schs[0]
120
+                        LOG.debug("[%s] Config Schema %s is existing and is "
121
+                                  "used to associated for Config Entry"
122
+                                  " %s::%s" %
123
+                                  (self.service_component_id,
124
+                                   cfg_sche.id,
125
+                                   grp,
126
+                                   key))
127
+
128
+                    # config file entry
129
+                    cfg_name = self._form_config_name(grp, key)
130
+
131
+                    cfg_obj_ = dict(
132
+                        service_component_id=self.service_component_id,
133
+                        name=cfg_name,
134
+                        value=value,
135
+                        oslo_config_schema_id=cfg_sche.id if
136
+                        cfg_sche else None,
137
+                        oslo_config_file_id=config_file.id
138
+                    )
139
+
140
+                    try:
141
+                        config = db_api.config_file_entry_create(
142
+                            self.context,
143
+                            cfg_obj_)
144
+                        LOG.debug("Config Entry %s is created" % config)
145
+                    except exception.AlreadyExist:
146
+                        configs = db_api.config_file_entry_get_all_by(
147
+                            self.context,
148
+                            service_component_id=cfg_obj_[
149
+                                'service_component_id'],
150
+                            oslo_config_file_id=config_file.id,
151
+                            name=cfg_obj_['name'])
152
+                        if len(configs) == 1:
153
+                            config = db_api.config_file_entry_update(
154
+                                self.context,
155
+                                configs[0].id,
156
+                                cfg_obj_)
157
+                            LOG.debug("Config Entry %s is existing and is "
158
+                                      "updated" % config)
159
+
160
+                    conf_name_to_file_id[cfg_name] = config.id
161
+
162
+        return conf_name_to_file_id
163
+
164
+    def process_configs(self):
165
+        conf_name_to_file_id = self.process_config_files()
166
+        # Config
167
+        for cfg_obj in self.registration_info['config_list']:
168
+            # This format is used by DriverProcessor
169
+            cfg_name = self._form_config_name(cfg_obj['group'],
170
+                                              cfg_obj['name'])
171
+
172
+            if not conf_name_to_file_id.get(cfg_name):
173
+                cfg_schm_id = None
174
+                cfg_f_entry = None
175
+
176
+                # find config schema
177
+                # ignore the config file_name right now !!, assumed conf unique
178
+                # across the service wth given group and name
179
+                cfg_schs = db_api.config_schema_get_by(
180
+                    context=self.context,
181
+                    group=cfg_obj['group'],
182
+                    name=cfg_obj['name'],
183
+                    project=self.project
184
+                )
185
+
186
+                if len(cfg_schs) == 0:
187
+                    LOG.debug("[%s] No Config Schema is existing, so "
188
+                              "no schema is associated for Config %s::%s" %
189
+                              (self.service_worker_id,
190
+                               cfg_obj['group'],
191
+                               cfg_obj['name']))
192
+                elif len(cfg_schs) > 1:
193
+                    LOG.debug("[%s] More than one Config Schema is existing, "
194
+                              "so no schema is associated for Config %s::%s" %
195
+                              (self.service_worker_id,
196
+                               cfg_obj['group'],
197
+                               cfg_obj['name']))
198
+                else:
199
+                    # try:
200
+                    #     cfg_sche = db_api.config_schema_create(
201
+                    #         self.context,
202
+                    #         dict(
203
+                    #             namespace='UNKNOWN-tagged-by-NAMOS',
204
+                    #             default_value=cfg_obj['default_value'],
205
+                    #             type=cfg_obj['type'],
206
+                    #             help=cfg_obj['help'],
207
+                    #             required=cfg_obj['required'],
208
+                    #             secret=cfg_obj['secret'],
209
+                    #             mutable=False,
210
+                    #             group_name=cfg_obj['group'],
211
+                    #             name=cfg_obj['name']
212
+                    #         )
213
+                    #     )
214
+                    #     LOG.info("Config Schema %s is created" % cfg_sche)
215
+                    # except exception.AlreadyExist:
216
+                    #     cfg_schs = db_api.config_schema_get_by(
217
+                    #         context=self.context,
218
+                    #         group=cfg_obj['group'],
219
+                    #         name=cfg_obj['name'],
220
+                    #         namespace='UNKNOWN-tagged-by-NAMOS'
221
+                    #     )
222
+
223
+                    cfg_sche = cfg_schs[0]
224
+                    LOG.debug("[%s] Config Schema %s is existing and is used "
225
+                              "for Config %s::%s" %
226
+                              (self.service_worker_id,
227
+                               cfg_sche.id,
228
+                               cfg_obj['group'],
229
+                               cfg_obj['name']))
230
+                    cfg_schm_id = cfg_sche.id
231
+            else:
232
+                cfg_schm_id = None
233
+                cfg_f_entry = conf_name_to_file_id[cfg_name]
234
+
235
+            cfg_obj_ = dict(
236
+                service_worker_id=self.service_worker_id,
237
+                name=cfg_name,
238
+                value=cfg_obj['value'] if cfg_obj['value'] else cfg_obj[
239
+                    'default_value'],
240
+                oslo_config_schema_id=cfg_schm_id,
241
+                oslo_config_file_entry_id=cfg_f_entry
242
+            )
243
+
244
+            try:
245
+                config = db_api.config_create(self.context, cfg_obj_)
246
+                LOG.debug("Config %s is created" % config)
247
+            except exception.AlreadyExist:
248
+                configs = db_api.config_get_by_name_for_service_worker(
249
+                    self.context,
250
+                    service_worker_id=cfg_obj_['service_worker_id'],
251
+                    name=cfg_obj_['name'])
252
+                if len(configs) == 1:
253
+                    config = db_api.config_update(self.context,
254
+                                                  configs[0].id,
255
+                                                  cfg_obj_)
256
+                    LOG.debug("Config %s is existing and is updated" % config)

+ 51
- 656
namos/conductor/manager.py View File

@@ -20,11 +20,13 @@ from oslo_log import log
20 20
 from oslo_utils import timeutils
21 21
 
22 22
 from namos.common import config as namos_config
23
-from namos.common import exception
24 23
 from namos.common import messaging
25 24
 from namos.common import utils
25
+from namos.conductor.config_processor import ConfigProcessor
26
+from namos.conductor.namespace_processor import NamespaceProcessor
27
+from namos.conductor.region_processor import RegionProcessor
28
+from namos.conductor.service_processor import ServiceProcessor
26 29
 from namos.db import api as db_api
27
-from namos.db import openstack_drivers
28 30
 
29 31
 LOG = log.getLogger(__name__)
30 32
 
@@ -48,6 +50,49 @@ class ConductorManager(object):
48 50
     RPC_API_VERSION = '1.0'
49 51
     TOPIC = namos_config.MESSAGE_QUEUE_CONDUCTOR_TOPIC
50 52
 
53
+    def _regisgration_ackw(self, context, identification):
54
+        client = messaging.get_rpc_client(
55
+            topic=self._os_namos_listener_topic(identification),
56
+            version=self.RPC_API_VERSION,
57
+            exchange=namos_config.PROJECT_NAME)
58
+        client.cast(context,
59
+                    'regisgration_ackw',
60
+                    identification=identification)
61
+        LOG.info("REGISTER [%s] ACK" % identification)
62
+
63
+    def _os_namos_listener_topic(self, identification):
64
+        return 'namos.CONF.%s' % identification
65
+
66
+    def _ping(self, context, identification):
67
+        client = messaging.get_rpc_client(
68
+            topic=self._os_namos_listener_topic(identification),
69
+            version=self.RPC_API_VERSION,
70
+            exchange=namos_config.PROJECT_NAME,
71
+            timeout=1)
72
+        try:
73
+            client.call(context,
74
+                        'ping_me',
75
+                        identification=identification)
76
+
77
+            LOG.info("PING [%s] SUCCESSFUL" % identification)
78
+            return True
79
+        except:  # noqa
80
+            LOG.info("PING [%s] FAILED" % identification)
81
+            return False
82
+
83
+    def _update_config_file(self, context, identification, name, content):
84
+        client = messaging.get_rpc_client(
85
+            topic=self._os_namos_listener_topic(identification),
86
+            version=self.RPC_API_VERSION,
87
+            exchange=namos_config.PROJECT_NAME,
88
+            timeout=2)
89
+        client.call(context,
90
+                    'update_config_file',
91
+                    identification=identification,
92
+                    name=name,
93
+                    content=content)
94
+        LOG.info("CONF FILE [%s] UPDATE [%s] DONE" % (name, identification))
95
+
51 96
     @request_context
52 97
     def add_region(self, context, region):
53 98
         return db_api.region_create(context, region)
@@ -120,10 +165,10 @@ class ConductorManager(object):
120 165
         cp.process_configs()
121 166
         #  Device Driver processing
122 167
         # TODO(mrkanag) if this to be per service component??
123
-        dp = DriverProcessor(context,
124
-                             self,
125
-                             service_worker_id,
126
-                             region_id)
168
+        dp = NamespaceProcessor(context,
169
+                                self,
170
+                                service_worker_id,
171
+                                region_id)
127 172
         dp.process_drivers()
128 173
 
129 174
         self._regisgration_ackw(context,
@@ -140,49 +185,6 @@ class ConductorManager(object):
140 185
         sp.cleanup(service_component_id)
141 186
         return service_worker_id
142 187
 
143
-    def _regisgration_ackw(self, context, identification):
144
-        client = messaging.get_rpc_client(
145
-            topic=self._os_namos_listener_topic(identification),
146
-            version=self.RPC_API_VERSION,
147
-            exchange=namos_config.PROJECT_NAME)
148
-        client.cast(context,
149
-                    'regisgration_ackw',
150
-                    identification=identification)
151
-        LOG.info("REGISTER [%s] ACK" % identification)
152
-
153
-    def _os_namos_listener_topic(self, identification):
154
-        return 'namos.CONF.%s' % identification
155
-
156
-    def _ping(self, context, identification):
157
-        client = messaging.get_rpc_client(
158
-            topic=self._os_namos_listener_topic(identification),
159
-            version=self.RPC_API_VERSION,
160
-            exchange=namos_config.PROJECT_NAME,
161
-            timeout=1)
162
-        try:
163
-            client.call(context,
164
-                        'ping_me',
165
-                        identification=identification)
166
-
167
-            LOG.info("PING [%s] SUCCESSFUL" % identification)
168
-            return True
169
-        except:  # noqa
170
-            LOG.info("PING [%s] FAILED" % identification)
171
-            return False
172
-
173
-    def _update_config_file(self, context, identification, name, content):
174
-        client = messaging.get_rpc_client(
175
-            topic=self._os_namos_listener_topic(identification),
176
-            version=self.RPC_API_VERSION,
177
-            exchange=namos_config.PROJECT_NAME,
178
-            timeout=2)
179
-        client.call(context,
180
-                    'update_config_file',
181
-                    identification=identification,
182
-                    name=name,
183
-                    content=content)
184
-        LOG.info("CONF FILE [%s] UPDATE [%s] DONE" % (name, identification))
185
-
186 188
     @request_context
187 189
     def heart_beat(self, context, identification, dieing=False):
188 190
         try:
@@ -339,610 +341,3 @@ class ConductorManager(object):
339 341
                     cfg_s.name]['entries'] = cfg_es
340 342
 
341 343
         return file_schema
342
-
343
-
344
-class RegionProcessor(object):
345
-    def __init__(self,
346
-                 context,
347
-                 manager,
348
-                 registration_info):
349
-        self.registration_info = registration_info
350
-        self.manager = manager
351
-        self.context = context
352
-
353
-    def process_region(self):
354
-        # region
355
-        # If region is not provided, make it as belongs to namos's region
356
-        if not self.registration_info.get('region_name'):
357
-            self.registration_info[
358
-                'region_name'] = cfg.CONF.os_namos.region_name
359
-
360
-        try:
361
-            region = db_api.region_create(
362
-                self.context,
363
-                dict(name=self.registration_info.get('region_name'))
364
-            )
365
-            LOG.info('Region %s is created' % region)
366
-        except exception.AlreadyExist:
367
-            region = db_api.region_get_by_name(
368
-                self.context,
369
-                name=self.registration_info.get('region_name')
370
-            )
371
-            LOG.info('Region %s is existing' % region)
372
-
373
-        return region.id
374
-
375
-
376
-class ServiceProcessor(object):
377
-    def __init__(self,
378
-                 context,
379
-                 manager,
380
-                 region_id,
381
-                 registration_info):
382
-        self.registration_info = registration_info
383
-        self.manager = manager
384
-        self.context = context
385
-        self.region_id = region_id
386
-
387
-    def process_service(self):
388
-        # Service Node
389
-        try:
390
-            # TODO(mrkanag) user proper node name instead of fqdn
391
-            node = db_api.service_node_create(
392
-                self.context,
393
-                dict(name=self.registration_info.get('fqdn'),
394
-                     fqdn=self.registration_info.get('fqdn'),
395
-                     region_id=self.region_id,
396
-                     extra={'ips': self.registration_info.get('ips')}))
397
-            LOG.info('Service node %s is created' % node)
398
-        except exception.AlreadyExist:
399
-            # TODO(mrkanag) is this to be region specifc search
400
-            node = db_api.service_node_get_by_name(
401
-                self.context,
402
-                self.registration_info.get('fqdn'))
403
-            LOG.info('Service node %s is existing' % node)
404
-
405
-        # Service
406
-        try:
407
-            s_id = 'b9c2549f-f685-4bc2-92e9-ba8af9c18591'
408
-            service = db_api.service_create(
409
-                self.context,
410
-                # TODO(mrkanag) use keystone python client and
411
-                # use real service id here
412
-                dict(name=self.registration_info.get('project_name'),
413
-                     keystone_service_id=s_id))
414
-
415
-            LOG.info('Service %s is created' % service)
416
-        except exception.AlreadyExist:
417
-            service = db_api.service_get_by_name(
418
-                self.context,
419
-                self.registration_info.get('project_name'))
420
-            LOG.info('Service %s is existing' % service)
421
-
422
-        # Service Component
423
-        try:
424
-            service_component = db_api.service_component_create(
425
-                self.context,
426
-                dict(name=self.registration_info['prog_name'],
427
-                     node_id=node.id,
428
-                     service_id=service.id,
429
-                     type=namos_config.find_type(self.registration_info[
430
-                         'prog_name'])))
431
-            LOG.info('Service Component %s is created' % service_component)
432
-        except exception.AlreadyExist:
433
-            service_components = \
434
-                db_api.service_component_get_all_by_node_for_service(
435
-                    self.context,
436
-                    node_id=node.id,
437
-                    service_id=service.id,
438
-                    name=self.registration_info['prog_name']
439
-                )
440
-            if len(service_components) == 1:
441
-                service_component = service_components[0]
442
-                LOG.info('Service Component %s is existing' %
443
-                         service_component)
444
-            # TODO(mrkanag) what to do when service_components size is > 1
445
-
446
-        # Service Worker
447
-        try:
448
-            service_worker = db_api.service_worker_create(
449
-                self.context,
450
-                # TODO(mrkanag) Fix the name, device driver proper !
451
-                dict(name='%s@%s' % (service_component.name,
452
-                                     self.registration_info['pid']),
453
-                     pid=self.registration_info['identification'],
454
-                     host=self.registration_info['host'],
455
-                     service_component_id=service_component.id,
456
-                     deleted_at=None,
457
-                     is_launcher=self.registration_info['i_am_launcher']
458
-                     ))
459
-            LOG.info('Service Worker %s is created' % service_worker)
460
-        except exception.AlreadyExist:
461
-            service_worker = db_api.service_worker_get_all_by(
462
-                self.context,
463
-                pid=self.registration_info['identification'],
464
-                service_component_id=service_component.id
465
-            )[0]
466
-            LOG.info('Service Worker %s is existing' %
467
-                     service_worker)
468
-
469
-        return service_component.id, service_worker.id
470
-
471
-    def cleanup(self, service_component_id):
472
-        # clean up the dead service workers
473
-        db_api.cleanup(self.context, service_component_id)
474
-
475
-
476
-class ConfigProcessor(object):
477
-    def __init__(self, context, manager, registration_info, service_worker_id):
478
-        self.context = context
479
-        self.manager = manager
480
-        self.registration_info = registration_info
481
-        self.service_worker_id = service_worker_id
482
-        self.service_component_id = db_api.service_worker_get(
483
-            self.context,
484
-            self.service_worker_id).service_component_id
485
-        sc = db_api.service_component_get(
486
-            self.context,
487
-            self.service_component_id
488
-        )
489
-        self.service_node_id = sc.node_id
490
-        self.project = db_api.service_get(self.context, sc.service_id).name
491
-
492
-    def file_to_configs(self, file_content):
493
-        import uuid
494
-        tmp_file_path = '/tmp/%s.conf' % str(uuid.uuid4())
495
-        with open(tmp_file_path, 'w') as file:
496
-            file.write(file_content)
497
-
498
-        conf_dict = utils.file_to_configs(tmp_file_path)
499
-
500
-        import os
501
-        os.remove(tmp_file_path)
502
-
503
-        return conf_dict
504
-
505
-    def _form_config_name(self, group, key):
506
-        return '%s.%s' % (group, key)
507
-
508
-    def process_config_files(self):
509
-        # config file
510
-        conf_name_to_file_id = dict()
511
-        for cfg_f in self.registration_info['config_file_dict'].keys():
512
-            try:
513
-                config_file = db_api.config_file_create(
514
-                    self.context,
515
-                    dict(name=cfg_f,
516
-                         file=self.registration_info[
517
-                             'config_file_dict'][cfg_f],
518
-                         service_node_id=self.service_node_id))
519
-                LOG.info('Oslo config file %s is created' % config_file)
520
-            except exception.AlreadyExist:
521
-                config_files = \
522
-                    db_api.config_file_get_by_name_for_service_node(
523
-                        self.context,
524
-                        service_node_id=self.service_node_id,
525
-                        name=cfg_f
526
-                    )
527
-                if len(config_files) == 1:
528
-                    config_file = \
529
-                        db_api.config_file_update(
530
-                            self.context,
531
-                            config_files[0].id,
532
-                            dict(file=self.registration_info[
533
-                                'config_file_dict'][cfg_f]))
534
-                    LOG.info('Oslo config file %s is existing and is updated'
535
-                             % config_file)
536
-
537
-            config_dict = self.file_to_configs(
538
-                config_file.file
539
-            )
540
-
541
-            # config file entry
542
-            for grp, keys in config_dict.items():
543
-                for key, value in keys.items():
544
-                    # find config schema
545
-                    cfg_schs = db_api.config_schema_get_by(
546
-                        context=self.context,
547
-                        group=grp,
548
-                        name=key,
549
-                        project=self.project
550
-                    )
551
-
552
-                    cfg_sche = None
553
-                    if len(cfg_schs) == 0:
554
-                        LOG.debug("[%s] No Config Schema is existing, so "
555
-                                  "no schema is associated for Config Entry "
556
-                                  "%s::%s" %
557
-                                  (self.service_component_id,
558
-                                   grp,
559
-                                   key))
560
-                    elif len(cfg_schs) > 1:
561
-                        LOG.debug("[%s] More than one Config Schema is "
562
-                                  "existing, so no schema is associated for "
563
-                                  "Config Entry %s::%s" %
564
-                                  (self.service_component_id,
565
-                                   grp,
566
-                                   key))
567
-                    else:
568
-                        cfg_sche = cfg_schs[0]
569
-                        LOG.debug("[%s] Config Schema %s is existing and is "
570
-                                  "used to associated for Config Entry"
571
-                                  " %s::%s" %
572
-                                  (self.service_component_id,
573
-                                   cfg_sche.id,
574
-                                   grp,
575
-                                   key))
576
-
577
-                    # config file entry
578
-                    cfg_name = self._form_config_name(grp, key)
579
-
580
-                    cfg_obj_ = dict(
581
-                        service_component_id=self.service_component_id,
582
-                        name=cfg_name,
583
-                        value=value,
584
-                        oslo_config_schema_id=cfg_sche.id if
585
-                        cfg_sche else None,
586
-                        oslo_config_file_id=config_file.id
587
-                    )
588
-
589
-                    try:
590
-                        config = db_api.config_file_entry_create(
591
-                            self.context,
592
-                            cfg_obj_)
593
-                        LOG.debug("Config Entry %s is created" % config)
594
-                    except exception.AlreadyExist:
595
-                        configs = db_api.config_file_entry_get_all_by(
596
-                            self.context,
597
-                            service_component_id=cfg_obj_[
598
-                                'service_component_id'],
599
-                            oslo_config_file_id=config_file.id,
600
-                            name=cfg_obj_['name'])
601
-                        if len(configs) == 1:
602
-                            config = db_api.config_file_entry_update(
603
-                                self.context,
604
-                                configs[0].id,
605
-                                cfg_obj_)
606
-                            LOG.debug("Config Entry %s is existing and is "
607
-                                      "updated" % config)
608
-
609
-                    conf_name_to_file_id[cfg_name] = config.id
610
-
611
-        return conf_name_to_file_id
612
-
613
-    def process_configs(self):
614
-        conf_name_to_file_id = self.process_config_files()
615
-        # Config
616
-        for cfg_obj in self.registration_info['config_list']:
617
-            # This format is used by DriverProcessor
618
-            cfg_name = self._form_config_name(cfg_obj['group'],
619
-                                              cfg_obj['name'])
620
-
621
-            if not conf_name_to_file_id.get(cfg_name):
622
-                cfg_schm_id = None
623
-                cfg_f_entry = None
624
-
625
-                # find config schema
626
-                # ignore the config file_name right now !!, assumed conf unique
627
-                # across the service wth given group and name
628
-                cfg_schs = db_api.config_schema_get_by(
629
-                    context=self.context,
630
-                    group=cfg_obj['group'],
631
-                    name=cfg_obj['name'],
632
-                    project=self.project
633
-                )
634
-
635
-                if len(cfg_schs) == 0:
636
-                    LOG.debug("[%s] No Config Schema is existing, so "
637
-                              "no schema is associated for Config %s::%s" %
638
-                              (self.service_worker_id,
639
-                               cfg_obj['group'],
640
-                               cfg_obj['name']))
641
-                elif len(cfg_schs) > 1:
642
-                    LOG.debug("[%s] More than one Config Schema is existing, "
643
-                              "so no schema is associated for Config %s::%s" %
644
-                              (self.service_worker_id,
645
-                               cfg_obj['group'],
646
-                               cfg_obj['name']))
647
-                else:
648
-                    # try:
649
-                    #     cfg_sche = db_api.config_schema_create(
650
-                    #         self.context,
651
-                    #         dict(
652
-                    #             namespace='UNKNOWN-tagged-by-NAMOS',
653
-                    #             default_value=cfg_obj['default_value'],
654
-                    #             type=cfg_obj['type'],
655
-                    #             help=cfg_obj['help'],
656
-                    #             required=cfg_obj['required'],
657
-                    #             secret=cfg_obj['secret'],
658
-                    #             mutable=False,
659
-                    #             group_name=cfg_obj['group'],
660
-                    #             name=cfg_obj['name']
661
-                    #         )
662
-                    #     )
663
-                    #     LOG.info("Config Schema %s is created" % cfg_sche)
664
-                    # except exception.AlreadyExist:
665
-                    #     cfg_schs = db_api.config_schema_get_by(
666
-                    #         context=self.context,
667
-                    #         group=cfg_obj['group'],
668
-                    #         name=cfg_obj['name'],
669
-                    #         namespace='UNKNOWN-tagged-by-NAMOS'
670
-                    #     )
671
-
672
-                    cfg_sche = cfg_schs[0]
673
-                    LOG.debug("[%s] Config Schema %s is existing and is used "
674
-                              "for Config %s::%s" %
675
-                              (self.service_worker_id,
676
-                               cfg_sche.id,
677
-                               cfg_obj['group'],
678
-                               cfg_obj['name']))
679
-                    cfg_schm_id = cfg_sche.id
680
-            else:
681
-                cfg_schm_id = None
682
-                cfg_f_entry = conf_name_to_file_id[cfg_name]
683
-
684
-            # config_file_entry_id = None
685
-            # for f_id, conf_groups in conf_name_to_file_id.items():
686
-            #     if cfg_obj['group'] in list(conf_groups):
687
-            #         if cfg_obj['name'] in list(conf_groups[cfg_obj[
688
-            #            'group']]):
689
-            #             config_entrys=db_api.config_file_entry_get_all_by(
690
-            #                 self.context,
691
-            #                 service_component_id=self.service_component_id,
692
-            #                 oslo_config_file_id=f_id,
693
-            #                 name=cfg_name)
694
-            #             if len(config_entrys) == 1:
695
-            #                 config_file_entry_id = config_entrys[0].id
696
-            #
697
-            #             break
698
-
699
-            cfg_obj_ = dict(
700
-                service_worker_id=self.service_worker_id,
701
-                name=cfg_name,
702
-                value=cfg_obj['value'] if cfg_obj['value'] else cfg_obj[
703
-                    'default_value'],
704
-                oslo_config_schema_id=cfg_schm_id,
705
-                oslo_config_file_entry_id=cfg_f_entry
706
-            )
707
-
708
-            try:
709
-                config = db_api.config_create(self.context, cfg_obj_)
710
-                LOG.debug("Config %s is created" % config)
711
-            except exception.AlreadyExist:
712
-                configs = db_api.config_get_by_name_for_service_worker(
713
-                    self.context,
714
-                    service_worker_id=cfg_obj_['service_worker_id'],
715
-                    name=cfg_obj_['name'])
716
-                if len(configs) == 1:
717
-                    config = db_api.config_update(self.context,
718
-                                                  configs[0].id,
719
-                                                  cfg_obj_)
720
-                    LOG.debug("Config %s is existing and is updated" % config)
721
-
722
-
723
-class DriverProcessor(object):
724
-    def __init__(self, context, manager, service_worker_id, region_id):
725
-        self.context = context
726
-        self.manager = manager
727
-        self.service_worker_id = service_worker_id
728
-        self.region_id = region_id
729
-        self.config_dict = self._get_config_dict()
730
-
731
-    def _get_config_dict(self):
732
-        conf_dict = {}
733
-        for c in db_api.config_get_by_name_for_service_worker(
734
-            self.context,
735
-            self.service_worker_id
736
-        ):
737
-            conf_dict[c.name] = c.to_dict()
738
-
739
-        return conf_dict
740
-
741
-    def _identify_drivers(self):
742
-        return (set(openstack_drivers.get_drivers_config().keys()) &
743
-                set(self.config_dict.keys()))
744
-
745
-    def _get_value(self, name):
746
-        if name is None:
747
-            return name
748
-
749
-        if isinstance(name, str):
750
-            # Constant naming
751
-            if name[0] == '#':
752
-                return name[1:]
753
-            return (self.config_dict[name].get('value'))
754
-        elif isinstance(name, tuple):
755
-            fn = name[0]
756
-            args = list()
757
-            for var in name[1:]:
758
-                args.append(self._get_value(var))
759
-            return fn(*args)
760
-        elif isinstance(name, list):
761
-            fmt_str = name[0]
762
-            params = [self._get_value(param) for param in name[1:]]
763
-            return fmt_str % tuple(params)
764
-
765
-    def process_drivers(self):
766
-        for driver_key in self._identify_drivers():
767
-            try:
768
-                drivers = self._get_value(driver_key)
769
-                drivers = utils._to_list(drivers)
770
-                for driver_name in drivers:
771
-                    self.process_driver(driver_key, driver_name)
772
-            except KeyError:  # noqa
773
-                # TODO(mrkanag) run namos-manager and restart nova-scheduler
774
-                # KeyError: 'libvirt.virt_type' is thrown, fix it
775
-                LOG.error('Failed to process driver %s in service worker %s' %
776
-                          (driver_key, self.service_worker_id))
777
-                continue
778
-
779
-    def process_driver(self, driver_key, driver_name):
780
-            driver_config = \
781
-                openstack_drivers.get_drivers_config()[driver_key][driver_name]
782
-
783
-            if driver_config.get('alias') is not None:
784
-                alias = driver_config.get('alias')
785
-                driver_config = \
786
-                    openstack_drivers.get_drivers_config()
787
-                for key in alias.split(':'):
788
-                    driver_config = driver_config[key]
789
-                driver_name = key
790
-
791
-            driver_def = \
792
-                openstack_drivers.get_drivers_def()[driver_name]
793
-
794
-            connection = dict()
795
-
796
-            endpoint_type = None
797
-            connection_cfg = None
798
-            device_endpoint_name = None
799
-            device_cfg = None
800
-            child_device_cfg = None
801
-
802
-            if driver_config.get('device') is not None:
803
-                device_cfg = driver_config['device']
804
-
805
-            if driver_config['endpoint'].get('type') is not None:
806
-                endpoint_type = driver_config['endpoint']['type']
807
-                if endpoint_type[0] != '#':
808
-                    endpoint_type = self._get_value(endpoint_type)
809
-
810
-                connection_cfg = driver_config['endpoint'][endpoint_type][
811
-                    'connection']
812
-                device_endpoint_name = self._get_value(
813
-                    driver_config['endpoint'][endpoint_type]['name'])
814
-                # override the device name
815
-                if driver_config['endpoint'][endpoint_type].get(
816
-                        'device') is not None:
817
-                    device_cfg = driver_config['endpoint'][endpoint_type][
818
-                        'device']
819
-                if driver_config['endpoint'][endpoint_type].get(
820
-                        'child_device') is not None:
821
-                    child_device_cfg = driver_config['endpoint'][
822
-                        endpoint_type]['child_device']
823
-            else:
824
-                endpoint_type = None
825
-                connection_cfg = driver_config['endpoint']['connection']
826
-                device_endpoint_name = self._get_value(
827
-                    driver_config['endpoint']['name']
828
-                )
829
-                # override the device name
830
-                if driver_config['endpoint'].get('device') is not None:
831
-                    device_cfg = driver_config['endpoint']['device']
832
-
833
-                if driver_config['endpoint'].get('child_device') is not None:
834
-                    child_device_cfg = driver_config['endpoint'][
835
-                        'child_device']
836
-
837
-            # Device
838
-            device_name = self._get_value(device_cfg['name'])
839
-            try:
840
-                # TODO(mrkanag) Set the right status
841
-                device = db_api.device_create(
842
-                    self.context,
843
-                    dict(name=device_name,
844
-                         status='active',
845
-                         region_id=self.region_id))
846
-
847
-                LOG.info('Device %s is created' % device)
848
-            except exception.AlreadyExist:
849
-                device = db_api.device_get_by_name(
850
-                    self.context,
851
-                    device_name)
852
-                LOG.info('Device %s is existing' % device)
853
-
854
-            # TODO(mrkanag) Poperly Handle child devices
855
-            if child_device_cfg is not None:
856
-                for d_name in self._get_value(child_device_cfg['key']):
857
-                    base_name = self._get_value(child_device_cfg['base_name'])
858
-                    d_name = '%s-%s' % (base_name, d_name)
859
-                    try:
860
-                        device = db_api.device_get_by_name(
861
-                            self.context,
862
-                            d_name)
863
-                        LOG.info('Device %s is existing' % device)
864
-                    except exception.DeviceNotFound:
865
-                        # TODO(mrkanag) region_id is hard-coded, fix it !
866
-                        # Set the right status as well
867
-                        r_id = 'f7dcd175-27ef-46b5-997f-e6e572f320b0'
868
-                        device = db_api.device_create(
869
-                            self.context,
870
-                            dict(name=d_name,
871
-                                 status='active',
872
-                                 parent_id=device.id,
873
-                                 region_id=r_id))
874
-
875
-                LOG.info('Device %s is created' % device)
876
-
877
-            # Device Endpoint
878
-            try:
879
-                for k, v in connection_cfg.iteritems():
880
-                    connection[k] = self._get_value(k)
881
-
882
-                device_endpoint = db_api.device_endpoint_create(
883
-                    self.context,
884
-                    dict(name=device_endpoint_name,
885
-                         connection=connection,
886
-                         type=endpoint_type,
887
-                         device_id=device.id))
888
-                LOG.info('Device Endpoint %s is created' % device_endpoint)
889
-            except exception.AlreadyExist:
890
-                device_endpoints = db_api.device_endpoint_get_by_device_type(
891
-                    self.context,
892
-                    device_id=device.id,
893
-                    type=endpoint_type,
894
-                    name=device_endpoint_name)
895
-                if len(device_endpoints) >= 1:
896
-                    device_endpoint = device_endpoints[0]
897
-                    LOG.info('Device Endpoint %s is existing' %
898
-                             device_endpoints[0])
899
-
900
-            # Device Driver Class
901
-            try:
902
-                device_driver_class = db_api.device_driver_class_create(
903
-                    self.context,
904
-                    dict(name=driver_name,
905
-                         python_class=driver_name,
906
-                         type=driver_def['type'],
907
-                         device_id=device.id,
908
-                         endpoint_id=device_endpoint.id,
909
-                         service_worker_id=self.service_worker_id,
910
-                         extra=driver_def.get('extra')))
911
-                LOG.info('Device Driver Class %s is created' %
912
-                         device_driver_class)
913
-            except exception.AlreadyExist:
914
-                device_driver_class = db_api.device_driver_class_get_by_name(
915
-                    self.context,
916
-                    driver_name)
917
-                LOG.info('Device Driver Class %s is existing' %
918
-                         device_driver_class)
919
-
920
-            # Device Driver
921
-            try:
922
-                device_driver = db_api.device_driver_create(
923
-                    self.context,
924
-                    dict(device_id=device.id,
925
-                         name=driver_name,
926
-                         endpoint_id=device_endpoint.id,
927
-                         device_driver_class_id=device_driver_class.id,
928
-                         service_worker_id=self.service_worker_id)
929
-                )
930
-                LOG.info('Device Driver %s is created' %
931
-                         device_driver)
932
-            except exception.AlreadyExist:
933
-                device_drivers = \
934
-                    db_api.device_driver_get_by_device_endpoint_service_worker(
935
-                        self.context,
936
-                        device_id=device.id,
937
-                        endpoint_id=device_endpoint.id,
938
-                        device_driver_class_id=device_driver_class.id,
939
-                        service_worker_id=self.service_worker_id
940
-                    )
941
-                if len(device_drivers) >= 1:
942
-                    device_driver = device_drivers[0]
943
-                    LOG.info('Device Driver %s is existing' %
944
-                             device_driver)
945
-
946
-
947
-if __name__ == '__main__':
948
-    print (DriverProcessor(None, None)._to_list("[\"file\', \'http\']"))

+ 252
- 0
namos/conductor/namespace_processor.py View File

@@ -0,0 +1,252 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+# not use this file except in compliance with the License. You may obtain
5
+# a copy of the License at
6
+#
7
+#      http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+# License for the specific language governing permissions and limitations
13
+# under the License.
14
+
15
+from oslo_config import cfg
16
+from oslo_log import log
17
+
18
+from namos.common import exception
19
+from namos.common import utils
20
+from namos.db import api as db_api
21
+from namos.db import openstack_drivers
22
+
23
+LOG = log.getLogger(__name__)
24
+
25
+CONF = cfg.CONF
26
+
27
+
28
+class NamespaceProcessor(object):
29
+    # TODO(mrkanag) check Fuel driver at
30
+    # http://docs.openstack.org/mitaka/config-reference/content/
31
+    # hpe-3par-driver.html
32
+    def __init__(self, context, manager, service_worker_id, region_id):
33
+        self.context = context
34
+        self.manager = manager
35
+        self.service_worker_id = service_worker_id
36
+        self.region_id = region_id
37
+        self.config_dict = self._get_config_dict()
38
+
39
+    def _get_config_dict(self):
40
+        conf_dict = {}
41
+        for c in db_api.config_get_by_name_for_service_worker(
42
+            self.context,
43
+            self.service_worker_id
44
+        ):
45
+            conf_dict[c.name] = c.to_dict()
46
+
47
+        return conf_dict
48
+
49
+    def _identify_drivers(self):
50
+        return (set(openstack_drivers.get_drivers_config().keys()) &
51
+                set(self.config_dict.keys()))
52
+
53
+    def _get_value(self, name):
54
+        if name is None:
55
+            return name
56
+
57
+        if isinstance(name, str):
58
+            # Constant naming
59
+            if name[0] == '#':
60
+                return name[1:]
61
+            return (self.config_dict[name].get('value'))
62
+        elif isinstance(name, tuple):
63
+            fn = name[0]
64
+            args = list()
65
+            for var in name[1:]:
66
+                args.append(self._get_value(var))
67
+            return fn(*args)
68
+        elif isinstance(name, list):
69
+            fmt_str = name[0]
70
+            params = [self._get_value(param) for param in name[1:]]
71
+            return fmt_str % tuple(params)
72
+
73
+    def process_drivers(self):
74
+        for driver_key in self._identify_drivers():
75
+            try:
76
+                drivers = self._get_value(driver_key)
77
+                drivers = utils._to_list(drivers)
78
+                for driver_name in drivers:
79
+                    self.process_driver(driver_key, driver_name)
80
+            except KeyError:  # noqa
81
+                # TODO(mrkanag) run namos-manager and restart nova-scheduler
82
+                # KeyError: 'libvirt.virt_type' is thrown, fix it
83
+                LOG.error('Failed to process driver %s in service worker %s' %
84
+                          (driver_key, self.service_worker_id))
85
+                continue
86
+
87
+    def process_driver(self, driver_key, driver_name):
88
+            driver_config = \
89
+                openstack_drivers.get_drivers_config()[driver_key][driver_name]
90
+
91
+            if driver_config.get('alias') is not None:
92
+                alias = driver_config.get('alias')
93
+                driver_config = \
94
+                    openstack_drivers.get_drivers_config()
95
+                for key in alias.split(':'):
96
+                    driver_config = driver_config[key]
97
+                driver_name = key
98
+
99
+            driver_def = \
100
+                openstack_drivers.get_drivers_def()[driver_name]
101
+
102
+            connection = dict()
103
+
104
+            endpoint_type = None
105
+            connection_cfg = None
106
+            device_endpoint_name = None
107
+            device_cfg = None
108
+            child_device_cfg = None
109
+
110
+            if driver_config.get('device') is not None:
111
+                device_cfg = driver_config['device']
112
+
113
+            if driver_config['endpoint'].get('type') is not None:
114
+                endpoint_type = driver_config['endpoint']['type']
115
+                if endpoint_type[0] != '#':
116
+                    endpoint_type = self._get_value(endpoint_type)
117
+
118
+                connection_cfg = driver_config['endpoint'][endpoint_type][
119
+                    'connection']
120
+                device_endpoint_name = self._get_value(
121
+                    driver_config['endpoint'][endpoint_type]['name'])
122
+                # override the device name
123
+                if driver_config['endpoint'][endpoint_type].get(
124
+                        'device') is not None:
125
+                    device_cfg = driver_config['endpoint'][endpoint_type][
126
+                        'device']
127
+                if driver_config['endpoint'][endpoint_type].get(
128
+                        'child_device') is not None:
129
+                    child_device_cfg = driver_config['endpoint'][
130
+                        endpoint_type]['child_device']
131
+            else:
132
+                endpoint_type = None
133
+                connection_cfg = driver_config['endpoint']['connection']
134
+                device_endpoint_name = self._get_value(
135
+                    driver_config['endpoint']['name']
136
+                )
137
+                # override the device name
138
+                if driver_config['endpoint'].get('device') is not None:
139
+                    device_cfg = driver_config['endpoint']['device']
140
+
141
+                if driver_config['endpoint'].get('child_device') is not None:
142
+                    child_device_cfg = driver_config['endpoint'][
143
+                        'child_device']
144
+
145
+            # Device
146
+            device_name = self._get_value(device_cfg['name'])
147
+            try:
148
+                # TODO(mrkanag) Set the right status
149
+                device = db_api.device_create(
150
+                    self.context,
151
+                    dict(name=device_name,
152
+                         status='active',
153
+                         region_id=self.region_id))
154
+
155
+                LOG.info('Device %s is created' % device)
156
+            except exception.AlreadyExist:
157
+                device = db_api.device_get_by_name(
158
+                    self.context,
159
+                    device_name)
160
+                LOG.info('Device %s is existing' % device)
161
+
162
+            # TODO(mrkanag) Poperly Handle child devices
163
+            if child_device_cfg is not None:
164
+                for d_name in self._get_value(child_device_cfg['key']):
165
+                    base_name = self._get_value(child_device_cfg['base_name'])
166
+                    d_name = '%s-%s' % (base_name, d_name)
167
+                    try:
168
+                        device = db_api.device_get_by_name(
169
+                            self.context,
170
+                            d_name)
171
+                        LOG.info('Device %s is existing' % device)
172
+                    except exception.DeviceNotFound:
173
+                        # TODO(mrkanag) region_id is hard-coded, fix it !
174
+                        # Set the right status as well
175
+                        r_id = 'f7dcd175-27ef-46b5-997f-e6e572f320b0'
176
+                        device = db_api.device_create(
177
+                            self.context,
178
+                            dict(name=d_name,
179
+                                 status='active',
180
+                                 parent_id=device.id,
181
+                                 region_id=r_id))
182
+
183
+                LOG.info('Device %s is created' % device)
184
+
185
+            # Device Endpoint
186
+            try:
187
+                for k, v in connection_cfg.iteritems():
188
+                    connection[k] = self._get_value(k)
189
+
190
+                device_endpoint = db_api.device_endpoint_create(
191
+                    self.context,
192
+                    dict(name=device_endpoint_name,
193
+                         connection=connection,
194
+                         type=endpoint_type,
195
+                         device_id=device.id))
196
+                LOG.info('Device Endpoint %s is created' % device_endpoint)
197
+            except exception.AlreadyExist:
198
+                device_endpoints = db_api.device_endpoint_get_by_device_type(
199
+                    self.context,
200
+                    device_id=device.id,
201
+                    type=endpoint_type,
202
+                    name=device_endpoint_name)
203
+                if len(device_endpoints) >= 1:
204
+                    device_endpoint = device_endpoints[0]
205
+                    LOG.info('Device Endpoint %s is existing' %
206
+                             device_endpoints[0])
207
+
208
+            # Device Driver Class
209
+            try:
210
+                device_driver_class = db_api.device_driver_class_create(
211
+                    self.context,
212
+                    dict(name=driver_name,
213
+                         python_class=driver_name,
214
+                         type=driver_def['type'],
215
+                         device_id=device.id,
216
+                         endpoint_id=device_endpoint.id,
217
+                         service_worker_id=self.service_worker_id,
218
+                         extra=driver_def.get('extra')))
219
+                LOG.info('Device Driver Class %s is created' %
220
+                         device_driver_class)
221
+            except exception.AlreadyExist:
222
+                device_driver_class = db_api.device_driver_class_get_by_name(
223
+                    self.context,
224
+                    driver_name)
225
+                LOG.info('Device Driver Class %s is existing' %
226
+                         device_driver_class)
227
+
228
+            # Device Driver
229
+            try:
230
+                device_driver = db_api.device_driver_create(
231
+                    self.context,
232
+                    dict(device_id=device.id,
233
+                         name=driver_name,
234
+                         endpoint_id=device_endpoint.id,
235
+                         device_driver_class_id=device_driver_class.id,
236
+                         service_worker_id=self.service_worker_id)
237
+                )
238
+                LOG.info('Device Driver %s is created' %
239
+                         device_driver)
240
+            except exception.AlreadyExist:
241
+                device_drivers = \
242
+                    db_api.device_driver_get_by_device_endpoint_service_worker(
243
+                        self.context,
244
+                        device_id=device.id,
245
+                        endpoint_id=device_endpoint.id,
246
+                        device_driver_class_id=device_driver_class.id,
247
+                        service_worker_id=self.service_worker_id
248
+                    )
249
+                if len(device_drivers) >= 1:
250
+                    device_driver = device_drivers[0]
251
+                    LOG.info('Device Driver %s is existing' %
252
+                             device_driver)

+ 55
- 0
namos/conductor/region_processor.py View File

@@ -0,0 +1,55 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+# not use this file except in compliance with the License. You may obtain
5
+# a copy of the License at
6
+#
7
+#      http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+# License for the specific language governing permissions and limitations
13
+# under the License.
14
+
15
+from oslo_config import cfg
16
+from oslo_log import log
17
+
18
+from namos.common import exception
19
+from namos.db import api as db_api
20
+
21
+LOG = log.getLogger(__name__)
22
+
23
+CONF = cfg.CONF
24
+
25
+
26
+class RegionProcessor(object):
27
+    def __init__(self,
28
+                 context,
29
+                 manager,
30
+                 registration_info):
31
+        self.registration_info = registration_info
32
+        self.manager = manager
33
+        self.context = context
34
+
35
+    def process_region(self):
36
+        # region
37
+        # If region is not provided, make it as belongs to namos's region
38
+        if not self.registration_info.get('region_name'):
39
+            self.registration_info[
40
+                'region_name'] = cfg.CONF.os_namos.region_name
41
+
42
+        try:
43
+            region = db_api.region_create(
44
+                self.context,
45
+                dict(name=self.registration_info.get('region_name'))
46
+            )
47
+            LOG.info('Region %s is created' % region)
48
+        except exception.AlreadyExist:
49
+            region = db_api.region_get_by_name(
50
+                self.context,
51
+                name=self.registration_info.get('region_name')
52
+            )
53
+            LOG.info('Region %s is existing' % region)
54
+
55
+        return region.id

+ 124
- 0
namos/conductor/service_processor.py View File

@@ -0,0 +1,124 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+# not use this file except in compliance with the License. You may obtain
5
+# a copy of the License at
6
+#
7
+#      http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+# License for the specific language governing permissions and limitations
13
+# under the License.
14
+
15
+from oslo_config import cfg
16
+from oslo_log import log
17
+
18
+from namos.common import config as namos_config
19
+from namos.common import exception
20
+from namos.db import api as db_api
21
+
22
+LOG = log.getLogger(__name__)
23
+
24
+CONF = cfg.CONF
25
+
26
+
27
class ServiceProcessor(object):
    """Persist the service topology reported by an os-namos agent.

    For one registration this creates (or finds) the service node, the
    service, the service component and the service worker records, and
    can clean up dead workers for a component.
    """

    def __init__(self,
                 context,
                 manager,
                 region_id,
                 registration_info):
        # context: request context used for all db operations
        # manager: conductor manager owning this processor
        # region_id: id of the region the service node belongs to
        # registration_info: dict reported by the os-namos agent
        self.registration_info = registration_info
        self.manager = manager
        self.context = context
        self.region_id = region_id

    def process_service(self):
        """Register node, service, component and worker records.

        Returns a (service_component_id, service_worker_id) tuple for
        the registration. Each step is create-or-fetch: an AlreadyExist
        from the db layer means the record was persisted earlier.
        """
        # Service Node
        try:
            # TODO(mrkanag) use proper node name instead of fqdn
            node = db_api.service_node_create(
                self.context,
                dict(name=self.registration_info.get('fqdn'),
                     fqdn=self.registration_info.get('fqdn'),
                     region_id=self.region_id,
                     extra={'ips': self.registration_info.get('ips')}))
            LOG.info('Service node %s is created', node)
        except exception.AlreadyExist:
            # TODO(mrkanag) is this to be region specific search
            node = db_api.service_node_get_by_name(
                self.context,
                self.registration_info.get('fqdn'))
            LOG.info('Service node %s is existing', node)

        # Service
        try:
            s_id = 'b9c2549f-f685-4bc2-92e9-ba8af9c18591'
            service = db_api.service_create(
                self.context,
                # TODO(mrkanag) use keystone python client and
                # use real service id here
                dict(name=self.registration_info.get('project_name'),
                     keystone_service_id=s_id))

            LOG.info('Service %s is created', service)
        except exception.AlreadyExist:
            service = db_api.service_get_by_name(
                self.context,
                self.registration_info.get('project_name'))
            LOG.info('Service %s is existing', service)

        # Service Component
        try:
            service_component = db_api.service_component_create(
                self.context,
                dict(name=self.registration_info['prog_name'],
                     node_id=node.id,
                     service_id=service.id,
                     type=namos_config.find_type(self.registration_info[
                         'prog_name'])))
            LOG.info('Service Component %s is created', service_component)
        except exception.AlreadyExist:
            service_components = \
                db_api.service_component_get_all_by_node_for_service(
                    self.context,
                    node_id=node.id,
                    service_id=service.id,
                    name=self.registration_info['prog_name']
                )
            if len(service_components) == 1:
                service_component = service_components[0]
                LOG.info('Service Component %s is existing',
                         service_component)
            # TODO(mrkanag) what to do when service_components size is > 1
            # NOTE(review): when the count is 0 or > 1, service_component
            # stays unbound and the worker step below raises NameError —
            # needs a decision (see TODO above).

        # Service Worker
        try:
            service_worker = db_api.service_worker_create(
                self.context,
                # TODO(mrkanag) Fix the name, device driver proper !
                dict(name='%s@%s' % (service_component.name,
                                     self.registration_info['pid']),
                     pid=self.registration_info['identification'],
                     host=self.registration_info['host'],
                     service_component_id=service_component.id,
                     deleted_at=None,
                     is_launcher=self.registration_info['i_am_launcher']
                     ))
            LOG.info('Service Worker %s is created', service_worker)
        except exception.AlreadyExist:
            service_worker = db_api.service_worker_get_all_by(
                self.context,
                pid=self.registration_info['identification'],
                service_component_id=service_component.id
            )[0]
            LOG.info('Service Worker %s is existing',
                     service_worker)

        return service_component.id, service_worker.id

    def cleanup(self, service_component_id):
        """Remove the dead service workers of the given component."""
        db_api.cleanup(self.context, service_component_id)

+ 4
- 1416
namos/db/openstack_drivers.py
File diff suppressed because it is too large
View File


+ 293
- 293
namos/db/sample.py View File

@@ -15,329 +15,329 @@
15 15
 from namos.db import api
16 16
 
17 17
 REGION_LIST = [
18
-    {'f7dcd175-27ef-46b5-997f-e6e572f320af':
19
-         {'name': 'RegionOne',
20
-          'keystone_region_id': 'region_one',
21
-          'extra': {'location': 'bangalore'}}
22
-    },
23
-    {'f7dcd175-27ef-46b5-997f-e6e572f320b0':
24
-         {'name': 'RegionTwo',
25
-          'keystone_region_id': 'region_two',
26
-          'extra': {'location': 'chennai'}}
27
-    }
18
+    # {'f7dcd175-27ef-46b5-997f-e6e572f320af':
19
+    #      {'name': 'RegionOne',
20
+    #       'keystone_region_id': 'region_one',
21
+    #       'extra': {'location': 'bangalore'}}
22
+    # },
23
+    # {'f7dcd175-27ef-46b5-997f-e6e572f320b0':
24
+    #      {'name': 'RegionTwo',
25
+    #       'keystone_region_id': 'region_two',
26
+    #       'extra': {'location': 'chennai'}}
27
+    # }
28 28
 ]
29 29
 
30 30
 DEVICE_LIST = [
31 31
     # vCenter
32
-    {'91007d3c-9c95-40c5-8f94-c7b071f9b577':
33
-        {
34
-            'name': 'Vmware_vCenter_1',
35
-            'display_name': 'VMWare vCenter 1',
36
-            'description': 'vCenter 5.0',
37
-            'status': 'active',
38
-            'extra': {'owner': 'mkr1481@namos.com'},
39
-            'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
40
-        },
41
-    # Clusters
42
-    {'d468ea2e-74f6-4a55-a7f4-a56d18e91c66':
43
-         {
44
-             'name': 'vmware_vc_Cluster_1',
45
-             'display_name': 'VMWare vCenter 1 Cluster 1',
46
-             'description': 'Cluster 1 having 3 hosts',
47
-             'status': 'active',
48
-             'extra': {'owner': 'mkr1481@namos.com',
49
-                    'vcpus': 1000,
50
-                    'ram_in_gb': 1024},
51
-          'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
52
-          'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
53
-    },
54
-    {'6c97f476-8e27-4e21-8528-a5ec236306f3':
55
-         {'name': 'vmware_vc_Cluster_2',
56
-          'display_name': 'VMWare vCenter 1 Cluster 2',
57
-          'description': 'Cluster 2 having 5 hosts',
58
-          'status': 'active',
59
-          'extra': {'owner': 'mkr1481@namos.com'},
60
-          'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
61
-          'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
62
-    },
63
-    # Datastores
64
-    {'fdab6c51-38fb-4fb1-a76f-9c243a8b8296':
65
-         {'name': 'Vmware_vCenter_1_datastore_1',
66
-          'display_name': 'VMWare vCenter 1 datastore 1',
67
-          'description': 'vCenter 5.0 Datastore created from FC',
68
-          'status': 'active',
69
-          'extra': {'owner': 'mkr1481@namos.com',
70
-                    'size_in_gb': '102400'},
71
-          'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
72
-          'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
73
-    },
74
-    {'05b935b3-942c-439c-a6a4-9c3c73285430':
75
-         {'name': 'Vmware_vCenter_1_datastore_2',
76
-          'display_name': 'VMWare vCenter 1 datastore 2',
77
-          'description': 'vCenter 5.0 Datastore created from FC',
78
-          'status': 'active',
79
-          'extra': {'owner': 'mkr1481@namos.com',
80
-                    'size_in_gb': '10240'},
81
-          'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
82
-          'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
83
-    },
84
-    # Switch
85
-    {'f062556b-45c4-417d-80fa-4283b9c58da3':
86
-         {'name': 'Vmware_vCenter_1_switch_1',
87
-          'display_name': 'VMWare vCenter 1 Dist. vSwitch 1',
88
-          'description': 'vCenter 5.0 distributed virtual switch',
89
-          'status': 'active',
90
-          'extra': {'owner': 'mkr1481@namos.com'},
91
-          'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
92
-          'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
93
-    }
32
+    # {'91007d3c-9c95-40c5-8f94-c7b071f9b577':
33
+    #     {
34
+    #         'name': 'Vmware_vCenter_1',
35
+    #         'display_name': 'VMWare vCenter 1',
36
+    #         'description': 'vCenter 5.0',
37
+    #         'status': 'active',
38
+    #         'extra': {'owner': 'mkr1481@namos.com'},
39
+    #         'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
40
+    #     },
41
+    # # Clusters
42
+    # {'d468ea2e-74f6-4a55-a7f4-a56d18e91c66':
43
+    #      {
44
+    #          'name': 'vmware_vc_Cluster_1',
45
+    #          'display_name': 'VMWare vCenter 1 Cluster 1',
46
+    #          'description': 'Cluster 1 having 3 hosts',
47
+    #          'status': 'active',
48
+    #          'extra': {'owner': 'mkr1481@namos.com',
49
+    #                 'vcpus': 1000,
50
+    #                 'ram_in_gb': 1024},
51
+    #       'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
52
+    #       'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
53
+    # },
54
+    # {'6c97f476-8e27-4e21-8528-a5ec236306f3':
55
+    #      {'name': 'vmware_vc_Cluster_2',
56
+    #       'display_name': 'VMWare vCenter 1 Cluster 2',
57
+    #       'description': 'Cluster 2 having 5 hosts',
58
+    #       'status': 'active',
59
+    #       'extra': {'owner': 'mkr1481@namos.com'},
60
+    #       'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
61
+    #       'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
62
+    # },
63
+    # # Datastores
64
+    # {'fdab6c51-38fb-4fb1-a76f-9c243a8b8296':
65
+    #      {'name': 'Vmware_vCenter_1_datastore_1',
66
+    #       'display_name': 'VMWare vCenter 1 datastore 1',
67
+    #       'description': 'vCenter 5.0 Datastore created from FC',
68
+    #       'status': 'active',
69
+    #       'extra': {'owner': 'mkr1481@namos.com',
70
+    #                 'size_in_gb': '102400'},
71
+    #       'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
72
+    #       'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
73
+    # },
74
+    # {'05b935b3-942c-439c-a6a4-9c3c73285430':
75
+    #      {'name': 'Vmware_vCenter_1_datastore_2',
76
+    #       'display_name': 'VMWare vCenter 1 datastore 2',
77
+    #       'description': 'vCenter 5.0 Datastore created from FC',
78
+    #       'status': 'active',
79
+    #       'extra': {'owner': 'mkr1481@namos.com',
80
+    #                 'size_in_gb': '10240'},
81
+    #       'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
82
+    #       'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
83
+    # },
84
+    # # Switch
85
+    # {'f062556b-45c4-417d-80fa-4283b9c58da3':
86
+    #      {'name': 'Vmware_vCenter_1_switch_1',
87
+    #       'display_name': 'VMWare vCenter 1 Dist. vSwitch 1',
88
+    #       'description': 'vCenter 5.0 distributed virtual switch',
89
+    #       'status': 'active',
90
+    #       'extra': {'owner': 'mkr1481@namos.com'},
91
+    #       'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
92
+    #       'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
93
+    # }
94 94
 ]
95 95
 
96 96
 ENDPOINT_LIST = [
97
-    {'7403bf80-9376-4081-89ee-d2501661ca84':{
98
-        'name': 'vcenter1_connection',
99
-        'connection': {'host_ip': '10.1.1.3',
100
-                       'host_port': 443,
101
-                       'host_username': 'adminstrator',
102
-                       'host_password': 'password'},
103
-        'device_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577'
104
-    }}
97
+    # {'7403bf80-9376-4081-89ee-d2501661ca84':{
98
+    #     'name': 'vcenter1_connection',
99
+    #     'connection': {'host_ip': '10.1.1.3',
100
+    #                    'host_port': 443,
101
+    #                    'host_username': 'adminstrator',
102
+    #                    'host_password': 'password'},
103
+    #     'device_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577'
104
+    # }}
105 105
 ]
106 106
 
107 107
 
108 108
 DEVICE_DRIVER_CLASS_LIST = [
109
-    {'0664e8c0-ff02-427e-8fa3-8788c017ad84': {
110
-        'python_class': 'nova...vcdriver',
111
-        'type': 'compute',
112
-        'vendor': 'vmware-community'
113
-    }},
114
-    {'11caf99c-f820-4266-a461-5a15437a8144': {
115
-        'python_class': 'cinder...vmdkdriver',
116
-        'type': 'volume',
117
-        'vendor': 'vmware-community'
118
-    }},
119
-    {'bb99ea96-fe6b-49e6-a761-faea92b79f75': {
120
-        'python_class': 'neutron...nsxdriver',
121
-        'type': 'network',
122
-        'vendor': 'vmware-community'
123
-    }}
109
+    # {'0664e8c0-ff02-427e-8fa3-8788c017ad84': {
110
+    #     'python_class': 'nova...vcdriver',
111
+    #     'type': 'compute',
112
+    #     'vendor': 'vmware-community'
113
+    # }},
114
+    # {'11caf99c-f820-4266-a461-5a15437a8144': {
115
+    #     'python_class': 'cinder...vmdkdriver',
116
+    #     'type': 'volume',
117
+    #     'vendor': 'vmware-community'
118
+    # }},
119
+    # {'bb99ea96-fe6b-49e6-a761-faea92b79f75': {
120
+    #     'python_class': 'neutron...nsxdriver',
121
+    #     'type': 'network',
122
+    #     'vendor': 'vmware-community'
123
+    # }}
124 124
 ]
125 125
 
126 126
 DEVICE_DRIVER_LIST = [
127
-    # nova
128
-    {'3c089cdb-e1d5-4182-9a8e-cef9899fd7e5':{
129
-        'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
130
-        'device_driver_class_id':'0664e8c0-ff02-427e-8fa3-8788c017ad84',
131
-        'device_id': 'd468ea2e-74f6-4a55-a7f4-a56d18e91c66'
132
-    }},
133
-    # nova
134
-    {'4e0360ae-0728-4bfd-a557-3ad867231787':{
135
-        'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
136
-        'device_driver_class_id':'0664e8c0-ff02-427e-8fa3-8788c017ad84',
137
-        'device_id': '6c97f476-8e27-4e21-8528-a5ec236306f3'
138
-    }},
139
-    # cinder
140
-    {'92d5e2c1-511b-4837-a57d-5e6ee723060c':{
141
-        'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
142
-        'device_driver_class_id':'11caf99c-f820-4266-a461-5a15437a8144',
143
-        'device_id': 'fdab6c51-38fb-4fb1-a76f-9c243a8b8296'
144
-    }},
145
-    # cinder
146
-    {'f3d807a0-eff0-4473-8ae5-594967136e05':{
147
-        'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
148
-        'python_class_id':'11caf99c-f820-4266-a461-5a15437a8144',
149
-        'device_id': '05b935b3-942c-439c-a6a4-9c3c73285430'
150
-    }},
151
-    # neutron
152
-    {'f27eb548-929c-45e2-a2a7-dc123e2a1bc7':{
153
-        'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
154
-        'python_class_id':'bb99ea96-fe6b-49e6-a761-faea92b79f75',
155
-        'device_id': 'f062556b-45c4-417d-80fa-4283b9c58da3'
156
-    }}
127
+    # # nova
128
+    # {'3c089cdb-e1d5-4182-9a8e-cef9899fd7e5':{
129
+    #     'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
130
+    #     'device_driver_class_id': '0664e8c0-ff02-427e-8fa3-8788c017ad84',
131
+    #     'device_id': 'd468ea2e-74f6-4a55-a7f4-a56d18e91c66'
132
+    # }},
133
+    # # nova
134
+    # {'4e0360ae-0728-4bfd-a557-3ad867231787':{
135
+    #     'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
136
+    #     'device_driver_class_id': '0664e8c0-ff02-427e-8fa3-8788c017ad84',
137
+    #     'device_id': '6c97f476-8e27-4e21-8528-a5ec236306f3'
138
+    # }},
139
+    # # cinder
140
+    # {'92d5e2c1-511b-4837-a57d-5e6ee723060c':{
141
+    #     'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
142
+    #     'device_driver_class_id': '11caf99c-f820-4266-a461-5a15437a8144',
143
+    #     'device_id': 'fdab6c51-38fb-4fb1-a76f-9c243a8b8296'
144
+    # }},
145
+    # # cinder
146
+    # {'f3d807a0-eff0-4473-8ae5-594967136e05':{
147
+    #     'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
148
+    #     'python_class_id': '11caf99c-f820-4266-a461-5a15437a8144',
149
+    #     'device_id': '05b935b3-942c-439c-a6a4-9c3c73285430'
150
+    # }},
151
+    # # neutron
152
+    # {'f27eb548-929c-45e2-a2a7-dc123e2a1bc7':{
153
+    #     'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
154
+    #     'python_class_id': 'bb99ea96-fe6b-49e6-a761-faea92b79f75',
155
+    #     'device_id': 'f062556b-45c4-417d-80fa-4283b9c58da3'
156
+    # }}
157 157
 ]
158 158
 
159 159
 
160
-SERVICE_LIST =[
161
-    {'11367a37-976f-468a-b8dd-77b28ee63cf4': {
162
-        'name': 'nova_service',
163
-        'keystone_service_id': 'b9c2549f-f685-4bc2-92e9-ba8af9c18599'
164
-    }},
165
-    {'809e04c1-2f3b-43af-9677-3428a0154216': {
166
-        'name': 'cinder_service',
167
-        'keystone_service_id': '9cc4c374-abb5-4bdc-9129-f0fa4bba0e0b'
168
-    }},
169
-    {'3495fa07-39d9-4d87-9f97-0a582a3e25c3': {
170
-        'name': 'neutron_service',
171
-        'keystone_service_id': 'b24e2884-75bc-4876-81d1-5b4fb6e92afc'
172
-    }}
160
+SERVICE_LIST = [
161
+    # {'11367a37-976f-468a-b8dd-77b28ee63cf4': {
162
+    #     'name': 'nova_service',
163
+    #     'keystone_service_id': 'b9c2549f-f685-4bc2-92e9-ba8af9c18599'
164
+    # }},
165
+    # {'809e04c1-2f3b-43af-9677-3428a0154216': {
166
+    #     'name': 'cinder_service',
167
+    #     'keystone_service_id': '9cc4c374-abb5-4bdc-9129-f0fa4bba0e0b'
168
+    # }},
169
+    # {'3495fa07-39d9-4d87-9f97-0a582a3e25c3': {
170
+    #     'name': 'neutron_service',
171
+    #     'keystone_service_id': 'b24e2884-75bc-4876-81d1-5b4fb6e92afc'
172
+    # }}
173 173
 ]
174 174
 
175 175
 SERVICE_NODE_LIST = [
176
-    {
177
-        'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe': {
178
-            'name': 'd_network_node_1',
179
-            'fqdn': 'network_node_1.devstack1.abc.com',
180
-            'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
181
-        }
182
-    },
183
-    {
184
-        '4e99a641-dbe9-416e-8c0a-78015dc55a2a': {
185
-            'name': 'd_compute_node_1',
186
-            'fqdn': 'compute_node_1.devstack.abc.com',
187
-            'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
188
-        }
189
-    },
190
-    {
191
-        'b92f4811-7970-421b-a611-d51c62972388': {
192
-            'name': 'd_cloud-controller-1',
193
-            'fqdn': 'cloud_controller_1.devstack1.abc.com',
194
-            'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
195
-        }
196
-    },
197
-    {
198
-        'e5913cd3-a416-40e1-889f-1a1b1c53001c': {
199
-            'name': 'd_storage_node_1',
200
-            'fqdn': 'storage_node_1.devstack.abc.com',
201
-            'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
202
-        }
203
-    }
176
+    # {
177
+    #     'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe': {
178
+    #         'name': 'd_network_node_1',
179
+    #         'fqdn': 'network_node_1.devstack1.abc.com',
180
+    #         'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
181
+    #     }
182
+    # },
183
+    # {
184
+    #     '4e99a641-dbe9-416e-8c0a-78015dc55a2a': {
185
+    #         'name': 'd_compute_node_1',
186
+    #         'fqdn': 'compute_node_1.devstack.abc.com',
187
+    #         'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
188
+    #     }
189
+    # },
190
+    # {
191
+    #     'b92f4811-7970-421b-a611-d51c62972388': {
192
+    #         'name': 'd_cloud-controller-1',
193
+    #         'fqdn': 'cloud_controller_1.devstack1.abc.com',
194
+    #         'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
195
+    #     }
196
+    # },
197
+    # {
198
+    #     'e5913cd3-a416-40e1-889f-1a1b1c53001c': {
199
+    #         'name': 'd_storage_node_1',
200
+    #         'fqdn': 'storage_node_1.devstack.abc.com',
201
+    #         'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
202
+    #     }
203
+    # }
204 204
 ]
205 205
 
206 206
 
207 207
 SERVICE_COMPONENT_LIST = [
208
-    # nova
209
-    {
210
-        '7259a9ff-2e6f-4e8d-b2fb-a529188825dd': {
211
-            'name': 'd_nova-compute',
212
-            'node_id': '4e99a641-dbe9-416e-8c0a-78015dc55a2a',
213
-            'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
214
-        }
215
-    },
216
-    {
217
-        'e5e366ea-9029-4ba0-8bbc-f658e642aa54': {
218
-            'name': 'd_nova-scheduler',
219
-            'node_id': 'b92f4811-7970-421b-a611-d51c62972388',
220
-            'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
221
-        }
222
-    },
223
-    {
224
-        'f7813622-85ee-4588-871d-42c3128fa14f': {
225
-            'name': 'd_nova-api',
226
-            'node_id': 'b92f4811-7970-421b-a611-d51c62972388',
227
-            'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
228
-        }
229
-    },
230
-    # cinder
231
-    {
232
-        'b0e9ac3f-5600-406c-95e4-f698b1eecfc6': {
233
-            'name': 'd_cinder-volume',
234
-            'node_id': 'e5913cd3-a416-40e1-889f-1a1b1c53001c',
235
-            'service_id': '809e04c1-2f3b-43af-9677-3428a0154216'
236
-        }
237
-    },
238
-    # neutron
239
-    {
240
-        '54f608bd-fb01-4614-9653-acbb803aeaf7':{
241
-            'name': 'd_neutron-agent',
242
-            'node_id': 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe',
243
-            'service_id': '3495fa07-39d9-4d87-9f97-0a582a3e25c3'
244
-        }
245
-    }
208
+    # # nova
209
+    # {
210
+    #     '7259a9ff-2e6f-4e8d-b2fb-a529188825dd': {
211
+    #         'name': 'd_nova-compute',
212
+    #         'node_id': '4e99a641-dbe9-416e-8c0a-78015dc55a2a',
213
+    #         'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
214
+    #     }
215
+    # },
216
+    # {
217
+    #     'e5e366ea-9029-4ba0-8bbc-f658e642aa54': {
218
+    #         'name': 'd_nova-scheduler',
219
+    #         'node_id': 'b92f4811-7970-421b-a611-d51c62972388',
220
+    #         'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
221
+    #     }
222
+    # },
223
+    # {
224
+    #     'f7813622-85ee-4588-871d-42c3128fa14f': {
225
+    #         'name': 'd_nova-api',
226
+    #         'node_id': 'b92f4811-7970-421b-a611-d51c62972388',
227
+    #         'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
228
+    #     }
229
+    # },
230
+    # # cinder
231
+    # {
232
+    #     'b0e9ac3f-5600-406c-95e4-f698b1eecfc6': {
233
+    #         'name': 'd_cinder-volume',
234
+    #         'node_id': 'e5913cd3-a416-40e1-889f-1a1b1c53001c',
235
+    #         'service_id': '809e04c1-2f3b-43af-9677-3428a0154216'
236
+    #     }
237
+    # },
238
+    # # neutron
239
+    # {
240
+    #     '54f608bd-fb01-4614-9653-acbb803aeaf7':{
241
+    #         'name': 'd_neutron-agent',
242
+    #         'node_id': 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe',
243
+    #         'service_id': '3495fa07-39d9-4d87-9f97-0a582a3e25c3'
244
+    #     }
245
+    # }
246 246
 ]
247 247
 
248 248
 SERVICE_WORKER_LIST = [
249
-    # cluster-1
250
-    {
251
-        '65dbd695-fa92-4950-b8b4-d46aa0408f6a': {
252
-            'name': 'd_nova-compute-esx-cluster1',
253
-            'pid': '1233454343',
254
-            'host': 'd_nova-compute-esx-cluster1',
255
-            'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd',
256
-            'device_driver_id': '3c089cdb-e1d5-4182-9a8e-cef9899fd7e5'
257
-        }
258
-    },
259
-    # cluster-2
260
-    {
261
-        '50d2c0c6-741d-4108-a3a2-2090eaa0be37': {
262
-            'name': 'd_nova-compute-esx-cluster2',
263
-            'pid': '1233454344',
264
-            'host': 'd_nova-compute-esx-cluster2',
265
-            'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd',
266
-            'device_driver_id': '4e0360ae-0728-4bfd-a557-3ad867231787'
267
-        }
268
-    },
269
-    # datastore-1
270
-    {
271
-        '77e3ee16-fa2b-4e12-ad1c-226971d1a482': {
272
-            'name': 'd_cinder-volume-vmdk-1',
273
-            'pid': '09878654',
274
-            'host': 'd_cinder-volume-vmdk-1',
275
-            'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6',
276
-            'device_driver_id': '92d5e2c1-511b-4837-a57d-5e6ee723060c'
277
-        }
278
-    },
279
-    # datastore-2
280
-    {
281
-        '8633ce68-2b02-4efd-983c-49a460f6d7ef': {
282
-            'name': 'd_cinder-volume-vmdk-2',
283
-            'pid': '4353453',
284
-            'host': 'd_cinder-volume-vmdk-2',
285
-            'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6',
286
-            'device_driver_id': 'f3d807a0-eff0-4473-8ae5-594967136e05'
287
-        }
288
-    },
289
-    # vswitch
290
-    {
291
-        '5a3ac5b9-9186-45d8-928c-9e702368dfb4': {
292
-            'name': 'd_neutron-agent',
293
-            'pid': '2359234',
294
-            'host': 'd_neutron-agent',
295
-            'service_component_id': '54f608bd-fb01-4614-9653-acbb803aeaf7',
296
-            'device_driver_id': 'f27eb548-929c-45e2-a2a7-dc123e2a1bc7'
297
-        }
298
-    },
249
+    # # cluster-1
250
+    # {
251
+    #     '65dbd695-fa92-4950-b8b4-d46aa0408f6a': {
252
+    #         'name': 'd_nova-compute-esx-cluster1',
253
+    #         'pid': '1233454343',
254
+    #         'host': 'd_nova-compute-esx-cluster1',
255
+    #         'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd',
256
+    #         'device_driver_id': '3c089cdb-e1d5-4182-9a8e-cef9899fd7e5'
257
+    #     }
258
+    # },
259
+    # # cluster-2
260
+    # {
261
+    #     '50d2c0c6-741d-4108-a3a2-2090eaa0be37': {
262
+    #         'name': 'd_nova-compute-esx-cluster2',
263
+    #         'pid': '1233454344',
264
+    #         'host': 'd_nova-compute-esx-cluster2',
265
+    #         'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd',
266
+    #         'device_driver_id': '4e0360ae-0728-4bfd-a557-3ad867231787'
267
+    #     }
268
+    # },
269
+    # # datastore-1
270
+    # {
271
+    #     '77e3ee16-fa2b-4e12-ad1c-226971d1a482': {
272
+    #         'name': 'd_cinder-volume-vmdk-1',
273
+    #         'pid': '09878654',
274
+    #         'host': 'd_cinder-volume-vmdk-1',
275
+    #         'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6',
276
+    #         'device_driver_id': '92d5e2c1-511b-4837-a57d-5e6ee723060c'
277
+    #     }
278
+    # },
279
+    # # datastore-2
280
+    # {
281
+    #     '8633ce68-2b02-4efd-983c-49a460f6d7ef': {
282
+    #         'name': 'd_cinder-volume-vmdk-2',
283
+    #         'pid': '4353453',
284
+    #         'host': 'd_cinder-volume-vmdk-2',
285
+    #         'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6',
286
+    #         'device_driver_id': 'f3d807a0-eff0-4473-8ae5-594967136e05'
287
+    #     }
288
+    # },
289
+    # # vswitch
290
+    # {
291
+    #     '5a3ac5b9-9186-45d8-928c-9e702368dfb4': {
292
+    #         'name': 'd_neutron-agent',
293
+    #         'pid': '2359234',
294
+    #         'host': 'd_neutron-agent',
295
+    #         'service_component_id': '54f608bd-fb01-4614-9653-acbb803aeaf7',
296
+    #         'device_driver_id': 'f27eb548-929c-45e2-a2a7-dc123e2a1bc7'
297
+    #     }
298
+    # },
299 299
 ]
300 300
 
301 301
 CONFIG_LIST = [
302
-    {
303
-        'dc6aa02f-ba70-4410-a59c-5e113e629fe5': {
304
-            'name':'vmware.host_ip',
305
-            'value':'10.1.0.1',
306
-            'help': 'VMWare vcenter IP address',
307
-            'default':'',
308
-            'type':'String',
309
-            'required':True,
310
-            'secret': False,
311
-            'config_file':'/etc/nova/nova.conf',
312
-            'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
313
-        }
314
-    },
315
-    {
316
-        'dc6aa02f-ba70-4410-a59c-5e113e629f10': {
317
-            'name':'vmware.host_username',
318
-            'value':'Administraotr',
319
-            'help': 'VMWare vcenter Username',
320
-            'default':'Administrator',
321
-            'type':'String',
322
-            'required':True,
323
-            'secret': False,
324
-            'file':'/etc/nova/nova.conf',
325
-            'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
326
-        }
327
-    },
328
-    {
329
-        'dc6aa02f-ba70-4410-a59c-5e113e629f11': {
330
-            'name':'vmware.host_password',
331
-            'value':'password',
332
-            'help': 'VMWare vcenter password',
333
-            'default':'',
334
-            'type':'String',
335
-            'required':True,
336
-            'secret': True,
337
-            'file':'/etc/nova/nova.conf',
338
-            'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
339
-        },
340
-    }
302
+    # {
303
+    #     'dc6aa02f-ba70-4410-a59c-5e113e629fe5': {
304
+    #         'name': 'vmware.host_ip',
305
+    #         'value': '10.1.0.1',
306
+    #         'help': 'VMWare vcenter IP address',
307
+    #         'default': '',
308
+    #         'type': 'String',
309
+    #         'required':True,
310
+    #         'secret': False,
311
+    #         'config_file': '/etc/nova/nova.conf',
312
+    #         'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
313
+    #     }
314
+    # },
315
+    # {
316
+    #     'dc6aa02f-ba70-4410-a59c-5e113e629f10': {
317
+    #         'name': 'vmware.host_username',
318
+    #         'value': 'Administraotr',
319
+    #         'help': 'VMWare vcenter Username',
320
+    #         'default': 'Administrator',
321
+    #         'type': 'String',
322
+    #         'required':True,
323
+    #         'secret': False,
324
+    #         'file': '/etc/nova/nova.conf',
325
+    #         'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
326
+    #     }
327
+    # },
328
+    # {
329
+    #     'dc6aa02f-ba70-4410-a59c-5e113e629f11': {
330
+    #         'name': 'vmware.host_password',
331
+    #         'value': 'password',
332
+    #         'help': 'VMWare vcenter password',
333
+    #         'default': '',
334
+    #         'type': 'String',
335
+    #         'required':True,
336
+    #         'secret': True,
337
+    #         'file': '/etc/nova/nova.conf',
338
+    #         'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
339
+    #     },
340
+    # }
341 341
 ]
342 342
 
343 343
 

+ 2
- 0
setup.cfg View File

@@ -47,6 +47,8 @@ output_file = namos/locale/namos.pot
47 47
 [entry_points]
48 48
 console_scripts =
49 49
     namos-manage = namos.cmd.manage:main
50
+    namos-api = namos.cmd.api:main
51
+    namos-manager = namos.cmd.conductor:main
50 52
 
51 53
 oslo.config.opts =
52 54
     namos.common.config = namos.common.config:list_opts

Loading…
Cancel
Save