Browse Source

Region is added as part of os_namos

Change-Id: I54e8f5ca592ae3bc91d35a6997d601e92fd83b04
changes/67/307067/1
Kanagaraj Manickam 3 years ago
parent
commit
97d84d1b45

+ 4
- 5
etc/namos.conf View File

@@ -1,10 +1,6 @@
1 1
 [DEFAULT]
2 2
 rpc_backend = rabbit
3 3
 debug=True
4
-logging_exception_prefix = %(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s
5
-logging_debug_format_suffix = from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d
6
-logging_default_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s
7
-logging_context_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s%(color)s] %(instance)s%(color)s%(message)s
8 4
 
9 5
 [oslo_messaging_rabbit]
10 6
 rabbit_userid = stackrabbit
@@ -14,5 +10,8 @@ rabbit_hosts = 172.241.0.101
14 10
 [database]
15 11
 connection = mysql+pymysql://root:password@172.241.0.101/namos?charset=utf8
16 12
 
17
-[conductor]
13
+[os_manager]
18 14
 workers=3
15
+
16
+[os_namos]
17
+region_name=RegionTwo

+ 1
- 0
etc/oslo-config-schema.sync View File

@@ -1,6 +1,7 @@
1 1
 # List of config generator conf files for syncing the conf with namos
2 2
 heat=/opt/stack/heat/config-generator.conf
3 3
 namos=/home/manickan/workspace/namos/openstack/namos/config-generator.conf
4
+os_namos=/home/manickan/workspace/namos/openstack/os-namos/config-generator.conf
4 5
 keystone=/opt/stack/keystone/config-generator/keystone.conf
5 6
 neutron-bgp-dragent=/opt/stack/neutron/etc/oslo-config-generator/bgp_dragent.ini
6 7
 neutron-dhcp-agent=/opt/stack/neutron/etc/oslo-config-generator/dhcp_agent.ini

+ 4
- 4
namos/cmd/conductor.py View File

@@ -43,15 +43,15 @@ def main():
43 43
     from namos import conductor  # noqa
44 44
 
45 45
     mgr = service.RPCService(
46
-        CONF.conductor.name,
46
+        CONF.os_manager.name,
47 47
         config.PROJECT_NAME,
48 48
         manager.ConductorManager())
49 49
 
50
-    launcher = os_service.launch(CONF, mgr, CONF.conductor.workers)
50
+    launcher = os_service.launch(CONF, mgr, CONF.os_manager.workers)
51 51
 
52 52
     # TODO(mrkanag) Namos is not registering the RPC backend, fix it !
53
-    import os_namos
54
-    os_namos.register_myself()
53
+    # import os_namos
54
+    # os_namos.register_myself()
55 55
 
56 56
     launcher.wait()
57 57
 

+ 10
- 16
namos/cmd/manage.py View File

@@ -15,10 +15,10 @@
15 15
 import sys
16 16
 
17 17
 from oslo_config import cfg
18
-from oslo_utils import timeutils
19 18
 
20 19
 from namos.common import config
21 20
 from namos.common import exception
21
+from namos.common import utils
22 22
 from namos.db import api
23 23
 from namos.db import sample
24 24
 from namos.db.sqlalchemy import migration
@@ -29,24 +29,18 @@ MANAGE_COMMAND_NAME = 'namos-manage'
29 29
 
30 30
 
31 31
 class HeartBeat(object):
32
-    def find_status(self, sw, report_interval=60):
33
-        status = False
34
-        if sw.updated_at is not None:
35
-            if ((timeutils.utcnow() - sw.updated_at).total_seconds()
36
-                    <= report_interval):
37
-                status = True
38
-        else:
39
-            if ((timeutils.utcnow() - sw.created_at).total_seconds()
40
-                    <= report_interval):
41
-                status = True
42
-
43
-        return status
44
-
45 32
     def report_status(self):
46 33
         # TODO(mrkanag) Make like Node: Service: worker: status
47 34
         for sw in api.service_worker_get_all(None):
48
-            msg = '[%s] %s' % ('T' if self.find_status(sw) else 'F',
49
-                               sw.name)
35
+            # TODO(mrkanag) Move this to db layer and query non deleted entries
36
+            if sw.deleted_at is not None:
37
+                continue
38
+
39
+            msg = '[%s] [%s] %s %s' % (
40
+                'T' if sw.is_launcher else 'F',
41
+                'T' if utils.find_status(sw) else 'F',
42
+                sw.name,
43
+                sw.host)
50 44
             print (msg)
51 45
 
52 46
 

+ 2
- 2
namos/common/config.py View File

@@ -35,7 +35,7 @@ conductor_opts = [
35 35
 
36 36
 
37 37
 def register_conductor_opts():
38
-    CONF.register_opts(conductor_opts, 'conductor')
38
+    CONF.register_opts(conductor_opts, 'os_manager')
39 39
 
40 40
 
41 41
 def init_conf(prog):
@@ -52,4 +52,4 @@ def init_log(project=PROJECT_NAME):
52 52
 
53 53
 
54 54
 def list_opts():
55
-    yield 'conductor', conductor_opts
55
+    yield 'os_manager', conductor_opts

+ 29
- 0
namos/common/utils.py View File

@@ -0,0 +1,29 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+# not use this file except in compliance with the License. You may obtain
5
+# a copy of the License at
6
+#
7
+#      http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+# License for the specific language governing permissions and limitations
13
+# under the License.
14
+
15
+from oslo_utils import timeutils
16
+
17
+
18
+def find_status(sw, report_interval=60):
19
+    status = False
20
+    if sw.updated_at is not None:
21
+        if ((timeutils.utcnow() - sw.updated_at).total_seconds()
22
+                <= report_interval):
23
+            status = True
24
+    else:
25
+        if ((timeutils.utcnow() - sw.created_at).total_seconds()
26
+                <= report_interval):
27
+            status = True
28
+
29
+    return status

+ 92
- 33
namos/conductor/manager.py View File

@@ -22,6 +22,7 @@ from oslo_utils import timeutils
22 22
 from namos.common import config
23 23
 from namos.common import exception
24 24
 from namos.common import messaging
25
+from namos.common import utils
25 26
 from namos.db import api as db_api
26 27
 from namos.db import openstack_drivers
27 28
 
@@ -68,8 +69,10 @@ class ConductorManager(object):
68 69
         ))
69 70
 
70 71
         # Service processing
71
-        sp = ServiceProcessor(registration_info)
72
-        service_worker_id = sp.process_service(context)
72
+        sp = ServiceProcessor(context,
73
+                              self,
74
+                              registration_info)
75
+        service_component_id, service_worker_id = sp.process_service(context)
73 76
 
74 77
         #  Device Driver processing
75 78
         dp = DriverProcessor(service_worker_id,
@@ -82,6 +85,8 @@ class ConductorManager(object):
82 85
         ))
83 86
         self._regisgration_ackw(context,
84 87
                                 registration_info['identification'])
88
+
89
+        sp.cleanup(service_component_id)
85 90
         return service_worker_id
86 91
 
87 92
     def _regisgration_ackw(self, context, identification):
@@ -94,6 +99,22 @@ class ConductorManager(object):
94 99
                     identification=identification)
95 100
         LOG.info("REGISTER [%s] ACK" % identification)
96 101
 
102
+    def _ping(self, context, identification):
103
+        client = messaging.get_rpc_client(
104
+            topic='namos.CONF.%s' % identification,
105
+            version=self.RPC_API_VERSION,
106
+            exchange=config.PROJECT_NAME)
107
+        try:
108
+            client.call(context,
109
+                        'ping_me',
110
+                        identification=identification)
111
+
112
+            LOG.debug("PING [%s] SUCCESSFUL" % identification)
113
+            return True
114
+        except:  # noqa
115
+            LOG.debug("PING [%s] FAILED" % identification)
116
+            return False
117
+
97 118
     @request_context
98 119
     def heart_beat(self, context, identification, dieing=False):
99 120
         try:
@@ -164,8 +185,13 @@ class ConductorManager(object):
164 185
 
165 186
 
166 187
 class ServiceProcessor(object):
167
-    def __init__(self, registration_info):
188
+    def __init__(self,
189
+                 context,
190
+                 manager,
191
+                 registration_info):
168 192
         self.registration_info = registration_info
193
+        self.manager = manager
194
+        self.context = context
169 195
 
170 196
     def file_to_configs(self, file_content):
171 197
         tmp_file_path = '/tmp/sample-namos-config.conf'
@@ -191,15 +217,33 @@ class ServiceProcessor(object):
191 217
         return conf_dict
192 218
 
193 219
     def process_service(self, context):
220
+        # region
221
+        # If region is not provided, make it as belongs to namos's region
222
+        if not self.registration_info.get('region_name'):
223
+            self.registration_info[
224
+                'region_name'] = cfg.CONF.os_namos.region_name
225
+
226
+        try:
227
+            region = db_api.region_create(
228
+                context,
229
+                dict(name=self.registration_info.get('region_name'))
230
+            )
231
+            LOG.info('Region %s is created' % region)
232
+        except exception.AlreadyExist:
233
+            region = db_api.region_get_by_name(
234
+                context,
235
+                name=self.registration_info.get('region_name')
236
+            )
237
+            LOG.info('Region %s is existing' % region)
238
+
194 239
         # Service Node
195 240
         try:
196
-            # TODO(mrkanag) region_id is hard-coded, fix it !
197
-            # user proper node name instead of fqdn
241
+            # TODO(mrkanag) user proper node name instead of fqdn
198 242
             node = db_api.service_node_create(
199 243
                 context,
200 244
                 dict(name=self.registration_info.get('fqdn'),
201 245
                      fqdn=self.registration_info.get('fqdn'),
202
-                     region_id='f7dcd175-27ef-46b5-997f-e6e572f320b0'))
246
+                     region_id=region.id))
203 247
 
204 248
             LOG.info('Service node %s is created' % node)
205 249
         except exception.AlreadyExist:
@@ -258,35 +302,17 @@ class ServiceProcessor(object):
258 302
                      pid=self.registration_info['identification'],
259 303
                      host=self.registration_info['host'],
260 304
                      service_component_id=service_component.id,
261
-                     deleted_at=None
305
+                     deleted_at=None,
306
+                     is_launcher=self.registration_info['i_am_launcher']
262 307
                      ))
263 308
             LOG.info('Service Worker %s is created' % service_worker)
264 309
         except exception.AlreadyExist:
265
-            # TODO(mrkanag) Find a way to purge the dead service worker
266
-            # Once each service  is enabled with heart beating namos
267
-            # purging can be done once heart beat stopped. this can be
268
-            # done from openstack.common.service.py
269
-            service_workers = \
270
-                db_api.service_worker_get_by_host_for_service_component(
271
-                    context,
272
-                    service_component_id=service_component.id,
273
-                    host=self.registration_info['host']
274
-                )
275
-            if len(service_workers) == 1:
276
-                service_worker = \
277
-                    db_api.service_worker_update(
278
-                        context,
279
-                        service_workers[0].id,
280
-                        dict(
281
-                            deleted_at=None,
282
-                            pid=self.registration_info['identification'],
283
-                            name='%s@%s' % (self.registration_info['pid'],
284
-                                            service_component.name)
285
-                        ))
286
-                LOG.info('Service Worker %s is existing and is updated'
287
-                         % service_worker)
288
-
289
-            # TODO(mrkanag) what to do when service_workers size is > 1
310
+            LOG.info('Service Worker %s is existing' %
311
+                     db_api.service_worker_get_all_by(
312
+                         context,
313
+                         pid=self.registration_info['identification'],
314
+                         service_component_id=service_component.id
315
+                     )[0])
290 316
 
291 317
         # config file
292 318
         conf_files = dict()
@@ -398,7 +424,40 @@ class ServiceProcessor(object):
398 424
                                                   cfg_obj_)
399 425
                     LOG.debug("Config %s is existing and is updated" % config)
400 426
 
401
-        return service_worker.id
427
+        return service_component.id, service_worker.id
428
+
429
+    def cleanup(self, service_component_id):
430
+        # clean up the dead service workers
431
+        #  TODO(mrkanag) Make this into thread
432
+        service_workers = \
433
+            db_api.service_worker_get_all_by(
434
+                context,
435
+                service_component_id=service_component_id
436
+            )
437
+
438
+        for srv_wkr in service_workers:
439
+            # TODO(mrkanag) Move this to db layer and query non deleted entries
440
+            if srv_wkr.deleted_at is not None:
441
+                continue
442
+
443
+            if utils.find_status(srv_wkr):
444
+                LOG.info('Service Worker %s is live'
445
+                         % srv_wkr.id)
446
+                continue
447
+            else:
448
+                confs = db_api.config_get_by_name_for_service_worker(
449
+                    self.context,
450
+                    service_worker_id=srv_wkr.id
451
+                )
452
+
453
+                for conf in confs:
454
+                    db_api.config_delete(self.context, conf.id)
455
+                    LOG.debug('Config %s is deleted'
456
+                              % conf.id)
457
+
458
+                db_api.service_worker_delete(self.context, srv_wkr.id)
459
+                LOG.info('Service Worker %s is deleted'
460
+                         % srv_wkr.id)
402 461
 
403 462
 
404 463
 class DriverProcessor(object):

+ 8
- 2
namos/db/sqlalchemy/api.py View File

@@ -48,6 +48,7 @@ def get_backend():
48 48
 def _model_query(context, *args):
49 49
     session = _session(context)
50 50
     query = session.query(*args)
51
+
51 52
     return query
52 53
 
53 54
 
@@ -98,9 +99,14 @@ def _get_all_by(context, cls, **kwargs):
98 99
     return results
99 100
 
100 101
 
101
-def _delete(context, cls, _id):
102
+def _delete(context, cls, _id, soft=True):
102 103
     result = _get(context, cls, _id)
103 104
     if result is not None:
105
+        if soft and hasattr(result, 'soft_delete'):
106
+            result.soft_delete(_session(context))
107
+            return
108
+        # TODO(mrkanag) is it ok to hard delete when soft =True and soft_delete
109
+        # is missing
104 110
         result.delete(_session(context))
105 111
 
106 112
 
@@ -588,7 +594,7 @@ def config_get_by_name_for_service_worker(context,
588 594
         query = query.filter_by(name=name)
589 595
     elif only_configured:
590 596
         query = query.filter(
591
-            models.OsloConfig.value != models.OsloConfig.default_value)
597
+            models.OsloConfig.oslo_config_file_id is not None)
592 598
     return query.all()
593 599
 
594 600
 

+ 10
- 3
namos/db/sqlalchemy/models.py View File

@@ -102,9 +102,10 @@ class Region(BASE,
102 102
     __tablename__ = 'region'
103 103
 
104 104
     # Its of type String to match with keystone region id
105
+    # TODO(mrkanag) make this as non nullable
105 106
     keystone_region_id = sqlalchemy.Column(
106 107
         sqlalchemy.String(255),
107
-        nullable=False)
108
+        nullable=True)
108 109
 
109 110
 
110 111
 class Device(BASE,
@@ -211,9 +212,10 @@ class Service(BASE,
211 212
               Extra):
212 213
     __tablename__ = 'service'
213 214
 
215
+    # TODO(mrkanag) make this as non nullable
214 216
     keystone_service_id = sqlalchemy.Column(
215 217
         Uuid,
216
-        nullable=False)
218
+        nullable=True)
217 219
 
218 220
 
219 221
 class ServiceNode(BASE,
@@ -264,7 +266,7 @@ class ServiceWorker(BASE,
264 266
     __tablename__ = 'service_worker'
265 267
 
266 268
     __table_args__ = (
267
-        UniqueConstraint("host", "service_component_id"),
269
+        UniqueConstraint("pid", "service_component_id"),
268 270
     )
269 271
 
270 272
     name = sqlalchemy.Column(sqlalchemy.String(255),
@@ -281,6 +283,11 @@ class ServiceWorker(BASE,
281 283
         sqlalchemy.String(248),
282 284
         nullable=False
283 285
     )
286
+    is_launcher = sqlalchemy.Column(
287
+        sqlalchemy.Boolean,
288
+        nullable=False,
289
+        default=False
290
+    )
284 291
     service_component_id = sqlalchemy.Column(
285 292
         Uuid,
286 293
         sqlalchemy.ForeignKey('service_component.id'),

Loading…
Cancel
Save