re-factored the processor

Change-Id: I4d06b563b5170e72cb5f691fb569c4f77d74539c
This commit is contained in:
Kanagaraj Manickam 2016-04-18 15:55:09 +05:30
parent 1f8b1e2f04
commit e8f5479949
12 changed files with 1091 additions and 2368 deletions

View File

@ -12,3 +12,54 @@ Features
-------- --------
* Automatic discovery of OpenStack deployment architecture * Automatic discovery of OpenStack deployment architecture
How to setup db
----------------
* create the 'namos' db using below command
`create database namos`
* update database.connection in /etc/namos/namos.conf with db username and
password
* Run the below command to sync the namos schema
`namos-manage create_schema`
How to setup namos
------------------
* Assume, namos is cloned at /opt/stack/namos, then run below command to
install namos from this directory.
`sudo python setup.py install`
How to run namos
-----------------
* namos-api - Namos API starts to listen on port 9999. Now it does have support
for keystone authentication
`namos-api`
* namos-manager - Namos backend service. To configure the number of workers,
update os_manager->workers
`namos-manager --config-file=/etc/namos/namos.conf`
NOTE: Before running the namos-manager, please add os-namos agent in the
console scripts of respective service components.
To find the 360 view of OpenStack deployment
--------------------------------------------
Run http://localhost:8888/v1/view_360
It provides 360 degree view under region->service_node in the response. In
addition, gives the current live status of each service component.
To find the status of components
--------------------------------
Run the below command
`namos-manage status`
NOTE: This command supports to query status based on given node name, node type
, service and component. To find more details run this command with --help

View File

@ -16,7 +16,7 @@
# under the License. # under the License.
""" """
The Namos Infra Management Service The Namos Manager
""" """
import eventlet import eventlet

View File

@ -21,7 +21,6 @@ from namos.common import exception
from namos.common import utils from namos.common import utils
from namos.db import api from namos.db import api
from namos.db import sample
from namos.db.sqlalchemy import migration from namos.db.sqlalchemy import migration
@ -154,6 +153,7 @@ class DBCommand(object):
migration.history() migration.history()
def demo_data(self): def demo_data(self):
from namos.db import sample
if CONF.command.purge: if CONF.command.purge:
sample.purge_demo_data() sample.purge_demo_data()
else: else:

View File

@ -405,7 +405,7 @@ def _append_opts_json(f, group, namespaces):
f[group][namespace][opt.name]['deprecated'] = [] f[group][namespace][opt.name]['deprecated'] = []
for d in opt.deprecated_opts: for d in opt.deprecated_opts:
f[group][namespace][opt.name]['deprecated'].append( f[group][namespace][opt.name]['deprecated'].append(
(d.group or 'DEFAULT', d.name or opt.dest)) (d.group or 'DEFAULT', d.name or opt.dest))
f[group][namespace][opt.name][ f[group][namespace][opt.name][
'deprecated_for_removal'] = opt.deprecated_for_removal 'deprecated_for_removal'] = opt.deprecated_for_removal

View File

@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from namos.common import exception
from namos.common import utils
from namos.db import api as db_api
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class ConfigProcessor(object):
    """Persist a service worker's oslo.config state into the namos DB.

    Two inputs arrive with an agent registration: the raw config files
    (``config_file_dict``) and the flattened runtime options
    (``config_list``).  Both are stored, and each entry is associated with
    its oslo config schema when exactly one matching schema exists.
    """

    def __init__(self, context, manager, registration_info, service_worker_id):
        """
        :param context: request context used for all DB operations
        :param manager: owning conductor manager (kept for symmetry with the
            other processors; not used directly here)
        :param registration_info: registration payload from the os-namos agent
        :param service_worker_id: id of the already-persisted service worker
        """
        self.context = context
        self.manager = manager
        self.registration_info = registration_info
        self.service_worker_id = service_worker_id
        # Resolve worker -> component -> node/service once up-front so the
        # per-entry loops below do not repeat these lookups.
        self.service_component_id = db_api.service_worker_get(
            self.context,
            self.service_worker_id).service_component_id
        sc = db_api.service_component_get(
            self.context,
            self.service_component_id
        )
        self.service_node_id = sc.node_id
        self.project = db_api.service_get(self.context, sc.service_id).name

    def file_to_configs(self, file_content):
        """Parse raw config-file text into a ``{group: {key: value}}`` dict.

        ``utils.file_to_configs()`` only reads from a path, so the content
        is spilled to a secure temporary file, which is removed even when
        parsing raises.
        """
        import os
        import tempfile

        # delete=False: the file must be closed (content flushed) before
        # utils.file_to_configs() re-opens it by name.
        tmp_file = tempfile.NamedTemporaryFile(
            mode='w', suffix='.conf', delete=False)
        try:
            tmp_file.write(file_content)
            tmp_file.close()
            return utils.file_to_configs(tmp_file.name)
        finally:
            if not tmp_file.closed:
                tmp_file.close()
            os.remove(tmp_file.name)

    def _form_config_name(self, group, key):
        # Canonical '<group>.<key>' name; this is also the key format used
        # by the driver/namespace processors to look configs up.
        return '%s.%s' % (group, key)

    def process_config_files(self):
        """Store every reported config file and its entries.

        :returns: dict mapping '<group>.<key>' -> config file entry id,
            used by process_configs() to link runtime options to the file
            entry they came from.
        """
        conf_name_to_file_id = dict()
        for cfg_f in self.registration_info['config_file_dict'].keys():
            # Create the config file row; on conflict fall back to an
            # update so re-registration refreshes the stored content.
            try:
                config_file = db_api.config_file_create(
                    self.context,
                    dict(name=cfg_f,
                         file=self.registration_info[
                             'config_file_dict'][cfg_f],
                         service_node_id=self.service_node_id))
                LOG.info('Oslo config file %s is created' % config_file)
            except exception.AlreadyExist:
                config_files = \
                    db_api.config_file_get_by_name_for_service_node(
                        self.context,
                        service_node_id=self.service_node_id,
                        name=cfg_f
                    )
                # NOTE(review): if this lookup returns 0 or >1 rows,
                # config_file stays unbound and the statement below raises
                # NameError -- confirm whether duplicates can occur.
                if len(config_files) == 1:
                    config_file = \
                        db_api.config_file_update(
                            self.context,
                            config_files[0].id,
                            dict(file=self.registration_info[
                                'config_file_dict'][cfg_f]))
                    LOG.info('Oslo config file %s is existing and is updated'
                             % config_file)

            config_dict = self.file_to_configs(
                config_file.file
            )

            # One config-file-entry row per (group, key) found in the file.
            for grp, keys in config_dict.items():
                for key, value in keys.items():
                    # Find the matching config schema; associate it only
                    # when the match is unambiguous (exactly one row).
                    cfg_schs = db_api.config_schema_get_by(
                        context=self.context,
                        group=grp,
                        name=key,
                        project=self.project
                    )

                    cfg_sche = None
                    if len(cfg_schs) == 0:
                        LOG.debug("[%s] No Config Schema is existing, so "
                                  "no schema is associated for Config Entry "
                                  "%s::%s" %
                                  (self.service_component_id,
                                   grp,
                                   key))
                    elif len(cfg_schs) > 1:
                        LOG.debug("[%s] More than one Config Schema is "
                                  "existing, so no schema is associated for "
                                  "Config Entry %s::%s" %
                                  (self.service_component_id,
                                   grp,
                                   key))
                    else:
                        cfg_sche = cfg_schs[0]
                        LOG.debug("[%s] Config Schema %s is existing and is "
                                  "used to associated for Config Entry"
                                  " %s::%s" %
                                  (self.service_component_id,
                                   cfg_sche.id,
                                   grp,
                                   key))

                    cfg_name = self._form_config_name(grp, key)
                    cfg_obj_ = dict(
                        service_component_id=self.service_component_id,
                        name=cfg_name,
                        value=value,
                        oslo_config_schema_id=cfg_sche.id if
                        cfg_sche else None,
                        oslo_config_file_id=config_file.id
                    )

                    # Create-or-update, mirroring the config file handling.
                    try:
                        config = db_api.config_file_entry_create(
                            self.context,
                            cfg_obj_)
                        LOG.debug("Config Entry %s is created" % config)
                    except exception.AlreadyExist:
                        configs = db_api.config_file_entry_get_all_by(
                            self.context,
                            service_component_id=cfg_obj_[
                                'service_component_id'],
                            oslo_config_file_id=config_file.id,
                            name=cfg_obj_['name'])
                        # NOTE(review): as above, config stays unbound when
                        # the lookup is not exactly one row.
                        if len(configs) == 1:
                            config = db_api.config_file_entry_update(
                                self.context,
                                configs[0].id,
                                cfg_obj_)
                            LOG.debug("Config Entry %s is existing and is "
                                      "updated" % config)

                    conf_name_to_file_id[cfg_name] = config.id

        return conf_name_to_file_id

    def process_configs(self):
        """Store each runtime config option reported by the agent.

        Options whose '<group>.<key>' name matches a stored config file
        entry are linked to that entry; otherwise a schema association is
        attempted exactly as in process_config_files().
        """
        conf_name_to_file_id = self.process_config_files()
        for cfg_obj in self.registration_info['config_list']:
            # '<group>.<key>' -- the format driver processing relies on.
            cfg_name = self._form_config_name(cfg_obj['group'],
                                              cfg_obj['name'])

            if not conf_name_to_file_id.get(cfg_name):
                cfg_schm_id = None
                cfg_f_entry = None

                # Find the config schema.  The config file name is ignored
                # here: an option is assumed unique across the service for
                # a given group and name.
                cfg_schs = db_api.config_schema_get_by(
                    context=self.context,
                    group=cfg_obj['group'],
                    name=cfg_obj['name'],
                    project=self.project
                )

                if len(cfg_schs) == 0:
                    LOG.debug("[%s] No Config Schema is existing, so "
                              "no schema is associated for Config %s::%s" %
                              (self.service_worker_id,
                               cfg_obj['group'],
                               cfg_obj['name']))
                elif len(cfg_schs) > 1:
                    LOG.debug("[%s] More than one Config Schema is existing, "
                              "so no schema is associated for Config %s::%s" %
                              (self.service_worker_id,
                               cfg_obj['group'],
                               cfg_obj['name']))
                else:
                    cfg_sche = cfg_schs[0]
                    LOG.debug("[%s] Config Schema %s is existing and is used "
                              "for Config %s::%s" %
                              (self.service_worker_id,
                               cfg_sche.id,
                               cfg_obj['group'],
                               cfg_obj['name']))
                    cfg_schm_id = cfg_sche.id
            else:
                # The option came from a stored config file: link it to the
                # file entry instead of (re-)associating a schema.
                cfg_schm_id = None
                cfg_f_entry = conf_name_to_file_id[cfg_name]

            cfg_obj_ = dict(
                service_worker_id=self.service_worker_id,
                name=cfg_name,
                value=cfg_obj['value'] if cfg_obj['value'] else cfg_obj[
                    'default_value'],
                oslo_config_schema_id=cfg_schm_id,
                oslo_config_file_entry_id=cfg_f_entry
            )

            # Create-or-update the config row for this worker.
            try:
                config = db_api.config_create(self.context, cfg_obj_)
                LOG.debug("Config %s is created" % config)
            except exception.AlreadyExist:
                configs = db_api.config_get_by_name_for_service_worker(
                    self.context,
                    service_worker_id=cfg_obj_['service_worker_id'],
                    name=cfg_obj_['name'])
                if len(configs) == 1:
                    config = db_api.config_update(self.context,
                                                  configs[0].id,
                                                  cfg_obj_)
                    LOG.debug("Config %s is existing and is updated" % config)

View File

@ -20,11 +20,13 @@ from oslo_log import log
from oslo_utils import timeutils from oslo_utils import timeutils
from namos.common import config as namos_config from namos.common import config as namos_config
from namos.common import exception
from namos.common import messaging from namos.common import messaging
from namos.common import utils from namos.common import utils
from namos.conductor.config_processor import ConfigProcessor
from namos.conductor.namespace_processor import NamespaceProcessor
from namos.conductor.region_processor import RegionProcessor
from namos.conductor.service_processor import ServiceProcessor
from namos.db import api as db_api from namos.db import api as db_api
from namos.db import openstack_drivers
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -48,6 +50,49 @@ class ConductorManager(object):
RPC_API_VERSION = '1.0' RPC_API_VERSION = '1.0'
TOPIC = namos_config.MESSAGE_QUEUE_CONDUCTOR_TOPIC TOPIC = namos_config.MESSAGE_QUEUE_CONDUCTOR_TOPIC
def _regisgration_ackw(self, context, identification):
client = messaging.get_rpc_client(
topic=self._os_namos_listener_topic(identification),
version=self.RPC_API_VERSION,
exchange=namos_config.PROJECT_NAME)
client.cast(context,
'regisgration_ackw',
identification=identification)
LOG.info("REGISTER [%s] ACK" % identification)
def _os_namos_listener_topic(self, identification):
return 'namos.CONF.%s' % identification
def _ping(self, context, identification):
client = messaging.get_rpc_client(
topic=self._os_namos_listener_topic(identification),
version=self.RPC_API_VERSION,
exchange=namos_config.PROJECT_NAME,
timeout=1)
try:
client.call(context,
'ping_me',
identification=identification)
LOG.info("PING [%s] SUCCESSFUL" % identification)
return True
except: # noqa
LOG.info("PING [%s] FAILED" % identification)
return False
def _update_config_file(self, context, identification, name, content):
client = messaging.get_rpc_client(
topic=self._os_namos_listener_topic(identification),
version=self.RPC_API_VERSION,
exchange=namos_config.PROJECT_NAME,
timeout=2)
client.call(context,
'update_config_file',
identification=identification,
name=name,
content=content)
LOG.info("CONF FILE [%s] UPDATE [%s] DONE" % (name, identification))
@request_context @request_context
def add_region(self, context, region): def add_region(self, context, region):
return db_api.region_create(context, region) return db_api.region_create(context, region)
@ -120,10 +165,10 @@ class ConductorManager(object):
cp.process_configs() cp.process_configs()
# Device Driver processing # Device Driver processing
# TODO(mrkanag) if this to be per service component?? # TODO(mrkanag) if this to be per service component??
dp = DriverProcessor(context, dp = NamespaceProcessor(context,
self, self,
service_worker_id, service_worker_id,
region_id) region_id)
dp.process_drivers() dp.process_drivers()
self._regisgration_ackw(context, self._regisgration_ackw(context,
@ -140,49 +185,6 @@ class ConductorManager(object):
sp.cleanup(service_component_id) sp.cleanup(service_component_id)
return service_worker_id return service_worker_id
def _regisgration_ackw(self, context, identification):
client = messaging.get_rpc_client(
topic=self._os_namos_listener_topic(identification),
version=self.RPC_API_VERSION,
exchange=namos_config.PROJECT_NAME)
client.cast(context,
'regisgration_ackw',
identification=identification)
LOG.info("REGISTER [%s] ACK" % identification)
def _os_namos_listener_topic(self, identification):
return 'namos.CONF.%s' % identification
def _ping(self, context, identification):
client = messaging.get_rpc_client(
topic=self._os_namos_listener_topic(identification),
version=self.RPC_API_VERSION,
exchange=namos_config.PROJECT_NAME,
timeout=1)
try:
client.call(context,
'ping_me',
identification=identification)
LOG.info("PING [%s] SUCCESSFUL" % identification)
return True
except: # noqa
LOG.info("PING [%s] FAILED" % identification)
return False
def _update_config_file(self, context, identification, name, content):
client = messaging.get_rpc_client(
topic=self._os_namos_listener_topic(identification),
version=self.RPC_API_VERSION,
exchange=namos_config.PROJECT_NAME,
timeout=2)
client.call(context,
'update_config_file',
identification=identification,
name=name,
content=content)
LOG.info("CONF FILE [%s] UPDATE [%s] DONE" % (name, identification))
@request_context @request_context
def heart_beat(self, context, identification, dieing=False): def heart_beat(self, context, identification, dieing=False):
try: try:
@ -339,610 +341,3 @@ class ConductorManager(object):
cfg_s.name]['entries'] = cfg_es cfg_s.name]['entries'] = cfg_es
return file_schema return file_schema
class RegionProcessor(object):
    """Resolves the region a registering agent belongs to.

    Creates the region row on first sight; subsequent registrations fetch
    the existing row.
    """

    def __init__(self,
                 context,
                 manager,
                 registration_info):
        self.registration_info = registration_info
        self.manager = manager
        self.context = context

    def process_region(self):
        """Create (or look up) the region and return its id."""
        # An agent that does not name its region is counted as part of
        # namos's own region.
        region_name = self.registration_info.get('region_name')
        if not region_name:
            region_name = cfg.CONF.os_namos.region_name
            self.registration_info['region_name'] = region_name

        try:
            region = db_api.region_create(self.context,
                                          dict(name=region_name))
            LOG.info('Region %s is created' % region)
        except exception.AlreadyExist:
            region = db_api.region_get_by_name(self.context,
                                               name=region_name)
            LOG.info('Region %s is existing' % region)

        return region.id
class ServiceProcessor(object):
    """Persists the service hierarchy for a registering agent.

    Walks node -> service -> component -> worker, creating each row on
    first sight and fetching the existing row on conflict.
    """

    def __init__(self,
                 context,
                 manager,
                 region_id,
                 registration_info):
        # :param context: request context for all DB calls
        # :param manager: owning conductor manager (not used directly here)
        # :param region_id: region resolved by RegionProcessor
        # :param registration_info: payload sent by the os-namos agent
        self.registration_info = registration_info
        self.manager = manager
        self.context = context
        self.region_id = region_id

    def process_service(self):
        """Create/fetch the node, service, component and worker rows.

        :returns: tuple (service_component.id, service_worker.id)
        """
        # Service Node: create-or-fetch keyed on the agent's fqdn.
        try:
            # TODO(mrkanag) use a proper node name instead of fqdn
            node = db_api.service_node_create(
                self.context,
                dict(name=self.registration_info.get('fqdn'),
                     fqdn=self.registration_info.get('fqdn'),
                     region_id=self.region_id,
                     extra={'ips': self.registration_info.get('ips')}))
            LOG.info('Service node %s is created' % node)
        except exception.AlreadyExist:
            # TODO(mrkanag) is this to be region specific search
            node = db_api.service_node_get_by_name(
                self.context,
                self.registration_info.get('fqdn'))
            LOG.info('Service node %s is existing' % node)

        # Service: keyed on the agent's project name.
        try:
            # NOTE(review): hard-coded placeholder keystone service id --
            # see the TODO below.
            s_id = 'b9c2549f-f685-4bc2-92e9-ba8af9c18591'
            service = db_api.service_create(
                self.context,
                # TODO(mrkanag) use keystone python client and
                # use real service id here
                dict(name=self.registration_info.get('project_name'),
                     keystone_service_id=s_id))
            LOG.info('Service %s is created' % service)
        except exception.AlreadyExist:
            service = db_api.service_get_by_name(
                self.context,
                self.registration_info.get('project_name'))
            LOG.info('Service %s is existing' % service)

        # Service Component: keyed on (node, service, prog_name).
        try:
            service_component = db_api.service_component_create(
                self.context,
                dict(name=self.registration_info['prog_name'],
                     node_id=node.id,
                     service_id=service.id,
                     type=namos_config.find_type(self.registration_info[
                         'prog_name'])))
            LOG.info('Service Component %s is created' % service_component)
        except exception.AlreadyExist:
            service_components = \
                db_api.service_component_get_all_by_node_for_service(
                    self.context,
                    node_id=node.id,
                    service_id=service.id,
                    name=self.registration_info['prog_name']
                )
            if len(service_components) == 1:
                service_component = service_components[0]
                LOG.info('Service Component %s is existing' %
                         service_component)
            # TODO(mrkanag) what to do when service_components size is > 1
            # NOTE(review): if 0 or >1 rows match here, service_component
            # stays unbound and the Service Worker step below raises
            # NameError -- confirm whether duplicates can actually occur.

        # Service Worker: one row per running process of the component.
        try:
            service_worker = db_api.service_worker_create(
                self.context,
                # TODO(mrkanag) Fix the name, device driver proper !
                dict(name='%s@%s' % (service_component.name,
                                     self.registration_info['pid']),
                     pid=self.registration_info['identification'],
                     host=self.registration_info['host'],
                     service_component_id=service_component.id,
                     deleted_at=None,
                     is_launcher=self.registration_info['i_am_launcher']
                     ))
            LOG.info('Service Worker %s is created' % service_worker)
        except exception.AlreadyExist:
            service_worker = db_api.service_worker_get_all_by(
                self.context,
                pid=self.registration_info['identification'],
                service_component_id=service_component.id
            )[0]
            LOG.info('Service Worker %s is existing' %
                     service_worker)

        return service_component.id, service_worker.id

    def cleanup(self, service_component_id):
        """Remove dead service workers of the given component."""
        db_api.cleanup(self.context, service_component_id)
class ConfigProcessor(object):
    """Persists a service worker's oslo.config state into the namos DB.

    Stores both the raw config files reported at registration and the
    flattened runtime options, associating entries with their oslo config
    schema when exactly one matching schema exists.
    """

    def __init__(self, context, manager, registration_info, service_worker_id):
        # Resolve worker -> component -> node/service once so the loops
        # below do not repeat these lookups.
        self.context = context
        self.manager = manager
        self.registration_info = registration_info
        self.service_worker_id = service_worker_id
        self.service_component_id = db_api.service_worker_get(
            self.context,
            self.service_worker_id).service_component_id
        sc = db_api.service_component_get(
            self.context,
            self.service_component_id
        )
        self.service_node_id = sc.node_id
        self.project = db_api.service_get(self.context, sc.service_id).name

    def file_to_configs(self, file_content):
        """Parse raw config-file text into a {group: {key: value}} dict.

        NOTE(review): the temp file is not removed if parsing raises, and
        the local name 'file' shadows the builtin.
        """
        import uuid
        tmp_file_path = '/tmp/%s.conf' % str(uuid.uuid4())
        with open(tmp_file_path, 'w') as file:
            file.write(file_content)

        conf_dict = utils.file_to_configs(tmp_file_path)

        import os
        os.remove(tmp_file_path)

        return conf_dict

    def _form_config_name(self, group, key):
        # Canonical '<group>.<key>' config name.
        return '%s.%s' % (group, key)

    def process_config_files(self):
        """Store config files and their entries.

        :returns: dict mapping '<group>.<key>' -> config file entry id.
        """
        conf_name_to_file_id = dict()
        for cfg_f in self.registration_info['config_file_dict'].keys():
            # Create-or-update the config file row.
            try:
                config_file = db_api.config_file_create(
                    self.context,
                    dict(name=cfg_f,
                         file=self.registration_info[
                             'config_file_dict'][cfg_f],
                         service_node_id=self.service_node_id))
                LOG.info('Oslo config file %s is created' % config_file)
            except exception.AlreadyExist:
                config_files = \
                    db_api.config_file_get_by_name_for_service_node(
                        self.context,
                        service_node_id=self.service_node_id,
                        name=cfg_f
                    )
                # NOTE(review): config_file stays unbound (NameError below)
                # when this lookup does not return exactly one row.
                if len(config_files) == 1:
                    config_file = \
                        db_api.config_file_update(
                            self.context,
                            config_files[0].id,
                            dict(file=self.registration_info[
                                'config_file_dict'][cfg_f]))
                    LOG.info('Oslo config file %s is existing and is updated'
                             % config_file)

            config_dict = self.file_to_configs(
                config_file.file
            )

            # One config-file-entry row per (group, key) in the file.
            for grp, keys in config_dict.items():
                for key, value in keys.items():
                    # Associate a schema only on an unambiguous match.
                    cfg_schs = db_api.config_schema_get_by(
                        context=self.context,
                        group=grp,
                        name=key,
                        project=self.project
                    )

                    cfg_sche = None
                    if len(cfg_schs) == 0:
                        LOG.debug("[%s] No Config Schema is existing, so "
                                  "no schema is associated for Config Entry "
                                  "%s::%s" %
                                  (self.service_component_id,
                                   grp,
                                   key))
                    elif len(cfg_schs) > 1:
                        LOG.debug("[%s] More than one Config Schema is "
                                  "existing, so no schema is associated for "
                                  "Config Entry %s::%s" %
                                  (self.service_component_id,
                                   grp,
                                   key))
                    else:
                        cfg_sche = cfg_schs[0]
                        LOG.debug("[%s] Config Schema %s is existing and is "
                                  "used to associated for Config Entry"
                                  " %s::%s" %
                                  (self.service_component_id,
                                   cfg_sche.id,
                                   grp,
                                   key))

                    cfg_name = self._form_config_name(grp, key)
                    cfg_obj_ = dict(
                        service_component_id=self.service_component_id,
                        name=cfg_name,
                        value=value,
                        oslo_config_schema_id=cfg_sche.id if
                        cfg_sche else None,
                        oslo_config_file_id=config_file.id
                    )

                    # Create-or-update the entry row.
                    try:
                        config = db_api.config_file_entry_create(
                            self.context,
                            cfg_obj_)
                        LOG.debug("Config Entry %s is created" % config)
                    except exception.AlreadyExist:
                        configs = db_api.config_file_entry_get_all_by(
                            self.context,
                            service_component_id=cfg_obj_[
                                'service_component_id'],
                            oslo_config_file_id=config_file.id,
                            name=cfg_obj_['name'])
                        if len(configs) == 1:
                            config = db_api.config_file_entry_update(
                                self.context,
                                configs[0].id,
                                cfg_obj_)
                            LOG.debug("Config Entry %s is existing and is "
                                      "updated" % config)

                    conf_name_to_file_id[cfg_name] = config.id

        return conf_name_to_file_id

    def process_configs(self):
        """Store each runtime config option reported by the agent.

        Options matching a stored config file entry are linked to it;
        otherwise a schema association is attempted.
        """
        conf_name_to_file_id = self.process_config_files()
        for cfg_obj in self.registration_info['config_list']:
            # This format is used by DriverProcessor
            cfg_name = self._form_config_name(cfg_obj['group'],
                                              cfg_obj['name'])

            if not conf_name_to_file_id.get(cfg_name):
                cfg_schm_id = None
                cfg_f_entry = None

                # find config schema
                # ignore the config file_name right now !!, assumed conf unique
                # across the service with given group and name
                cfg_schs = db_api.config_schema_get_by(
                    context=self.context,
                    group=cfg_obj['group'],
                    name=cfg_obj['name'],
                    project=self.project
                )

                if len(cfg_schs) == 0:
                    LOG.debug("[%s] No Config Schema is existing, so "
                              "no schema is associated for Config %s::%s" %
                              (self.service_worker_id,
                               cfg_obj['group'],
                               cfg_obj['name']))
                elif len(cfg_schs) > 1:
                    LOG.debug("[%s] More than one Config Schema is existing, "
                              "so no schema is associated for Config %s::%s" %
                              (self.service_worker_id,
                               cfg_obj['group'],
                               cfg_obj['name']))
                else:
                    cfg_sche = cfg_schs[0]
                    LOG.debug("[%s] Config Schema %s is existing and is used "
                              "for Config %s::%s" %
                              (self.service_worker_id,
                               cfg_sche.id,
                               cfg_obj['group'],
                               cfg_obj['name']))
                    cfg_schm_id = cfg_sche.id
            else:
                # Option came from a stored config file: link the entry.
                cfg_schm_id = None
                cfg_f_entry = conf_name_to_file_id[cfg_name]

            cfg_obj_ = dict(
                service_worker_id=self.service_worker_id,
                name=cfg_name,
                value=cfg_obj['value'] if cfg_obj['value'] else cfg_obj[
                    'default_value'],
                oslo_config_schema_id=cfg_schm_id,
                oslo_config_file_entry_id=cfg_f_entry
            )

            # Create-or-update the config row for this worker.
            try:
                config = db_api.config_create(self.context, cfg_obj_)
                LOG.debug("Config %s is created" % config)
            except exception.AlreadyExist:
                configs = db_api.config_get_by_name_for_service_worker(
                    self.context,
                    service_worker_id=cfg_obj_['service_worker_id'],
                    name=cfg_obj_['name'])
                if len(configs) == 1:
                    config = db_api.config_update(self.context,
                                                  configs[0].id,
                                                  cfg_obj_)
                    LOG.debug("Config %s is existing and is updated" % config)
class DriverProcessor(object):
    """Discovers devices/drivers from a worker's configuration.

    Interprets the declarative driver descriptions in
    ``openstack_drivers`` against the worker's stored config values and
    persists device, endpoint, driver-class and driver rows.
    """

    def __init__(self, context, manager, service_worker_id, region_id):
        # Snapshot the worker's configs once; _get_value() resolves
        # driver-description references against this dict.
        self.context = context
        self.manager = manager
        self.service_worker_id = service_worker_id
        self.region_id = region_id
        self.config_dict = self._get_config_dict()

    def _get_config_dict(self):
        """Return {config name: config row as dict} for this worker."""
        conf_dict = {}
        for c in db_api.config_get_by_name_for_service_worker(
                self.context,
                self.service_worker_id
        ):
            conf_dict[c.name] = c.to_dict()

        return conf_dict

    def _identify_drivers(self):
        # Driver config keys that this worker actually has values for.
        return (set(openstack_drivers.get_drivers_config().keys()) &
                set(self.config_dict.keys()))

    def _get_value(self, name):
        """Resolve a driver-description reference to a concrete value.

        Supported forms:
        - None: returned unchanged
        - str starting with '#': literal constant (the '#' is stripped)
        - str: config name, resolved via self.config_dict
        - tuple (fn, *refs): fn applied to the resolved refs
        - list [fmt, *refs]: fmt %-interpolated with the resolved refs
        """
        if name is None:
            return name

        if isinstance(name, str):
            # Constant naming
            if name[0] == '#':
                return name[1:]

            return (self.config_dict[name].get('value'))
        elif isinstance(name, tuple):
            fn = name[0]
            args = list()
            for var in name[1:]:
                args.append(self._get_value(var))
            return fn(*args)
        elif isinstance(name, list):
            fmt_str = name[0]
            params = [self._get_value(param) for param in name[1:]]
            return fmt_str % tuple(params)

    def process_drivers(self):
        """Process every driver this worker's config declares."""
        for driver_key in self._identify_drivers():
            try:
                drivers = self._get_value(driver_key)
                drivers = utils._to_list(drivers)
                for driver_name in drivers:
                    self.process_driver(driver_key, driver_name)
            except KeyError:  # noqa
                # TODO(mrkanag) run namos-manager and restart nova-scheduler
                # KeyError: 'libvirt.virt_type' is thrown, fix it
                LOG.error('Failed to process driver %s in service worker %s' %
                          (driver_key, self.service_worker_id))
                continue

    def process_driver(self, driver_key, driver_name):
        """Persist device/endpoint/driver rows for one driver."""
        driver_config = \
            openstack_drivers.get_drivers_config()[driver_key][driver_name]

        # An alias redirects to another driver description via a
        # colon-separated key path.
        if driver_config.get('alias') is not None:
            alias = driver_config.get('alias')
            driver_config = \
                openstack_drivers.get_drivers_config()
            for key in alias.split(':'):
                driver_config = driver_config[key]
            driver_name = key

        driver_def = \
            openstack_drivers.get_drivers_def()[driver_name]

        connection = dict()

        endpoint_type = None
        connection_cfg = None
        device_endpoint_name = None
        device_cfg = None
        child_device_cfg = None

        if driver_config.get('device') is not None:
            device_cfg = driver_config['device']

        # Endpoint description may be typed (nested per endpoint type) or
        # flat; either form may override the device description.
        if driver_config['endpoint'].get('type') is not None:
            endpoint_type = driver_config['endpoint']['type']
            if endpoint_type[0] != '#':
                endpoint_type = self._get_value(endpoint_type)

            connection_cfg = driver_config['endpoint'][endpoint_type][
                'connection']
            device_endpoint_name = self._get_value(
                driver_config['endpoint'][endpoint_type]['name'])
            # override the device name
            if driver_config['endpoint'][endpoint_type].get(
                    'device') is not None:
                device_cfg = driver_config['endpoint'][endpoint_type][
                    'device']
            if driver_config['endpoint'][endpoint_type].get(
                    'child_device') is not None:
                child_device_cfg = driver_config['endpoint'][
                    endpoint_type]['child_device']
        else:
            endpoint_type = None
            connection_cfg = driver_config['endpoint']['connection']
            device_endpoint_name = self._get_value(
                driver_config['endpoint']['name']
            )
            # override the device name
            if driver_config['endpoint'].get('device') is not None:
                device_cfg = driver_config['endpoint']['device']
            if driver_config['endpoint'].get('child_device') is not None:
                child_device_cfg = driver_config['endpoint'][
                    'child_device']

        # Device: create-or-fetch by name.
        device_name = self._get_value(device_cfg['name'])
        try:
            # TODO(mrkanag) Set the right status
            device = db_api.device_create(
                self.context,
                dict(name=device_name,
                     status='active',
                     region_id=self.region_id))

            LOG.info('Device %s is created' % device)
        except exception.AlreadyExist:
            device = db_api.device_get_by_name(
                self.context,
                device_name)
            LOG.info('Device %s is existing' % device)

        # TODO(mrkanag) Properly Handle child devices
        # NOTE(review): 'device' is rebound inside this loop, so later
        # children are parented to the previous child -- confirm intent.
        if child_device_cfg is not None:
            for d_name in self._get_value(child_device_cfg['key']):
                base_name = self._get_value(child_device_cfg['base_name'])
                d_name = '%s-%s' % (base_name, d_name)
                try:
                    device = db_api.device_get_by_name(
                        self.context,
                        d_name)
                    LOG.info('Device %s is existing' % device)
                except exception.DeviceNotFound:
                    # TODO(mrkanag) region_id is hard-coded, fix it !
                    # Set the right status as well
                    r_id = 'f7dcd175-27ef-46b5-997f-e6e572f320b0'
                    device = db_api.device_create(
                        self.context,
                        dict(name=d_name,
                             status='active',
                             parent_id=device.id,
                             region_id=r_id))
                    LOG.info('Device %s is created' % device)

        # Device Endpoint
        try:
            # NOTE(review): dict.iteritems() exists only on Python 2; on
            # Python 3 this raises AttributeError.  Also 'v' is unused and
            # the value is resolved from the key -- confirm whether
            # self._get_value(v) was intended.
            for k, v in connection_cfg.iteritems():
                connection[k] = self._get_value(k)

            device_endpoint = db_api.device_endpoint_create(
                self.context,
                dict(name=device_endpoint_name,
                     connection=connection,
                     type=endpoint_type,
                     device_id=device.id))
            LOG.info('Device Endpoint %s is created' % device_endpoint)
        except exception.AlreadyExist:
            device_endpoints = db_api.device_endpoint_get_by_device_type(
                self.context,
                device_id=device.id,
                type=endpoint_type,
                name=device_endpoint_name)
            if len(device_endpoints) >= 1:
                device_endpoint = device_endpoints[0]
                LOG.info('Device Endpoint %s is existing' %
                         device_endpoints[0])

        # Device Driver Class: create-or-fetch by driver name.
        try:
            device_driver_class = db_api.device_driver_class_create(
                self.context,
                dict(name=driver_name,
                     python_class=driver_name,
                     type=driver_def['type'],
                     device_id=device.id,
                     endpoint_id=device_endpoint.id,
                     service_worker_id=self.service_worker_id,
                     extra=driver_def.get('extra')))
            LOG.info('Device Driver Class %s is created' %
                     device_driver_class)
        except exception.AlreadyExist:
            device_driver_class = db_api.device_driver_class_get_by_name(
                self.context,
                driver_name)
            LOG.info('Device Driver Class %s is existing' %
                     device_driver_class)

        # Device Driver: create-or-fetch for this worker/endpoint.
        try:
            device_driver = db_api.device_driver_create(
                self.context,
                dict(device_id=device.id,
                     name=driver_name,
                     endpoint_id=device_endpoint.id,
                     device_driver_class_id=device_driver_class.id,
                     service_worker_id=self.service_worker_id)
            )
            LOG.info('Device Driver %s is created' %
                     device_driver)
        except exception.AlreadyExist:
            device_drivers = \
                db_api.device_driver_get_by_device_endpoint_service_worker(
                    self.context,
                    device_id=device.id,
                    endpoint_id=device_endpoint.id,
                    device_driver_class_id=device_driver_class.id,
                    service_worker_id=self.service_worker_id
                )
            if len(device_drivers) >= 1:
                device_driver = device_drivers[0]
                LOG.info('Device Driver %s is existing' %
                         device_driver)
if __name__ == '__main__':
    # Ad-hoc debug helper: exercise the list-string parsing used by
    # process_drivers().  NOTE(review): the original called
    # DriverProcessor(None, None)._to_list(...), which could never run --
    # DriverProcessor.__init__ takes four arguments and the class has no
    # _to_list method; the parser lives in namos.common.utils.
    print(utils._to_list("[\"file\', \'http\']"))

View File

@ -0,0 +1,252 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from namos.common import exception
from namos.common import utils
from namos.db import api as db_api
from namos.db import openstack_drivers
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class NamespaceProcessor(object):
# TODO(mrkanag) check Fuel driver at
# http://docs.openstack.org/mitaka/config-reference/content/
# hpe-3par-driver.html
def __init__(self, context, manager, service_worker_id, region_id):
    """Snapshot the worker's configs for driver-description resolution.

    :param context: request context for all DB calls
    :param manager: owning conductor manager (not used directly here)
    :param service_worker_id: worker whose configs drive the processing
    :param region_id: region the discovered devices belong to
    """
    self.context = context
    self.manager = manager
    self.service_worker_id = service_worker_id
    self.region_id = region_id
    self.config_dict = self._get_config_dict()
def _get_config_dict(self):
    """Return {config name: config row as dict} for this worker."""
    conf_dict = {}
    for c in db_api.config_get_by_name_for_service_worker(
            self.context,
            self.service_worker_id
    ):
        conf_dict[c.name] = c.to_dict()
    return conf_dict
def _identify_drivers(self):
    # Driver config keys that this worker actually has values for.
    return (set(openstack_drivers.get_drivers_config().keys()) &
            set(self.config_dict.keys()))
def _get_value(self, name):
if name is None:
return name
if isinstance(name, str):
# Constant naming
if name[0] == '#':
return name[1:]
return (self.config_dict[name].get('value'))
elif isinstance(name, tuple):
fn = name[0]
args = list()
for var in name[1:]:
args.append(self._get_value(var))
return fn(*args)
elif isinstance(name, list):
fmt_str = name[0]
params = [self._get_value(param) for param in name[1:]]
return fmt_str % tuple(params)
def process_drivers(self):
for driver_key in self._identify_drivers():
try:
drivers = self._get_value(driver_key)
drivers = utils._to_list(drivers)
for driver_name in drivers:
self.process_driver(driver_key, driver_name)
except KeyError: # noqa
# TODO(mrkanag) run namos-manager and restart nova-scheduler
# KeyError: 'libvirt.virt_type' is thrown, fix it
LOG.error('Failed to process driver %s in service worker %s' %
(driver_key, self.service_worker_id))
continue
def process_driver(self, driver_key, driver_name):
driver_config = \
openstack_drivers.get_drivers_config()[driver_key][driver_name]
if driver_config.get('alias') is not None:
alias = driver_config.get('alias')
driver_config = \
openstack_drivers.get_drivers_config()
for key in alias.split(':'):
driver_config = driver_config[key]
driver_name = key
driver_def = \
openstack_drivers.get_drivers_def()[driver_name]
connection = dict()
endpoint_type = None
connection_cfg = None
device_endpoint_name = None
device_cfg = None
child_device_cfg = None
if driver_config.get('device') is not None:
device_cfg = driver_config['device']
if driver_config['endpoint'].get('type') is not None:
endpoint_type = driver_config['endpoint']['type']
if endpoint_type[0] != '#':
endpoint_type = self._get_value(endpoint_type)
connection_cfg = driver_config['endpoint'][endpoint_type][
'connection']
device_endpoint_name = self._get_value(
driver_config['endpoint'][endpoint_type]['name'])
# override the device name
if driver_config['endpoint'][endpoint_type].get(
'device') is not None:
device_cfg = driver_config['endpoint'][endpoint_type][
'device']
if driver_config['endpoint'][endpoint_type].get(
'child_device') is not None:
child_device_cfg = driver_config['endpoint'][
endpoint_type]['child_device']
else:
endpoint_type = None
connection_cfg = driver_config['endpoint']['connection']
device_endpoint_name = self._get_value(
driver_config['endpoint']['name']
)
# override the device name
if driver_config['endpoint'].get('device') is not None:
device_cfg = driver_config['endpoint']['device']
if driver_config['endpoint'].get('child_device') is not None:
child_device_cfg = driver_config['endpoint'][
'child_device']
# Device
device_name = self._get_value(device_cfg['name'])
try:
# TODO(mrkanag) Set the right status
device = db_api.device_create(
self.context,
dict(name=device_name,
status='active',
region_id=self.region_id))
LOG.info('Device %s is created' % device)
except exception.AlreadyExist:
device = db_api.device_get_by_name(
self.context,
device_name)
LOG.info('Device %s is existing' % device)
# TODO(mrkanag) Poperly Handle child devices
if child_device_cfg is not None:
for d_name in self._get_value(child_device_cfg['key']):
base_name = self._get_value(child_device_cfg['base_name'])
d_name = '%s-%s' % (base_name, d_name)
try:
device = db_api.device_get_by_name(
self.context,
d_name)
LOG.info('Device %s is existing' % device)
except exception.DeviceNotFound:
# TODO(mrkanag) region_id is hard-coded, fix it !
# Set the right status as well
r_id = 'f7dcd175-27ef-46b5-997f-e6e572f320b0'
device = db_api.device_create(
self.context,
dict(name=d_name,
status='active',
parent_id=device.id,
region_id=r_id))
LOG.info('Device %s is created' % device)
# Device Endpoint
try:
for k, v in connection_cfg.iteritems():
connection[k] = self._get_value(k)
device_endpoint = db_api.device_endpoint_create(
self.context,
dict(name=device_endpoint_name,
connection=connection,
type=endpoint_type,
device_id=device.id))
LOG.info('Device Endpoint %s is created' % device_endpoint)
except exception.AlreadyExist:
device_endpoints = db_api.device_endpoint_get_by_device_type(
self.context,
device_id=device.id,
type=endpoint_type,
name=device_endpoint_name)
if len(device_endpoints) >= 1:
device_endpoint = device_endpoints[0]
LOG.info('Device Endpoint %s is existing' %
device_endpoints[0])
# Device Driver Class
try:
device_driver_class = db_api.device_driver_class_create(
self.context,
dict(name=driver_name,
python_class=driver_name,
type=driver_def['type'],
device_id=device.id,
endpoint_id=device_endpoint.id,
service_worker_id=self.service_worker_id,
extra=driver_def.get('extra')))
LOG.info('Device Driver Class %s is created' %
device_driver_class)
except exception.AlreadyExist:
device_driver_class = db_api.device_driver_class_get_by_name(
self.context,
driver_name)
LOG.info('Device Driver Class %s is existing' %
device_driver_class)
# Device Driver
try:
device_driver = db_api.device_driver_create(
self.context,
dict(device_id=device.id,
name=driver_name,
endpoint_id=device_endpoint.id,
device_driver_class_id=device_driver_class.id,
service_worker_id=self.service_worker_id)
)
LOG.info('Device Driver %s is created' %
device_driver)
except exception.AlreadyExist:
device_drivers = \
db_api.device_driver_get_by_device_endpoint_service_worker(
self.context,
device_id=device.id,
endpoint_id=device_endpoint.id,
device_driver_class_id=device_driver_class.id,
service_worker_id=self.service_worker_id
)
if len(device_drivers) >= 1:
device_driver = device_drivers[0]
LOG.info('Device Driver %s is existing' %
device_driver)

View File

@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from namos.common import exception
from namos.db import api as db_api
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class RegionProcessor(object):
    """Ensure a region record exists for a registering service worker.

    The region name comes from the registration payload; when absent it
    falls back to the region namos itself runs in.
    """

    def __init__(self,
                 context,
                 manager,
                 registration_info):
        self.registration_info = registration_info
        self.manager = manager
        self.context = context

    def process_region(self):
        """Create the region if needed and return its id."""
        # Default to namos's own region when the caller supplied none.
        region_name = self.registration_info.get('region_name')
        if not region_name:
            region_name = cfg.CONF.os_namos.region_name
            self.registration_info['region_name'] = region_name

        try:
            region = db_api.region_create(
                self.context,
                dict(name=region_name)
            )
            LOG.info('Region %s is created' % region)
        except exception.AlreadyExist:
            region = db_api.region_get_by_name(
                self.context,
                name=region_name
            )
            LOG.info('Region %s is existing' % region)

        return region.id

View File

@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from namos.common import config as namos_config
from namos.common import exception
from namos.db import api as db_api
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class ServiceProcessor(object):
    """Register a service worker in namos.

    Creates (or looks up) the node the worker runs on, the service it
    belongs to, its service component, and the worker record itself.
    """

    def __init__(self,
                 context,
                 manager,
                 region_id,
                 registration_info):
        self.registration_info = registration_info
        self.manager = manager
        self.context = context
        self.region_id = region_id

    def process_service(self):
        """Persist all registration records for this worker.

        :returns: tuple of (service_component_id, service_worker_id)
        """
        node = self._register_node()
        service = self._register_service()
        component = self._register_component(node, service)
        worker = self._register_worker(component)
        return component.id, worker.id

    def _register_node(self):
        """Create or fetch the service node record."""
        # TODO(mrkanag) user proper node name instead of fqdn
        fqdn = self.registration_info.get('fqdn')
        try:
            node = db_api.service_node_create(
                self.context,
                dict(name=fqdn,
                     fqdn=fqdn,
                     region_id=self.region_id,
                     extra={'ips': self.registration_info.get('ips')}))
            LOG.info('Service node %s is created' % node)
        except exception.AlreadyExist:
            # TODO(mrkanag) is this to be region specifc search
            node = db_api.service_node_get_by_name(
                self.context,
                fqdn)
            LOG.info('Service node %s is existing' % node)
        return node

    def _register_service(self):
        """Create or fetch the service record."""
        project = self.registration_info.get('project_name')
        try:
            # TODO(mrkanag) use keystone python client and
            # use real service id here
            service = db_api.service_create(
                self.context,
                dict(name=project,
                     keystone_service_id='b9c2549f-f685-4bc2-'
                                         '92e9-ba8af9c18591'))
            LOG.info('Service %s is created' % service)
        except exception.AlreadyExist:
            service = db_api.service_get_by_name(
                self.context,
                project)
            LOG.info('Service %s is existing' % service)
        return service

    def _register_component(self, node, service):
        """Create or fetch the service component record."""
        prog = self.registration_info['prog_name']
        try:
            service_component = db_api.service_component_create(
                self.context,
                dict(name=prog,
                     node_id=node.id,
                     service_id=service.id,
                     type=namos_config.find_type(prog)))
            LOG.info('Service Component %s is created' % service_component)
        except exception.AlreadyExist:
            matches = \
                db_api.service_component_get_all_by_node_for_service(
                    self.context,
                    node_id=node.id,
                    service_id=service.id,
                    name=prog
                )
            if len(matches) == 1:
                service_component = matches[0]
                LOG.info('Service Component %s is existing' %
                         service_component)
            # TODO(mrkanag) what to do when service_components size is > 1
        return service_component

    def _register_worker(self, service_component):
        """Create or fetch the service worker record."""
        info = self.registration_info
        try:
            service_worker = db_api.service_worker_create(
                self.context,
                # TODO(mrkanag) Fix the name, device driver proper !
                dict(name='%s@%s' % (service_component.name,
                                     info['pid']),
                     pid=info['identification'],
                     host=info['host'],
                     service_component_id=service_component.id,
                     deleted_at=None,
                     is_launcher=info['i_am_launcher']
                     ))
            LOG.info('Service Worker %s is created' % service_worker)
        except exception.AlreadyExist:
            service_worker = db_api.service_worker_get_all_by(
                self.context,
                pid=info['identification'],
                service_component_id=service_component.id
            )[0]
            LOG.info('Service Worker %s is existing' %
                     service_worker)
        return service_worker

    def cleanup(self, service_component_id):
        """Remove dead service workers of the given component."""
        # clean up the dead service workers
        db_api.cleanup(self.context, service_component_id)

File diff suppressed because it is too large Load Diff

View File

@ -15,329 +15,329 @@
from namos.db import api from namos.db import api
REGION_LIST = [ REGION_LIST = [
{'f7dcd175-27ef-46b5-997f-e6e572f320af': # {'f7dcd175-27ef-46b5-997f-e6e572f320af':
{'name': 'RegionOne', # {'name': 'RegionOne',
'keystone_region_id': 'region_one', # 'keystone_region_id': 'region_one',
'extra': {'location': 'bangalore'}} # 'extra': {'location': 'bangalore'}}
}, # },
{'f7dcd175-27ef-46b5-997f-e6e572f320b0': # {'f7dcd175-27ef-46b5-997f-e6e572f320b0':
{'name': 'RegionTwo', # {'name': 'RegionTwo',
'keystone_region_id': 'region_two', # 'keystone_region_id': 'region_two',
'extra': {'location': 'chennai'}} # 'extra': {'location': 'chennai'}}
} # }
] ]
DEVICE_LIST = [ DEVICE_LIST = [
# vCenter # vCenter
{'91007d3c-9c95-40c5-8f94-c7b071f9b577': # {'91007d3c-9c95-40c5-8f94-c7b071f9b577':
{ # {
'name': 'Vmware_vCenter_1', # 'name': 'Vmware_vCenter_1',
'display_name': 'VMWare vCenter 1', # 'display_name': 'VMWare vCenter 1',
'description': 'vCenter 5.0', # 'description': 'vCenter 5.0',
'status': 'active', # 'status': 'active',
'extra': {'owner': 'mkr1481@namos.com'}, # 'extra': {'owner': 'mkr1481@namos.com'},
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
}, # },
# Clusters # # Clusters
{'d468ea2e-74f6-4a55-a7f4-a56d18e91c66': # {'d468ea2e-74f6-4a55-a7f4-a56d18e91c66':
{ # {
'name': 'vmware_vc_Cluster_1', # 'name': 'vmware_vc_Cluster_1',
'display_name': 'VMWare vCenter 1 Cluster 1', # 'display_name': 'VMWare vCenter 1 Cluster 1',
'description': 'Cluster 1 having 3 hosts', # 'description': 'Cluster 1 having 3 hosts',
'status': 'active', # 'status': 'active',
'extra': {'owner': 'mkr1481@namos.com', # 'extra': {'owner': 'mkr1481@namos.com',
'vcpus': 1000, # 'vcpus': 1000,
'ram_in_gb': 1024}, # 'ram_in_gb': 1024},
'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
}, # },
{'6c97f476-8e27-4e21-8528-a5ec236306f3': # {'6c97f476-8e27-4e21-8528-a5ec236306f3':
{'name': 'vmware_vc_Cluster_2', # {'name': 'vmware_vc_Cluster_2',
'display_name': 'VMWare vCenter 1 Cluster 2', # 'display_name': 'VMWare vCenter 1 Cluster 2',
'description': 'Cluster 2 having 5 hosts', # 'description': 'Cluster 2 having 5 hosts',
'status': 'active', # 'status': 'active',
'extra': {'owner': 'mkr1481@namos.com'}, # 'extra': {'owner': 'mkr1481@namos.com'},
'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
}, # },
# Datastores # # Datastores
{'fdab6c51-38fb-4fb1-a76f-9c243a8b8296': # {'fdab6c51-38fb-4fb1-a76f-9c243a8b8296':
{'name': 'Vmware_vCenter_1_datastore_1', # {'name': 'Vmware_vCenter_1_datastore_1',
'display_name': 'VMWare vCenter 1 datastore 1', # 'display_name': 'VMWare vCenter 1 datastore 1',
'description': 'vCenter 5.0 Datastore created from FC', # 'description': 'vCenter 5.0 Datastore created from FC',
'status': 'active', # 'status': 'active',
'extra': {'owner': 'mkr1481@namos.com', # 'extra': {'owner': 'mkr1481@namos.com',
'size_in_gb': '102400'}, # 'size_in_gb': '102400'},
'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
}, # },
{'05b935b3-942c-439c-a6a4-9c3c73285430': # {'05b935b3-942c-439c-a6a4-9c3c73285430':
{'name': 'Vmware_vCenter_1_datastore_2', # {'name': 'Vmware_vCenter_1_datastore_2',
'display_name': 'VMWare vCenter 1 datastore 2', # 'display_name': 'VMWare vCenter 1 datastore 2',
'description': 'vCenter 5.0 Datastore created from FC', # 'description': 'vCenter 5.0 Datastore created from FC',
'status': 'active', # 'status': 'active',
'extra': {'owner': 'mkr1481@namos.com', # 'extra': {'owner': 'mkr1481@namos.com',
'size_in_gb': '10240'}, # 'size_in_gb': '10240'},
'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
}, # },
# Switch # # Switch
{'f062556b-45c4-417d-80fa-4283b9c58da3': # {'f062556b-45c4-417d-80fa-4283b9c58da3':
{'name': 'Vmware_vCenter_1_switch_1', # {'name': 'Vmware_vCenter_1_switch_1',
'display_name': 'VMWare vCenter 1 Dist. vSwitch 1', # 'display_name': 'VMWare vCenter 1 Dist. vSwitch 1',
'description': 'vCenter 5.0 distributed virtual switch', # 'description': 'vCenter 5.0 distributed virtual switch',
'status': 'active', # 'status': 'active',
'extra': {'owner': 'mkr1481@namos.com'}, # 'extra': {'owner': 'mkr1481@namos.com'},
'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'}
} # }
] ]
ENDPOINT_LIST = [ ENDPOINT_LIST = [
{'7403bf80-9376-4081-89ee-d2501661ca84':{ # {'7403bf80-9376-4081-89ee-d2501661ca84':{
'name': 'vcenter1_connection', # 'name': 'vcenter1_connection',
'connection': {'host_ip': '10.1.1.3', # 'connection': {'host_ip': '10.1.1.3',
'host_port': 443, # 'host_port': 443,
'host_username': 'adminstrator', # 'host_username': 'adminstrator',
'host_password': 'password'}, # 'host_password': 'password'},
'device_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577' # 'device_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577'
}} # }}
] ]
DEVICE_DRIVER_CLASS_LIST = [ DEVICE_DRIVER_CLASS_LIST = [
{'0664e8c0-ff02-427e-8fa3-8788c017ad84': { # {'0664e8c0-ff02-427e-8fa3-8788c017ad84': {
'python_class': 'nova...vcdriver', # 'python_class': 'nova...vcdriver',
'type': 'compute', # 'type': 'compute',
'vendor': 'vmware-community' # 'vendor': 'vmware-community'
}}, # }},
{'11caf99c-f820-4266-a461-5a15437a8144': { # {'11caf99c-f820-4266-a461-5a15437a8144': {
'python_class': 'cinder...vmdkdriver', # 'python_class': 'cinder...vmdkdriver',
'type': 'volume', # 'type': 'volume',
'vendor': 'vmware-community' # 'vendor': 'vmware-community'
}}, # }},
{'bb99ea96-fe6b-49e6-a761-faea92b79f75': { # {'bb99ea96-fe6b-49e6-a761-faea92b79f75': {
'python_class': 'neutron...nsxdriver', # 'python_class': 'neutron...nsxdriver',
'type': 'network', # 'type': 'network',
'vendor': 'vmware-community' # 'vendor': 'vmware-community'
}} # }}
] ]
DEVICE_DRIVER_LIST = [ DEVICE_DRIVER_LIST = [
# nova # # nova
{'3c089cdb-e1d5-4182-9a8e-cef9899fd7e5':{ # {'3c089cdb-e1d5-4182-9a8e-cef9899fd7e5':{
'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
'device_driver_class_id':'0664e8c0-ff02-427e-8fa3-8788c017ad84', # 'device_driver_class_id': '0664e8c0-ff02-427e-8fa3-8788c017ad84',
'device_id': 'd468ea2e-74f6-4a55-a7f4-a56d18e91c66' # 'device_id': 'd468ea2e-74f6-4a55-a7f4-a56d18e91c66'
}}, # }},
# nova # # nova
{'4e0360ae-0728-4bfd-a557-3ad867231787':{ # {'4e0360ae-0728-4bfd-a557-3ad867231787':{
'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
'device_driver_class_id':'0664e8c0-ff02-427e-8fa3-8788c017ad84', # 'device_driver_class_id': '0664e8c0-ff02-427e-8fa3-8788c017ad84',
'device_id': '6c97f476-8e27-4e21-8528-a5ec236306f3' # 'device_id': '6c97f476-8e27-4e21-8528-a5ec236306f3'
}}, # }},
# cinder # # cinder
{'92d5e2c1-511b-4837-a57d-5e6ee723060c':{ # {'92d5e2c1-511b-4837-a57d-5e6ee723060c':{
'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
'device_driver_class_id':'11caf99c-f820-4266-a461-5a15437a8144', # 'device_driver_class_id': '11caf99c-f820-4266-a461-5a15437a8144',
'device_id': 'fdab6c51-38fb-4fb1-a76f-9c243a8b8296' # 'device_id': 'fdab6c51-38fb-4fb1-a76f-9c243a8b8296'
}}, # }},
# cinder # # cinder
{'f3d807a0-eff0-4473-8ae5-594967136e05':{ # {'f3d807a0-eff0-4473-8ae5-594967136e05':{
'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
'python_class_id':'11caf99c-f820-4266-a461-5a15437a8144', # 'python_class_id': '11caf99c-f820-4266-a461-5a15437a8144',
'device_id': '05b935b3-942c-439c-a6a4-9c3c73285430' # 'device_id': '05b935b3-942c-439c-a6a4-9c3c73285430'
}}, # }},
# neutron # # neutron
{'f27eb548-929c-45e2-a2a7-dc123e2a1bc7':{ # {'f27eb548-929c-45e2-a2a7-dc123e2a1bc7':{
'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84',
'python_class_id':'bb99ea96-fe6b-49e6-a761-faea92b79f75', # 'python_class_id': 'bb99ea96-fe6b-49e6-a761-faea92b79f75',
'device_id': 'f062556b-45c4-417d-80fa-4283b9c58da3' # 'device_id': 'f062556b-45c4-417d-80fa-4283b9c58da3'
}} # }}
] ]
SERVICE_LIST =[ SERVICE_LIST = [
{'11367a37-976f-468a-b8dd-77b28ee63cf4': { # {'11367a37-976f-468a-b8dd-77b28ee63cf4': {
'name': 'nova_service', # 'name': 'nova_service',
'keystone_service_id': 'b9c2549f-f685-4bc2-92e9-ba8af9c18599' # 'keystone_service_id': 'b9c2549f-f685-4bc2-92e9-ba8af9c18599'
}}, # }},
{'809e04c1-2f3b-43af-9677-3428a0154216': { # {'809e04c1-2f3b-43af-9677-3428a0154216': {
'name': 'cinder_service', # 'name': 'cinder_service',
'keystone_service_id': '9cc4c374-abb5-4bdc-9129-f0fa4bba0e0b' # 'keystone_service_id': '9cc4c374-abb5-4bdc-9129-f0fa4bba0e0b'
}}, # }},
{'3495fa07-39d9-4d87-9f97-0a582a3e25c3': { # {'3495fa07-39d9-4d87-9f97-0a582a3e25c3': {
'name': 'neutron_service', # 'name': 'neutron_service',
'keystone_service_id': 'b24e2884-75bc-4876-81d1-5b4fb6e92afc' # 'keystone_service_id': 'b24e2884-75bc-4876-81d1-5b4fb6e92afc'
}} # }}
] ]
SERVICE_NODE_LIST = [ SERVICE_NODE_LIST = [
{ # {
'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe': { # 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe': {
'name': 'd_network_node_1', # 'name': 'd_network_node_1',
'fqdn': 'network_node_1.devstack1.abc.com', # 'fqdn': 'network_node_1.devstack1.abc.com',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
} # }
}, # },
{ # {
'4e99a641-dbe9-416e-8c0a-78015dc55a2a': { # '4e99a641-dbe9-416e-8c0a-78015dc55a2a': {
'name': 'd_compute_node_1', # 'name': 'd_compute_node_1',
'fqdn': 'compute_node_1.devstack.abc.com', # 'fqdn': 'compute_node_1.devstack.abc.com',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
} # }
}, # },
{ # {
'b92f4811-7970-421b-a611-d51c62972388': { # 'b92f4811-7970-421b-a611-d51c62972388': {
'name': 'd_cloud-controller-1', # 'name': 'd_cloud-controller-1',
'fqdn': 'cloud_controller_1.devstack1.abc.com', # 'fqdn': 'cloud_controller_1.devstack1.abc.com',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
} # }
}, # },
{ # {
'e5913cd3-a416-40e1-889f-1a1b1c53001c': { # 'e5913cd3-a416-40e1-889f-1a1b1c53001c': {
'name': 'd_storage_node_1', # 'name': 'd_storage_node_1',
'fqdn': 'storage_node_1.devstack.abc.com', # 'fqdn': 'storage_node_1.devstack.abc.com',
'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'
} # }
} # }
] ]
SERVICE_COMPONENT_LIST = [ SERVICE_COMPONENT_LIST = [
# nova # # nova
{ # {
'7259a9ff-2e6f-4e8d-b2fb-a529188825dd': { # '7259a9ff-2e6f-4e8d-b2fb-a529188825dd': {
'name': 'd_nova-compute', # 'name': 'd_nova-compute',
'node_id': '4e99a641-dbe9-416e-8c0a-78015dc55a2a', # 'node_id': '4e99a641-dbe9-416e-8c0a-78015dc55a2a',
'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4' # 'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
} # }
}, # },
{ # {
'e5e366ea-9029-4ba0-8bbc-f658e642aa54': { # 'e5e366ea-9029-4ba0-8bbc-f658e642aa54': {
'name': 'd_nova-scheduler', # 'name': 'd_nova-scheduler',
'node_id': 'b92f4811-7970-421b-a611-d51c62972388', # 'node_id': 'b92f4811-7970-421b-a611-d51c62972388',
'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4' # 'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
} # }
}, # },
{ # {
'f7813622-85ee-4588-871d-42c3128fa14f': { # 'f7813622-85ee-4588-871d-42c3128fa14f': {
'name': 'd_nova-api', # 'name': 'd_nova-api',
'node_id': 'b92f4811-7970-421b-a611-d51c62972388', # 'node_id': 'b92f4811-7970-421b-a611-d51c62972388',
'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4' # 'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4'
} # }
}, # },
# cinder # # cinder
{ # {
'b0e9ac3f-5600-406c-95e4-f698b1eecfc6': { # 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6': {
'name': 'd_cinder-volume', # 'name': 'd_cinder-volume',
'node_id': 'e5913cd3-a416-40e1-889f-1a1b1c53001c', # 'node_id': 'e5913cd3-a416-40e1-889f-1a1b1c53001c',
'service_id': '809e04c1-2f3b-43af-9677-3428a0154216' # 'service_id': '809e04c1-2f3b-43af-9677-3428a0154216'
} # }
}, # },
# neutron # # neutron
{ # {
'54f608bd-fb01-4614-9653-acbb803aeaf7':{ # '54f608bd-fb01-4614-9653-acbb803aeaf7':{
'name': 'd_neutron-agent', # 'name': 'd_neutron-agent',
'node_id': 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe', # 'node_id': 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe',
'service_id': '3495fa07-39d9-4d87-9f97-0a582a3e25c3' # 'service_id': '3495fa07-39d9-4d87-9f97-0a582a3e25c3'
} # }
} # }
] ]
SERVICE_WORKER_LIST = [ SERVICE_WORKER_LIST = [
# cluster-1 # # cluster-1
{ # {
'65dbd695-fa92-4950-b8b4-d46aa0408f6a': { # '65dbd695-fa92-4950-b8b4-d46aa0408f6a': {
'name': 'd_nova-compute-esx-cluster1', # 'name': 'd_nova-compute-esx-cluster1',
'pid': '1233454343', # 'pid': '1233454343',
'host': 'd_nova-compute-esx-cluster1', # 'host': 'd_nova-compute-esx-cluster1',
'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd', # 'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd',
'device_driver_id': '3c089cdb-e1d5-4182-9a8e-cef9899fd7e5' # 'device_driver_id': '3c089cdb-e1d5-4182-9a8e-cef9899fd7e5'
} # }
}, # },
# cluster-2 # # cluster-2
{ # {
'50d2c0c6-741d-4108-a3a2-2090eaa0be37': { # '50d2c0c6-741d-4108-a3a2-2090eaa0be37': {
'name': 'd_nova-compute-esx-cluster2', # 'name': 'd_nova-compute-esx-cluster2',
'pid': '1233454344', # 'pid': '1233454344',
'host': 'd_nova-compute-esx-cluster2', # 'host': 'd_nova-compute-esx-cluster2',
'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd', # 'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd',
'device_driver_id': '4e0360ae-0728-4bfd-a557-3ad867231787' # 'device_driver_id': '4e0360ae-0728-4bfd-a557-3ad867231787'
} # }
}, # },
# datastore-1 # # datastore-1
{ # {
'77e3ee16-fa2b-4e12-ad1c-226971d1a482': { # '77e3ee16-fa2b-4e12-ad1c-226971d1a482': {
'name': 'd_cinder-volume-vmdk-1', # 'name': 'd_cinder-volume-vmdk-1',
'pid': '09878654', # 'pid': '09878654',
'host': 'd_cinder-volume-vmdk-1', # 'host': 'd_cinder-volume-vmdk-1',
'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6', # 'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6',
'device_driver_id': '92d5e2c1-511b-4837-a57d-5e6ee723060c' # 'device_driver_id': '92d5e2c1-511b-4837-a57d-5e6ee723060c'
} # }
}, # },
# datastore-2 # # datastore-2
{ # {
'8633ce68-2b02-4efd-983c-49a460f6d7ef': { # '8633ce68-2b02-4efd-983c-49a460f6d7ef': {
'name': 'd_cinder-volume-vmdk-2', # 'name': 'd_cinder-volume-vmdk-2',
'pid': '4353453', # 'pid': '4353453',
'host': 'd_cinder-volume-vmdk-2', # 'host': 'd_cinder-volume-vmdk-2',
'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6', # 'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6',
'device_driver_id': 'f3d807a0-eff0-4473-8ae5-594967136e05' # 'device_driver_id': 'f3d807a0-eff0-4473-8ae5-594967136e05'
} # }
}, # },
# vswitch # # vswitch
{ # {
'5a3ac5b9-9186-45d8-928c-9e702368dfb4': { # '5a3ac5b9-9186-45d8-928c-9e702368dfb4': {
'name': 'd_neutron-agent', # 'name': 'd_neutron-agent',
'pid': '2359234', # 'pid': '2359234',
'host': 'd_neutron-agent', # 'host': 'd_neutron-agent',
'service_component_id': '54f608bd-fb01-4614-9653-acbb803aeaf7', # 'service_component_id': '54f608bd-fb01-4614-9653-acbb803aeaf7',
'device_driver_id': 'f27eb548-929c-45e2-a2a7-dc123e2a1bc7' # 'device_driver_id': 'f27eb548-929c-45e2-a2a7-dc123e2a1bc7'
} # }
}, # },
] ]
CONFIG_LIST = [ CONFIG_LIST = [
{ # {
'dc6aa02f-ba70-4410-a59c-5e113e629fe5': { # 'dc6aa02f-ba70-4410-a59c-5e113e629fe5': {
'name':'vmware.host_ip', # 'name': 'vmware.host_ip',
'value':'10.1.0.1', # 'value': '10.1.0.1',
'help': 'VMWare vcenter IP address', # 'help': 'VMWare vcenter IP address',
'default':'', # 'default': '',
'type':'String', # 'type': 'String',
'required':True, # 'required':True,
'secret': False, # 'secret': False,
'config_file':'/etc/nova/nova.conf', # 'config_file': '/etc/nova/nova.conf',
'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' # 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
} # }
}, # },
{ # {
'dc6aa02f-ba70-4410-a59c-5e113e629f10': { # 'dc6aa02f-ba70-4410-a59c-5e113e629f10': {
'name':'vmware.host_username', # 'name': 'vmware.host_username',
'value':'Administraotr', # 'value': 'Administraotr',
'help': 'VMWare vcenter Username', # 'help': 'VMWare vcenter Username',
'default':'Administrator', # 'default': 'Administrator',
'type':'String', # 'type': 'String',
'required':True, # 'required':True,
'secret': False, # 'secret': False,
'file':'/etc/nova/nova.conf', # 'file': '/etc/nova/nova.conf',
'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' # 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
} # }
}, # },
{ # {
'dc6aa02f-ba70-4410-a59c-5e113e629f11': { # 'dc6aa02f-ba70-4410-a59c-5e113e629f11': {
'name':'vmware.host_password', # 'name': 'vmware.host_password',
'value':'password', # 'value': 'password',
'help': 'VMWare vcenter password', # 'help': 'VMWare vcenter password',
'default':'', # 'default': '',
'type':'String', # 'type': 'String',
'required':True, # 'required':True,
'secret': True, # 'secret': True,
'file':'/etc/nova/nova.conf', # 'file': '/etc/nova/nova.conf',
'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' # 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a'
}, # },
} # }
] ]

View File

@ -47,6 +47,8 @@ output_file = namos/locale/namos.pot
[entry_points] [entry_points]
console_scripts = console_scripts =
namos-manage = namos.cmd.manage:main namos-manage = namos.cmd.manage:main
namos-api = namos.cmd.api:main
namos-manager = namos.cmd.conductor:main
oslo.config.opts = oslo.config.opts =
namos.common.config = namos.common.config:list_opts namos.common.config = namos.common.config:list_opts