Fix keystone configuration

Add a keystone_authtoken section to the config file and remove the old
keystone section.

Change-Id: I31066305e69b44842746951c2dbbb8801fffd42a
Saad Zaher 2016-01-06 10:22:26 +00:00
parent 59fb6e4120
commit da9fbe0ee7
7 changed files with 173 additions and 70 deletions
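
For reference, a filled-in example of the new section (option names and defaults come from the diff below; the endpoint and credential values are illustrative placeholders only):

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
username = osha
password = secret
project_name = services
user_domain_name = Default
project_domain_name = Default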


@@ -116,62 +116,42 @@
 #options =
-[keystone]
+[keystone_authtoken]
 #
 # From osha
 #
-# Name used for authentication with the OpenStack Identity service. Defaults to
-# env[OS_USERNAME]. (string value)
-#os_username =
+# OpenStack auth URI, e.g. http://controller:5000 (string value)
+#auth_uri = <None>
-# Password used for authentication with the OpenStack Identity service.
-# Defaults to env[OS_PASSWORD]. (string value)
-#os_password =
+# OpenStack auth URL, e.g. http://controller:35357 (string value)
+#auth_url = <None>
-# Project name to scope to. Defaults to env[OS_PROJECT_NAME]. (string value)
-#os_project_name =
+# OpenStack auth plugin, e.g. password, token, ...; password is the only
+# plugin available for the time being (string value)
+#auth_plugin = <None>
-# Domain name containing project. Defaults to env[OS_PROJECT_DOMAIN_NAME].
-# (string value)
-#os_project_domain_name =
+# OpenStack project domain id; the default is Default (string value)
+#project_domain_id = Default
-# User's domain name. Defaults to env[OS_USER_DOMAIN_NAME]. (string value)
-#os_user_domain_name =
+# OpenStack user domain id; the default is Default (string value)
+#user_domain_id = Default
-# Tenant to request authorization on. Defaults to env[OS_TENANT_NAME]. (string
-# value)
-#os_tenant_name =
+# OpenStack project domain name; the default is Default (string value)
+#project_domain_name = Default
-# Tenant to request authorization on. Defaults to env[OS_TENANT_ID]. (string
-# value)
-#os_tenant_id =
+# OpenStack user domain name; the default is Default (string value)
+#user_domain_name = Default
-# Specify the Identity endpoint to use for authentication. Defaults to
-# env[OS_AUTH_URL]. (string value)
-#os_auth_url =
+# OpenStack project name (string value)
+#project_name = services
-# Specify the Freezer backup service endpoint to use. Defaults to
-# env[OS_BACKUP_URL]. (string value)
-#os_backup_url =
+# OpenStack username (string value)
+#username = <None>
-# Specify the region to use. Defaults to env[OS_REGION_NAME]. (string value)
-#os_region_name =
-# Specify an existing token to use instead of retrieving one via authentication
-# (e.g. with username & password). Defaults to env[OS_TOKEN]. (string value)
-#os_token =
-# Identity API version: 2.0 or 3. Defaults to env[OS_IDENTITY_API_VERSION]
-# (string value)
-#os_identity_api_version =
-# Endpoint type to select. Valid endpoint types: "public" or "publicURL",
-# "internal" or "internalURL", "admin" or "adminURL". Defaults to
-# env[OS_ENDPOINT_TYPE] or "public" (string value)
-# Allowed values: public, publicURL, internal, internalURL, admin, adminURL
-#os_endpoint_type = public
+# OpenStack password (string value)
+#password = <None>
 [monitoring]


@@ -48,6 +48,7 @@
 - id: COMPUTE-0001
   ip-addr: 192.168.10.6
   hostname: padawan-ccp-comp0001-mgmt
+  domain-name: padawan-vagrant_cpn-0001  # used by the libvirt fencer driver only
   fencer-ip: 192.168.9.6
   fencer-password: password
   fencer-user: admin
@@ -55,6 +56,7 @@
 - id: COMPUTE-0002
   ip-addr: 192.168.10.7
   hostname: padawan-ccp-comp0002-mgmt
+  domain-name: padawan-vagrant_cpn-0002  # name of the VM in KVM
   fencer-ip: 192.168.9.7
   fencer-password: password
   fencer-user: admin


@@ -66,6 +66,45 @@ _FENCER = [
                'should be in key:value format')
 ]
+_KEYSTONE_AUTH_TOKEN = [
+    cfg.StrOpt('auth_uri',
+               help='OpenStack auth URI, e.g. http://controller:5000',
+               dest='auth_uri'),
+    cfg.StrOpt('auth_url',
+               help='OpenStack auth URL, e.g. http://controller:35357',
+               dest='auth_url'),
+    cfg.StrOpt('auth_plugin',
+               help='OpenStack auth plugin, e.g. password, token, ...; '
+                    'password is the only plugin available for the '
+                    'time being',
+               dest='auth_plugin'),
+    cfg.StrOpt('project_domain_id',
+               default='Default',
+               help='OpenStack project domain id; the default is Default',
+               dest='project_domain_id'),
+    cfg.StrOpt('user_domain_id',
+               default='Default',
+               help='OpenStack user domain id; the default is Default',
+               dest='user_domain_id'),
+    cfg.StrOpt('project_domain_name',
+               default='Default',
+               help='OpenStack project domain name; the default is Default',
+               dest='project_domain_name'),
+    cfg.StrOpt('user_domain_name',
+               default='Default',
+               help='OpenStack user domain name; the default is Default',
+               dest='user_domain_name'),
+    cfg.StrOpt('project_name',
+               default='services',
+               help='OpenStack project name',
+               dest='project_name'),
+    cfg.StrOpt('username',
+               help='OpenStack username',
+               dest='username'),
+    cfg.StrOpt('password',
+               help='OpenStack password',
+               dest='password')
+]
 def build_os_options():
     osclient_opts = [
@@ -161,6 +200,14 @@ def configure():
     CONF.register_group(fencers_grp)
     CONF.register_opts(_FENCER, group='fencer')
+    # Osha auth
+    keystone_grp = cfg.OptGroup('keystone_authtoken',
+                                title='Keystone Auth Options',
+                                help='OpenStack credentials used to call '
+                                     'the nova API for evacuation')
+    CONF.register_group(keystone_grp)
+    CONF.register_opts(_KEYSTONE_AUTH_TOKEN, group='keystone_authtoken')
     default_conf = cfg.find_config_files('osha', 'osha',
                                          '.conf')
     log.register_options(CONF)
@@ -191,7 +238,7 @@ def list_opts():
     _OPTS = {
         None: _COMMON,
         'monitoring': _MONITORS,
-        'keystone': build_os_options(),
+        'keystone_authtoken': _KEYSTONE_AUTH_TOKEN,
         'fencer': _FENCER
     }
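
Once the keystone_authtoken group is registered, its options are read back through oslo.config like any other group. A minimal standalone sketch (a subset of the options above; the config file path is illustrative):

from oslo_config import cfg

CONF = cfg.CONF
_KEYSTONE_AUTH_TOKEN = [
    cfg.StrOpt('auth_url', help='OpenStack auth URL'),
    cfg.StrOpt('username', help='OpenStack username'),
    cfg.StrOpt('password', help='OpenStack password'),
]
CONF.register_group(cfg.OptGroup('keystone_authtoken'))
CONF.register_opts(_KEYSTONE_AUTH_TOKEN, group='keystone_authtoken')
CONF(['--config-file', '/etc/osha/osha.conf'])  # illustrative path
print(CONF.keystone_authtoken.auth_url)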


@@ -105,20 +105,22 @@ class OSClient:
         self.authSession = new_sess
         evacuated_nodes = []
         for node in nodes:
-            hypervisors = nova.hypervisors.search(node, True)
+            hypervisors = nova.hypervisors.search(node.get('host'), True)
             for hypervisor in hypervisors:
-                host = {'host': node, 'servers': hypervisor.servers}
+                host = {'host': node.get('host'), 'servers': hypervisor.servers}
                 evacuated_nodes.append(host)
                 for server in hypervisor.servers:
-                    pass
-                    # output = nova.servers.evacuate(server.get('uuid'),
-                    #                                on_shared_storage=True)
+                    output = nova.servers.evacuate(server.get('uuid'),
+                                                   on_shared_storage=True)
+                    print output
+        exit()
         return evacuated_nodes
     def set_in_maintance(self, nodes):
         new_sess = session.Session(auth=self.authSession.auth)
         nova = novaclient.Client(session=new_sess,
-                                  endpoint_type=self.endpoint_type)
+                                 endpoint_type=self.endpoint_type)
         self.authSession = new_sess
         for node in nodes:
             output = []
@@ -134,6 +136,25 @@ class OSClient:
         auth_session = session.Session(auth=self.authSession.auth)
         return auth_session
+    def get_node_status(self, hostname):
+        """Check the node's nova-service status (enabled or disabled).
+        :param hostname: hostname of the node to check
+        :return: dict describing the service status, or False on failure
+        """
+        nova = novaclient.Client(session=self.authSession,
+                                 endpoint_type=self.endpoint_type)
+        try:
+            node = nova.services.find(host=hostname)
+            print node
+        except Exception as e:
+            LOG.error(e)
+            return False
+        if not node:
+            return False
+        return node.to_dict()
     def disable_node(self, hostname):
         auth_session = session.Session(auth=self.authSession.auth)
         nova = novaclient.Client(session=auth_session,
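
The evacuate flow above assumes a keystone session and a nova client built from the keystone_authtoken credentials. A sketch of that wiring, assuming keystoneauth1 and python-novaclient versions contemporary with this change; the host name and credentials are placeholders:

from keystoneauth1 import session
from keystoneauth1.identity import v3
from novaclient import client as nova_client

auth = v3.Password(auth_url='http://controller:35357/v3',  # placeholder
                   username='osha', password='secret',
                   project_name='services',
                   user_domain_name='Default',
                   project_domain_name='Default')
sess = session.Session(auth=auth)
nova = nova_client.Client('2', session=sess)
# look up the hypervisor record for a failed host, then evacuate its servers
for hyp in nova.hypervisors.search('failed-host', True):
    for server in getattr(hyp, 'servers', []):
        nova.servers.evacuate(server['uuid'], on_shared_storage=True)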


@@ -14,6 +14,7 @@
 from oslo_config import cfg
 from oslo_log import log
 from osha.common.osclient import OSClient
+from osha.fencers.common.manager import FencerManager
 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
@@ -29,15 +30,76 @@ class EvacuationManager(object):
     5- Get a list of instances running on this node
     6- Evacuate :)
     """
-    def __init__(self):
+    def __init__(self, nodes=None):
+        """
+        @todo We cannot get the credentials from the monitoring section, so
+        we read them from the keystone_authtoken section; the related code
+        in other parts still needs review.
+        :return:
+        """
-        credentials = CONF.get('monitoring')
+        credentials = CONF.get('keystone_authtoken')
         self.client = OSClient(
-            authurl=credentials.get('endpoint'),
-            username=credentials.get()
+            authurl=credentials.get('auth_url'),
+            username=credentials.get('username'),
+            password=credentials.get('password'),
+            project_name=credentials.get('project_name'),
+            user_domain_id=credentials.get('user_domain_id'),
+            project_domain_id=credentials.get('project_domain_id'),
+            project_domain_name=credentials.get('project_domain_name'),
+            user_domain_name=credentials.get('user_domain_name')
         )
+        self.nodes = nodes
+        if not nodes:
+            raise Exception('No nodes to evacuate ...')
+    def evacuate(self):
+        """Run the evacuation process for self.nodes, the failed nodes
+        reported by the monitoring system.
+        :return: list of nodes marked with success or failure
+        """
+        self.check_nodes_maintenance()
+        trigger_disable = False
+        for node in self.nodes:
+            if not node.get('status'):
+                trigger_disable = True
+                break
+        if trigger_disable:
+            self._disable_nodes()
+        self.fence_nodes()
+        self.list_host_instances()
+    def _disable_nodes(self):
+        disabled_nodes = []
+        for node in self.nodes:
+            node_status = self.client.get_node_status(hostname=node.get('host'))
+            if node_status.get('status') == 'enabled':
+                node['status'] = self.client.disable_node(
+                    hostname=node.get('host'))
+            else:
+                node['status'] = True
+            disabled_nodes.append(node)
+        self.nodes = disabled_nodes
+    def check_nodes_maintenance(self):
+        nodes_status = []
+        for node in self.nodes:
+            status = self.client.get_node_status(hostname=node.get('host'))
+            if status.get('status') == 'enabled':
+                node['status'] = False
+                nodes_status.append(node)
+            else:
+                node['status'] = True
+                nodes_status.append(node)
+        self.nodes = nodes_status
+    def fence_nodes(self):
+        fencer = FencerManager(self.nodes)
+        nodes = fencer.fence()
+        print nodes
+    def list_host_instances(self):
+        self.client.evacuate(self.nodes)
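
A hypothetical caller only needs node dicts carrying a 'host' key; the 'status' key is filled in by check_nodes_maintenance() and _disable_nodes(). The hostname below is illustrative:

from osha.evacuate import EvacuationManager

nodes = [{'host': 'padawan-ccp-comp0001-mgmt'}]  # placeholder hostname
manager = EvacuationManager(nodes)
manager.evacuate()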


@@ -27,15 +27,15 @@ class LibvirtDriver(FencerBaseDriver):
         self.connection = libvirt.open(name=conn_name)
     def force_shutdown(self):
-        target = self.connection.lookupByName(name=self.node.get('hostname'))
+        target = self.connection.lookupByName(name=self.node.get('domain-name'))
         return target.destroy()
     def graceful_shutdown(self):
-        target = self.connection.lookupByName(name=self.node.get('hostname'))
+        target = self.connection.lookupByName(name=self.node.get('domain-name'))
         return target.shutdown()
     def status(self):
-        target = self.connection.lookupByName(name=self.node.get('hostname'))
+        target = self.connection.lookupByName(name=self.node.get('domain-name'))
        return target.isActive()
     def get_info(self):
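
The driver now looks domains up by the new domain-name field rather than the nova hostname, since libvirt only knows the VM's domain name. The equivalent raw libvirt calls, using the fencer address, user, and domain name from the servers config above (the connection URI scheme is an assumption):

import libvirt

conn = libvirt.open('qemu+ssh://admin@192.168.9.6/system')  # URI assumed
dom = conn.lookupByName('padawan-vagrant_cpn-0001')
if dom.isActive():
    dom.destroy()      # force_shutdown()
    # dom.shutdown()   # graceful_shutdown()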


@@ -15,8 +15,7 @@ from osha.common import config
 from oslo_config import cfg
 from oslo_log import log
 from osha.monitors.common.manager import MonitorManager
-from osha.fencers.common.manager import FencerManager
-from osha.common.osclient import OSClient
+from osha.evacuate import EvacuationManager
 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
@@ -27,16 +26,6 @@ def main():
     config.setup_logging()
     LOG.info('Starting osha ... ')
     # load and initialize the monitoring driver
-    mon = CONF.get('monitoring')
-    client = OSClient(
-        authurl=mon.get('endpoint'),
-        username=mon.get('username'),
-        password=mon.get('password'),
-        **mon.get('kwargs')
-    )
-    client.disable_node('padawan-ccp-comp0003-mgmt')
-    #client.set_in_maintance(['padawan-ccp-comp0003-mgmt'])
-    exit()
     monitor = MonitorManager()
     # Do the monitoring procedure
     # Monitor, analyse, nodes down ?, wait, double check ? evacuate ..
@@ -47,6 +36,8 @@ def main():
     # deployments
     # Load Fence driver
     # Shutdown the node
-    fencer = FencerManager(nodes)
-    nodes = fencer.fence()
+    evacuator = EvacuationManager(nodes)
+    evacuator.evacuate()
     exit()
     print "Fenced nodes are", nodes