make compass support os only cluster
Change-Id: Ia6e8d1bde05822a6d8d98018ff0e3126e6a31323
parent 51aac2dd7f
commit 5b969f98cd
@@ -66,6 +66,9 @@ flags.add('machines',
flags.add('subnets',
          help='comma separated subnets',
          default='')
flags.add('adapter_name',
          help='adapter name',
          default='')
flags.add('adapter_os_pattern',
          help='adapter os name',
          default=r'(?i)centos.*')
@@ -325,43 +328,73 @@ def _get_adapter(client):
        msg = 'failed to get adapters'
        raise Exception(msg)

    adapter_name = flags.OPTIONS.adapter_name
    os_pattern = flags.OPTIONS.adapter_os_pattern
    os_re = re.compile(os_pattern)
    if os_pattern:
        os_re = re.compile(os_pattern)
    else:
        os_re = None
    target_system_pattern = flags.OPTIONS.adapter_target_system_pattern
    target_system_re = re.compile(target_system_pattern)
    if target_system_pattern:
        target_system_re = re.compile(target_system_pattern)
    else:
        target_system_re = None
    flavor_pattern = flags.OPTIONS.adapter_flavor_pattern
    flavor_re = re.compile(flavor_pattern)
    if flavor_pattern:
        flavor_re = re.compile(flavor_pattern)
    else:
        flavor_re = None
    adapter_id = None
    os_id = None
    flavor_id = None
    adapter = None
    for item in resp:
        if 'distributed_system_name' not in item:
        adapter_id = None
        os_id = None
        flavor_id = None
        adapter = item
        for supported_os in adapter['supported_oses']:
            if not os_re or os_re.match(supported_os['name']):
                os_id = supported_os['os_id']
                break

        if not os_id:
            logging.info('no os found for adapter %s', adapter)
            continue
        if target_system_re.match(item['distributed_system_name']):
            adapter = item

        if 'flavors' in adapter:
            for flavor in adapter['flavors']:
                if not flavor_re or flavor_re.match(flavor['name']):
                    flavor_id = flavor['id']
                    break

        if adapter_name and adapter['name'] == adapter_name:
            adapter_id = adapter['id']
            logging.info('adapter name %s match: %s', adapter_name, adapter)
        elif 'distributed_system_name' in item:
            if (
                not target_system_re or
                target_system_re.match(adapter['distributed_system_name'])
            ):
                adapter_id = adapter['id']
                logging.info(
                    'distributed system name pattern %s match: %s',
                    target_system_pattern, adapter
                )

        if adapter_id:
            logging.info('adapter does not match: %s', adapter)
            break

    if not adapter_id:
        msg = 'no adapter found for %s' % target_system_pattern
        msg = 'no adapter found'
        raise Exception(msg)

    for supported_os in adapter['supported_oses']:
        if os_re.match(supported_os['name']):
            os_id = supported_os['os_id']
            break

    if not os_id:
        msg = 'no os found for %s' % os_pattern
        raise Exception(msg)

    for flavor in adapter['flavors']:
        if flavor_re.match(flavor['name']):
            flavor_id = flavor['id']
            break

    if not flavor_id:
    if flavor_re and not flavor_id:
        msg = 'no flavor found for %s' % flavor_pattern
        raise Exception(msg)
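The heart of the os-only support in _get_adapter is that the three selection patterns become optional: an empty --adapter_os_pattern, --adapter_target_system_pattern or --adapter_flavor_pattern compiles to None, and a None regex is treated as "match anything", so an adapter with no distributed_system_name and no flavors can still be picked via --adapter_name. A minimal standalone sketch of that selection idea, using an illustrative adapter list rather than the real Compass API response:

    import re


    def pick_adapter(adapters, name='', os_pattern='', flavor_pattern='',
                     target_system_pattern=''):
        """Return (adapter_id, os_id, flavor_id); empty patterns act as wildcards."""
        os_re = re.compile(os_pattern) if os_pattern else None
        ts_re = re.compile(target_system_pattern) if target_system_pattern else None
        flavor_re = re.compile(flavor_pattern) if flavor_pattern else None
        for adapter in adapters:
            os_id = next(
                (o['os_id'] for o in adapter.get('supported_oses', [])
                 if not os_re or os_re.match(o['name'])), None)
            if not os_id:
                continue  # this adapter supports no acceptable os
            flavor_id = next(
                (f['id'] for f in adapter.get('flavors', [])
                 if not flavor_re or flavor_re.match(f['name'])), None)
            if name and adapter.get('name') == name:
                return adapter['id'], os_id, flavor_id  # e.g. name='os_only'
            dsn = adapter.get('distributed_system_name')
            if dsn and (not ts_re or ts_re.match(dsn)):
                return adapter['id'], os_id, flavor_id
        raise Exception('no adapter found')

For example, pick_adapter([{'id': 3, 'name': 'os_only', 'supported_oses': [{'os_id': 7, 'name': 'CentOS-6.5'}]}], name='os_only', os_pattern='(?i)centos.*') would return (3, 7, None): the adapter has no flavors, and that is now acceptable.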
@@ -421,14 +454,21 @@ def _add_cluster(client, adapter_id, os_id, flavor_id, machines):

    cluster = resp
    cluster_id = cluster['id']
    flavor = cluster['flavor']
    roles = flavor['roles']
    if 'flavor' in cluster:
        flavor = cluster['flavor']
    else:
        flavor = None
    if flavor and 'roles' in flavor:
        roles = flavor['roles']
    else:
        roles = []
    role_mapping = {}
    for role in roles:
        if role.get('optional', False):
            role_mapping[role['name']] = 1
        else:
            role_mapping[role['name']] = 0

    hostnames = [
        hostname for hostname in flags.OPTIONS.hostnames.split(',')
        if hostname
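With an os-only adapter the cluster returned by the server carries no flavor, so _add_cluster now defaults roles to an empty list and the role mapping ends up empty. A small sketch of that defaulting, using hand-written cluster dicts purely for illustration:

    def build_role_mapping(cluster):
        """Map role name -> 1 if optional else 0; empty when the cluster has no flavor."""
        flavor = cluster.get('flavor') or {}
        roles = flavor.get('roles') or []
        return {role['name']: 1 if role.get('optional', False) else 0 for role in roles}


    # os-only cluster: no 'flavor' key at all, so nothing to map
    assert build_role_mapping({'id': 1, 'name': 'cluster1'}) == {}
    # package adapter: roles come from the flavor
    assert build_role_mapping(
        {'flavor': {'roles': [{'name': 'allinone-compute', 'optional': True}]}}
    ) == {'allinone-compute': 1}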
@@ -882,8 +922,8 @@ def _get_installing_progress(client, cluster_id, host_mapping):
def _check_dashboard_links(client, cluster_id):
    dashboard_url = flags.OPTIONS.dashboard_url
    if not dashboard_url:
        raise Exception(
            'no dashboard url set')
        logging.info('no dashboard url set')
        return
    dashboard_link_pattern = re.compile(
        flags.OPTIONS.dashboard_link_pattern)
    r = requests.get(dashboard_url, verify=False)
@@ -918,8 +958,10 @@ def main():
        client, host_mapping, subnet_mapping
    )
    _set_cluster_os_config(client, cluster_id, host_ips)
    _set_cluster_package_config(client, cluster_id)
    _set_hosts_roles(client, cluster_id, host_mapping, role_mapping)
    if flavor_id:
        _set_cluster_package_config(client, cluster_id)
    if role_mapping:
        _set_hosts_roles(client, cluster_id, host_mapping, role_mapping)
    _deploy_clusters(client, cluster_id, host_mapping)
    _get_installing_progress(client, cluster_id, host_mapping)
    _check_dashboard_links(client, cluster_id)
@@ -1,2 +1,2 @@
#!/bin/bash
/opt/compass/bin/client.py --switch_ips=172.29.8.40 --machines=00:0c:29:a7:ea:4b --subnets=10.145.88.0/23,172.16.0.0/16 --cluster_name=cluster1 --domain=ods.com --default_gateway=10.145.88.1 --service_credentials=image:service=service,compute:service=service,dashboard:service=service,identity:service=service,metering:service=service,rabbitmq:service=service,volume:service=service,mysql:service=service --console_credentials=admin:console=console,compute:console=console,dashboard:console=console,image:console=console,metering:console=console,network:console=console,object-store:console=console,volume:console=console --hostnames=host1 --host_networks="host1:eth0=10.145.89.201|is_mgmt,eth1=172.16.100.201|is_promiscuous" --partitions="/var=50%,/home=30%" --network_mapping=management=eth0,tenant=eth0,storage=eth0,public=eth1 --host_roles=host1=allinone-compute --dashboard_url=http://10.145.89.201
/opt/compass/bin/client.py --switch_ips=172.29.8.40 --machines=00:0c:29:a7:ea:4b --adapter_name=os_only --adapter_flavor_pattern= --subnets=10.145.88.0/23,172.16.0.0/16 --cluster_name=cluster1 --domain=ods.com --default_gateway=10.145.88.1 --service_credentials= --console_credentials= --hostnames=host1 --host_networks="host1:eth0=10.145.89.201|is_mgmt,eth1=172.16.100.201|is_promiscuous" --partitions="/var=50%,/home=30%" --network_mapping= --host_roles= --dashboard_url=
@@ -192,7 +192,8 @@ class ActionHelper(object):
        cluster_info = cluster_db.get_cluster(user, cluster_id)

        # convert roles retrieved from db into a list of role names
        roles_info = cluster_info[const.FLAVOR][const.ROLES]
        roles_info = cluster_info.setdefault(
            const.FLAVOR, {}).setdefault(const.ROLES, [])
        cluster_info[const.FLAVOR][const.ROLES] = \
            ActionHelper._get_role_names(roles_info)
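The ActionHelper change reads the roles defensively: the setdefault chain creates an empty FLAVOR/ROLES structure when the cluster has none, instead of raising a KeyError for os-only clusters. The behaviour, shown with plain string keys standing in for the const.FLAVOR / const.ROLES names used above:

    # Illustrative keys in place of the project's const.* names.
    cluster_info = {'id': 1, 'os_id': 2}  # os-only cluster: no flavor section
    roles_info = cluster_info.setdefault('flavor', {}).setdefault('roles', [])
    assert roles_info == []
    assert cluster_info == {'id': 1, 'os_id': 2, 'flavor': {'roles': []}}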
@@ -245,9 +246,7 @@ class ActionHelper(object):
        info[const.ROLES] = ActionHelper._get_role_names(info[const.ROLES])

        # TODO(grace): Is following line necessary??
        roles_info = info.setdefault(const.ROLES, [])
        if not roles_info:
            raise Exception("Host(id=%d) hasn't set any roles!" % host_id)
        info.setdefault(const.ROLES, [])

        config = cluster_db.get_cluster_host_config(user,
                                                    cluster_id,
@@ -180,7 +180,16 @@ def update_progress():
                'is not in adapter_mapping %s',
                clusterhost, adapter_id, adapter_mapping
            )
            continue
        adapter = adapter_mapping[adapter_id]
        if 'package_installer' not in adapter:
            logging.info(
                'ignore clusterhost %s '
                'since the package_installer is not defined '
                'in adapter %s',
                clusterhost, adapter
            )
            continue
        package_installer = adapter['package_installer']
        clusterhost['package_installer'] = package_installer
        clusterhost_state = cluster_api.get_clusterhost_self_state(
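In update_progress the new guard simply skips clusterhosts whose adapter defines no package_installer, since an os-only adapter has nothing to poll beyond the OS installer. A rough standalone sketch of that filtering; the dict shapes here are assumptions, not the real task payload:

    def hosts_with_package_installer(clusterhosts, adapter_mapping):
        """Yield clusterhosts annotated with their adapter's package_installer."""
        for clusterhost in clusterhosts:
            adapter = adapter_mapping.get(clusterhost.get('adapter_id'))
            if not adapter or 'package_installer' not in adapter:
                continue  # os-only adapter: no package-installer progress to track
            yield dict(clusterhost, package_installer=adapter['package_installer'])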
@@ -53,67 +53,3 @@ def lock(lock_name, blocking=True, timeout=10):
            logging.debug('released lock %s', lock_name)
        else:
            logging.debug('nothing to release %s', lock_name)


"""
def update_cluster_hosts(cluster_hosts,
                         cluster_filter=None, host_filter=None):
    session = database.current_session()
    os_versions = {}
    target_systems = {}
    updated_cluster_hosts = {}
    clusters = session.query(models.Cluster).all()
    for cluster in clusters:
        if cluster_hosts and (
            cluster.id not in cluster_hosts and
            str(cluster.id) not in cluster_hosts and
            cluster.name not in cluster_hosts
        ):
            logging.debug('ignore cluster %s since it is not in %s',
                          cluster.id, cluster_hosts)
            continue

        adapter = cluster.adapter
        if not cluster.adapter:
            logging.error('there is no adapter for cluster %s',
                          cluster.id)
            continue

        if cluster_filter and not cluster_filter(cluster):
            logging.debug('filter cluster %s', cluster.id)
            continue

        updated_cluster_hosts[cluster.id] = []
        os_versions[cluster.id] = 'CentOS-6.5-x86_64'
        target_systems[cluster.id] = 'openstack'

        if cluster.id in cluster_hosts:
            hosts = cluster_hosts[cluster.id]
        elif str(cluster.id) in cluster_hosts:
            hosts = cluster_hosts[str(cluster.id)]
        elif cluster.name in cluster_hosts:
            hosts = cluster_hosts[cluster.name]
        else:
            hosts = []

        if not hosts:
            hosts = [host.id for host in cluster.hosts]

        for host in cluster.hosts:
            if (
                host.id not in hosts and
                str(host.id) not in hosts and
                host.hostname not in hosts
            ):
                logging.debug('ignore host %s which is not in %s',
                              host.id, hosts)
                continue

            if host_filter and not host_filter(host):
                logging.debug('filter host %s', host.id)
                continue

            updated_cluster_hosts[cluster.id].append(host.id)

    return (updated_cluster_hosts, os_versions, target_systems)
"""
@@ -889,7 +889,7 @@ def get_clusterhost_deployed_config(session, getter, clusterhost_id, **kwargs):
def _update_clusterhost_config(session, updater, clusterhost, **kwargs):
    from compass.db.api import host as host_api
    ignore_keys = []
    if host_api.is_host_editable(
    if not host_api.is_host_editable(
        session, clusterhost.host, updater,
        exception_when_not_editable=False
    ):
@@ -22,10 +22,15 @@ from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
from sqlalchemy.pool import QueuePool
from sqlalchemy.pool import SingletonThreadPool
from sqlalchemy.pool import StaticPool
from threading import local

from compass.db import exception
from compass.db import models
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
@@ -34,6 +39,13 @@ SESSION = sessionmaker(autocommit=False, autoflush=False)
SCOPED_SESSION = None
SESSION_HOLDER = local()

POOL_MAPPING = {
    'instant': NullPool,
    'static': StaticPool,
    'queued': QueuePool,
    'thread_single': SingletonThreadPool
}


def init(database_url=None):
    """Initialize database.
@@ -46,13 +58,20 @@ def init(database_url=None):
        database_url = setting.SQLALCHEMY_DATABASE_URI
    logging.info('init database %s', database_url)
    root_logger = logging.getLogger()
    fine_debug = root_logger.isEnabledFor(logging.DEBUG - 1)
    fine_debug = root_logger.isEnabledFor(logsetting.LOGLEVEL_MAPPING['fine'])
    if fine_debug:
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
    finest_debug = root_logger.isEnabledFor(
        logsetting.LOGLEVEL_MAPPING['finest']
    )
    if finest_debug:
        logging.getLogger('sqlalchemy.dialects').setLevel(logging.INFO)
        logging.getLogger('sqlalchemy.pool').setLevel(logging.INFO)
        logging.getLogger('sqlalchemy.orm').setLevel(logging.INFO)
    poolclass = POOL_MAPPING[setting.SQLALCHEMY_DATABASE_POOL_TYPE]
    ENGINE = create_engine(
        database_url, convert_unicode=True,
        pool_recycle=setting.SQLALCHEMY_DATABASE_POOL_RECYCLE_PERIOD,
        pool_size=setting.SQLALCHEMY_DATABASE_POOL_SIZE
        poolclass=poolclass
    )
    SESSION.configure(bind=ENGINE)
    SCOPED_SESSION = scoped_session(SESSION)
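The database change replaces the hard-coded pool_size with a pool class selected by SQLALCHEMY_DATABASE_POOL_TYPE, presumably because pool classes such as NullPool do not accept a pool size. A sketch of how the mapping is meant to be used, with an in-memory SQLite URL and a literal pool type standing in for the configured setting values:

    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool, QueuePool, SingletonThreadPool, StaticPool

    POOL_MAPPING = {
        'instant': NullPool,
        'static': StaticPool,
        'queued': QueuePool,
        'thread_single': SingletonThreadPool,
    }

    pool_type = 'instant'  # e.g. setting.SQLALCHEMY_DATABASE_POOL_TYPE in the site config
    engine = create_engine('sqlite://', poolclass=POOL_MAPPING[pool_type])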
@@ -31,7 +31,7 @@ def model_query(session, model):
    if not issubclass(model, models.BASE):
        raise exception.DatabaseException("model should be subclass of BASE!")

    return session.query(model)
    return session.query(model).with_lockmode('update')


def _default_list_condition_func(col_attr, value, condition_func):
@@ -26,8 +26,7 @@ import os.path
# default setting
CONFIG_DIR = '/etc/compass'
SQLALCHEMY_DATABASE_URI = 'sqlite://'
SQLALCHEMY_DATABASE_POOL_SIZE = 10
SQLALCHEMY_DATABASE_POOL_RECYCLE_PERIOD = 7200
SQLALCHEMY_DATABASE_POOL_TYPE = 'static'
INSTALLATION_LOGDIR = {
    'CobblerInstaller': '/var/log/cobbler/anamon',
    'ChefInstaller': '/var/log/cobbler/anamon'
@@ -5,6 +5,7 @@ DATABASE_PASSWORD = 'root'
DATABASE_SERVER = '127.0.0.1:3306'
DATABASE_NAME = 'db'
SQLALCHEMY_DATABASE_URI = '%s://%s:%s@%s/%s' % (DATABASE_TYPE, DATABASE_USER, DATABASE_PASSWORD, DATABASE_SERVER, DATABASE_NAME)
SQLALCHEMY_DATABASE_POOL_TYPE = 'instant'
INSTALLATION_LOGDIR = {
    'CobblerInstaller': '/var/log/cobbler/anamon',
    'ChefInstaller': '/var/log/cobbler/anamon'
@@ -19,6 +19,7 @@ export LANGUAGE=${LANGUAGE:-'EN'}
export TIMEZONE=${TIMEZONE:-'America/Los_Angeles'}
export HOSTNAMES=${HOSTNAMES:-'allinone'}
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)centos.*'}
export ADAPTER_NAME=${ADAPTER_NAME:=''}
export ADAPTER_TARGET_SYSTEM_PATTERN=${ADAPTER_TARGET_SYSTEM_PATTERN:-'openstack.*'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'allinone'}
export HOST_ROLES=${HOST_ROLES:-'allinone=allinone-compute'}
@@ -156,7 +156,7 @@ else
    POLL_SWITCHES_FLAG="poll_switches"
fi

${CLIENT_SCRIPT} --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" --compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" --cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" --hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" --adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" --adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" --http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" --ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" --search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" --server_credential="${SERVER_CREDENTIAL}" --service_credentials="${SERVICE_CREDENTIALS}" --console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" --network_mapping="${NETWORK_MAPPING}" --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" --machines="${machines}" --switch_credential="${SWITCH_CREDENTIAL}" --deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}"
${CLIENT_SCRIPT} --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" --compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" --cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" --hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" --adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_name="${ADAPTER_NAME}" --adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" --adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" --http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" --ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" --search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" --server_credential="${SERVER_CREDENTIAL}" --service_credentials="${SERVICE_CREDENTIALS}" --console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" --network_mapping="${NETWORK_MAPPING}" --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" --machines="${machines}" --switch_credential="${SWITCH_CREDENTIAL}" --deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}"
rc=$?
deactivate
# Tear down machines after the test
regtest/regtest6.conf (new file, 19 lines)
@@ -0,0 +1,19 @@
# conf to run 1 instance with single-controller-multi-compute flavor
export VIRT_NUM=${VIRT_NUM:-'1'}
export VIRT_CPUS=${VIRT_CPUS:-'10'}
export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'}
export HOSTNAMES=${HOSTNAMES:-'osonly'}
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source ${REGTEST_DIR}/regtest.conf

export HOST_ROLES=''
export ADAPTER_NAME='os_only'
export ADAPTER_TARGET_SYSTEM_PATTERN=''
export ADAPTER_FLAVOR_PATTERN=''
export SERVICE_CREDENTIALS=''
export CONSOLE_CREDENTIALS=''
export NETWORK_MAPPING=''
export DEFAULT_ROLES=''
export DASHBOARD_URL=''