Reworking password lookup and config lookup
1. Removing anvil.ini and replacing with component configuration files 2. Merging those component configuration files into the component options 3. Reworking all uses of the previous configuration to use this new config 4. Removing the environment variable searching (for now) and the env writing (for now)
This commit is contained in:
@@ -19,15 +19,19 @@ import collections
|
||||
import copy
|
||||
import functools
|
||||
|
||||
from anvil import cfg
|
||||
from anvil import colorizer
|
||||
from anvil import exceptions as excp
|
||||
from anvil import importer
|
||||
from anvil import log as logging
|
||||
from anvil import packager
|
||||
from anvil import passwords as pw
|
||||
from anvil import phase
|
||||
from anvil import settings
|
||||
from anvil import shell as sh
|
||||
from anvil import utils
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -41,12 +45,15 @@ class PhaseFunctors(object):
|
||||
class Action(object):
|
||||
__meta__ = abc.ABCMeta
|
||||
|
||||
def __init__(self, distro, cfg, root_dir, name, **kwargs):
|
||||
def __init__(self, name, distro, root_dir, **kwargs):
|
||||
self.distro = distro
|
||||
self.cfg = cfg
|
||||
self.root_dir = root_dir
|
||||
self.name = name
|
||||
self.keep_old = kwargs.get('keep_old', False)
|
||||
self.interpolator = cfg.YamlInterpolator(settings.COMPONENT_CONF_DIR)
|
||||
self.passwords = pw.ProxyPassword()
|
||||
if kwargs.get('prompt_for_passwords'):
|
||||
self.passwords.resolvers.append(pw.InputPassword())
|
||||
self.passwords.resolvers.append(pw.RandomPassword())
|
||||
self.force = kwargs.get('force', False)
|
||||
|
||||
@property
|
||||
@@ -80,7 +87,7 @@ class Action(object):
|
||||
'trace_dir': trace_dir,
|
||||
}
|
||||
|
||||
def _merge_options(self, name, override_opts, base_opts, component_opts, persona_opts):
|
||||
def _merge_options(self, name, base_opts, component_opts, persona_opts):
|
||||
opts = {}
|
||||
opts.update(self._get_component_dirs(name))
|
||||
if base_opts:
|
||||
@@ -89,8 +96,6 @@ class Action(object):
|
||||
opts.update(component_opts)
|
||||
if persona_opts:
|
||||
opts.update(persona_opts)
|
||||
if override_opts:
|
||||
opts.update(override_opts)
|
||||
return opts
|
||||
|
||||
def _merge_subsystems(self, component_subsys, desired_subsys):
|
||||
@@ -119,6 +124,12 @@ class Action(object):
|
||||
opts.update(self._get_component_dirs(name))
|
||||
return opts
|
||||
|
||||
def _get_interp_options(self, name):
|
||||
base = {}
|
||||
for c in ['general', name]:
|
||||
base.update(self.interpolator.extract(c))
|
||||
return base
|
||||
|
||||
def _construct_instances(self, persona):
|
||||
"""
|
||||
Create component objects for each component in the persona.
|
||||
@@ -126,14 +137,10 @@ class Action(object):
|
||||
persona_subsystems = persona.wanted_subsystems or {}
|
||||
persona_opts = persona.component_options or {}
|
||||
instances = {}
|
||||
base_opts = {
|
||||
'keep_old': self.keep_old,
|
||||
}
|
||||
for c in persona.wanted_components:
|
||||
((cls, distro_opts), siblings) = self.distro.extract_component(c, self.lookup_name)
|
||||
LOG.debug("Constructing component %r (%s)", c, utils.obj_name(cls))
|
||||
kvs = {}
|
||||
kvs['runner'] = self
|
||||
kvs['name'] = c
|
||||
kvs['packager_functor'] = functools.partial(packager.get_packager,
|
||||
distro=self.distro)
|
||||
@@ -142,7 +149,9 @@ class Action(object):
|
||||
kvs['instances'] = {}
|
||||
kvs['subsystems'] = {}
|
||||
kvs['siblings'] = {}
|
||||
kvs['options'] = self._get_sibling_options(c, base_opts)
|
||||
kvs['passwords'] = self.passwords
|
||||
kvs['distro'] = self.distro
|
||||
kvs['options'] = self._get_sibling_options(c, self._get_interp_options(c))
|
||||
LOG.debug("Constructing %s siblings:", c)
|
||||
utils.log_object(siblings, logger=LOG, level=logging.DEBUG)
|
||||
LOG.debug("Using params:")
|
||||
@@ -150,7 +159,7 @@ class Action(object):
|
||||
siblings = self._construct_siblings(siblings, dict(kvs))
|
||||
# Now inject the full options
|
||||
kvs['instances'] = instances
|
||||
kvs['options'] = self._merge_options(c, kvs, base_opts,
|
||||
kvs['options'] = self._merge_options(c, self._get_interp_options(c),
|
||||
distro_opts, (persona_opts.get(c) or {}))
|
||||
kvs['subsystems'] = self._merge_subsystems((distro_opts.pop('subsystems', None) or {}),
|
||||
(persona_subsystems.get(c) or {}))
|
||||
|
@@ -16,7 +16,6 @@
|
||||
|
||||
from anvil import action
|
||||
from anvil import colorizer
|
||||
from anvil import env_rc
|
||||
from anvil import log
|
||||
from anvil import settings
|
||||
from anvil import shell as sh
|
||||
@@ -46,13 +45,6 @@ class InstallAction(action.Action):
|
||||
return 'install'
|
||||
|
||||
def _run(self, persona, component_order, instances):
|
||||
# Update/write out the 'bash' env exports file
|
||||
(settings_am, out_fns) = env_rc.write(self,
|
||||
components=[(c, instances[c]) for c in component_order])
|
||||
utils.log_iterable(out_fns,
|
||||
header="Wrote out %s environment 'exports' to the following" % (settings_am),
|
||||
logger=LOG
|
||||
)
|
||||
self._run_phase(
|
||||
PhaseFunctors(
|
||||
start=lambda i: LOG.info('Downloading %s.', colorizer.quote(i.name)),
|
||||
|
268
anvil/cfg.py
268
anvil/cfg.py
@@ -24,14 +24,15 @@ import re
|
||||
# This one keeps comments but has some weirdness with it
|
||||
import iniparse
|
||||
|
||||
import yaml
|
||||
|
||||
from anvil import env
|
||||
from anvil import exceptions as excp
|
||||
from anvil import log as logging
|
||||
from anvil import shell as sh
|
||||
from anvil import utils
|
||||
|
||||
ENV_PAT = re.compile(r"^\s*\$\{([\w\d]+):\-(.*)\}\s*$")
|
||||
SUB_MATCH = re.compile(r"(?:\$\(([\w\d]+):([\w\d]+))\)")
|
||||
PW_SECTION = 'passwords'
|
||||
INTERP_PAT = r"\s*\$\(([\w\d]+):([\w\d]+)\)\s*"
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
@@ -111,201 +112,74 @@ class RewritableConfigParser(IgnoreMissingMixin, iniparse.RawConfigParser, Strin
|
||||
self.read(f)
|
||||
|
||||
|
||||
class ProxyConfig(object):
|
||||
class YamlInterpolator(object):
|
||||
def __init__(self, base):
|
||||
self.in_progress = {}
|
||||
self.interpolated = {}
|
||||
self.base = base
|
||||
|
||||
def __init__(self):
|
||||
self.read_resolvers = []
|
||||
self.set_resolvers = []
|
||||
self.opts_cache = dict()
|
||||
self.opts_read = dict()
|
||||
self.opts_set = dict()
|
||||
self.pw_resolvers = []
|
||||
def _interpolate_iterable(self, what):
|
||||
n_what = []
|
||||
for v in what:
|
||||
n_what.append(self._interpolate(v))
|
||||
return n_what
|
||||
|
||||
def add_password_resolver(self, resolver):
|
||||
self.pw_resolvers.append(resolver)
|
||||
def _interpolate_dictionary(self, what):
|
||||
n_what = {}
|
||||
for (k, v) in what.iteritems():
|
||||
n_what[k] = self._interpolate(v)
|
||||
return n_what
|
||||
|
||||
def add_read_resolver(self, resolver):
|
||||
self.read_resolvers.append(resolver)
|
||||
|
||||
def add_set_resolver(self, resolver):
|
||||
self.set_resolvers.append(resolver)
|
||||
|
||||
def get_password(self, option, prompt_text='', length=8, **kwargs):
|
||||
password = ''
|
||||
for resolver in self.pw_resolvers:
|
||||
found_password = resolver.get_password(option,
|
||||
prompt_text=prompt_text,
|
||||
length=length, **kwargs)
|
||||
if found_password is not None and len(found_password):
|
||||
password = found_password
|
||||
break
|
||||
if len(password) == 0:
|
||||
LOG.warn("Password provided for %r is empty", option)
|
||||
self.set(PW_SECTION, option, password)
|
||||
return password
|
||||
|
||||
def get(self, section, option):
|
||||
val = self._get(section, option)
|
||||
LOG.debug("Fetched option %r with value %r.",
|
||||
make_id(section, option), val)
|
||||
return val
|
||||
|
||||
def _get(self, section, option):
|
||||
# Try the cache first
|
||||
cache_key = make_id(section, option)
|
||||
if cache_key in self.opts_cache:
|
||||
return self.opts_cache[cache_key]
|
||||
# Check the resolvers
|
||||
val = None
|
||||
for resolver in self.read_resolvers:
|
||||
found_val = resolver.get(section, option)
|
||||
if found_val is not None:
|
||||
val = found_val
|
||||
break
|
||||
# Store in cache if we found something
|
||||
if val is not None:
|
||||
self.opts_cache[cache_key] = val
|
||||
# Mark as read
|
||||
if section not in self.opts_read:
|
||||
self.opts_read[section] = set()
|
||||
self.opts_read[section].add(option)
|
||||
return val
|
||||
|
||||
def getdefaulted(self, section, option, default_value=''):
|
||||
val = self.get(section, option)
|
||||
if not val or not val.strip():
|
||||
return default_value
|
||||
return val
|
||||
|
||||
def getfloat(self, section, option):
|
||||
try:
|
||||
return float(self.get(section, option))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
def getint(self, section, option):
|
||||
try:
|
||||
return int(self.get(section, option))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
def getboolean(self, section, option):
|
||||
return utils.make_bool(self.getdefaulted(section, option))
|
||||
|
||||
def set(self, section, option, value):
|
||||
for resolver in self.set_resolvers:
|
||||
resolver.set(section, option, value)
|
||||
cache_key = make_id(section, option)
|
||||
self.opts_cache[cache_key] = value
|
||||
if section not in self.opts_set:
|
||||
self.opts_set[section] = set()
|
||||
self.opts_set[section].add(option)
|
||||
return value
|
||||
|
||||
|
||||
class ConfigResolver(object):
|
||||
|
||||
def __init__(self, backing):
|
||||
self.backing = backing
|
||||
|
||||
def get(self, section, option):
|
||||
return self._resolve_value(section, option, self._get_bashed(section, option))
|
||||
|
||||
def set(self, section, option, value):
|
||||
self.backing.set(section, option, value)
|
||||
|
||||
def _resolve_value(self, section, option, value_gotten):
|
||||
if not value_gotten:
|
||||
if section == 'host' and option == 'ip':
|
||||
value_gotten = utils.get_host_ip()
|
||||
return value_gotten
|
||||
|
||||
def _getdefaulted(self, section, option, default_value):
|
||||
val = self.get(section, option)
|
||||
if not val or not val.strip():
|
||||
return default_value
|
||||
return val
|
||||
|
||||
def _get_bashed(self, section, option):
|
||||
value = self.backing.get(section, option)
|
||||
if value is None:
|
||||
return value
|
||||
extracted_val = ''
|
||||
mtch = ENV_PAT.match(value)
|
||||
if mtch:
|
||||
env_key = mtch.group(1).strip()
|
||||
def_val = mtch.group(2).strip()
|
||||
if not def_val and not env_key:
|
||||
msg = "Invalid bash-like value %r" % (value)
|
||||
raise excp.BadParamException(msg)
|
||||
env_value = env.get_key(env_key)
|
||||
if env_value is None:
|
||||
extracted_val = self._resolve_replacements(def_val)
|
||||
else:
|
||||
extracted_val = env_value
|
||||
else:
|
||||
extracted_val = value
|
||||
return extracted_val
|
||||
|
||||
def _resolve_replacements(self, value):
|
||||
|
||||
# Allow for our simple replacement to occur
|
||||
def replacer(match):
|
||||
section = match.group(1)
|
||||
option = match.group(2)
|
||||
# We use the default fetcher here so that we don't try to put in None values...
|
||||
return self._getdefaulted(section, option, '')
|
||||
|
||||
return SUB_MATCH.sub(replacer, value)
|
||||
|
||||
|
||||
class CliResolver(object):
|
||||
|
||||
def __init__(self, cli_args):
|
||||
self.cli_args = cli_args
|
||||
|
||||
def get(self, section, option):
|
||||
return self.cli_args.get(make_id(section, option))
|
||||
|
||||
@classmethod
|
||||
def create(cls, cli_args):
|
||||
parsed_args = dict()
|
||||
for c in cli_args:
|
||||
if not c:
|
||||
continue
|
||||
split_up = c.split("/")
|
||||
if len(split_up) != 3:
|
||||
LOG.warn("Incorrectly formatted cli option: %r", c)
|
||||
else:
|
||||
section = (split_up[0]).strip()
|
||||
if not section or section.lower() == 'default':
|
||||
section = 'DEFAULT'
|
||||
option = split_up[1].strip()
|
||||
if not option:
|
||||
LOG.warn("Badly formatted cli option - no option name: %r", c)
|
||||
else:
|
||||
parsed_args[make_id(section, option)] = split_up[2]
|
||||
return cls(parsed_args)
|
||||
|
||||
|
||||
class EnvResolver(object):
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def _form_key(self, section, option):
|
||||
return make_id(section, option)
|
||||
|
||||
def get(self, section, option):
|
||||
return env.get_key(self._form_key(section, option))
|
||||
|
||||
|
||||
def make_id(section, option):
|
||||
joinwhat = []
|
||||
if section:
|
||||
joinwhat.append(str(section))
|
||||
if option:
|
||||
joinwhat.append(str(option))
|
||||
return "/".join(joinwhat)
|
||||
def _interpolate(self, v):
|
||||
n_v = v
|
||||
if v and isinstance(v, (basestring, str)):
|
||||
n_v = self._interpolate_string(v)
|
||||
elif isinstance(v, dict):
|
||||
n_v = self._interpolate_dictionary(v)
|
||||
elif isinstance(v, (list, set, tuple)):
|
||||
n_v = self._interpolate_iterable(v)
|
||||
return n_v
|
||||
|
||||
|
||||
def _interpolate_string(self, what):
|
||||
if not re.search(INTERP_PAT, what):
|
||||
return what
|
||||
|
||||
def replacer(match):
|
||||
who = match.group(1).strip()
|
||||
key = match.group(2).strip()
|
||||
special_val = self._interpolate_special(who, key)
|
||||
if special_val is not None:
|
||||
return str(special_val)
|
||||
if who in self.interpolated:
|
||||
return str(self.interpolated[who][key])
|
||||
if who in self.in_progress:
|
||||
return str(self.in_progress[who][key])
|
||||
contents = self.extract(who)
|
||||
return str(contents[key])
|
||||
|
||||
return re.sub(INTERP_PAT, replacer, what)
|
||||
|
||||
def _interpolate_special(self, who, key):
|
||||
if key == 'ip' and who == 'auto':
|
||||
return utils.get_host_ip()
|
||||
if key == 'user' and who == 'auto':
|
||||
return sh.getuser()
|
||||
if who == 'auto':
|
||||
raise KeyError("Unknown auto key type %s" % (key))
|
||||
return None
|
||||
|
||||
def extract(self, root):
|
||||
if root in self.interpolated:
|
||||
return self.interpolated[root]
|
||||
pth = sh.joinpths(self.base, "%s.yaml" % (root))
|
||||
if not sh.isfile(pth):
|
||||
return {}
|
||||
self.in_progress[root] = yaml.load(sh.load_file(pth))
|
||||
interped = self._interpolate(self.in_progress[root])
|
||||
del(self.in_progress[root])
|
||||
self.interpolated[root] = interped
|
||||
# Do a final run over the interpolated to pick up any stragglers
|
||||
# that were recursively 'included' (but not filled in)
|
||||
for (troot, contents) in self.interpolated.items():
|
||||
self.interpolated[troot] = self._interpolate(contents)
|
||||
return self.interpolated[root]
|
||||
|
@@ -35,15 +35,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Component(object):
|
||||
def __init__(self,
|
||||
subsystems,
|
||||
runner,
|
||||
instances,
|
||||
options,
|
||||
name,
|
||||
siblings,
|
||||
*args,
|
||||
**kargs):
|
||||
def __init__(self, name, subsystems, instances, options, siblings, distro, passwords, **kwargs):
|
||||
|
||||
# Subsystems this was requested with
|
||||
self.subsystems = subsystems
|
||||
@@ -60,21 +52,24 @@ class Component(object):
|
||||
# All the other class names that can be used alongside this class
|
||||
self.siblings = siblings
|
||||
|
||||
# The runner has a reference to us, so use a weakref here to
|
||||
# avoid breaking garbage collection.
|
||||
self.runner = weakref.proxy(runner)
|
||||
|
||||
# Parts of the global runner context that we use
|
||||
self.cfg = runner.cfg
|
||||
|
||||
# The distribution 'interaction object'
|
||||
self.distro = runner.distro
|
||||
self.distro = distro
|
||||
|
||||
# Turned on and off as phases get activated
|
||||
self.activated = False
|
||||
|
||||
def get_option(self, opt_name, def_val=None):
|
||||
return self.options.get(opt_name, def_val)
|
||||
# How we get any passwords we need
|
||||
self.passwords = passwords
|
||||
|
||||
def get_password(self, option, prompt_text, **kwargs):
|
||||
return self.passwords.get_password(option, prompt_text, **kwargs)
|
||||
|
||||
def get_option(self, option, default_value=None):
|
||||
option_value = utils.get_from_path(self.options, option)
|
||||
if option_value is None:
|
||||
return default_value
|
||||
else:
|
||||
return option_value
|
||||
|
||||
@property
|
||||
def env_exports(self):
|
||||
|
@@ -76,7 +76,7 @@ class PkgInstallComponent(component.Component):
|
||||
default_packager_class=self.distro.package_manager_class)
|
||||
|
||||
def _get_download_config(self):
|
||||
return (None, None)
|
||||
return None
|
||||
|
||||
def _clear_package_duplicates(self, pkg_list):
|
||||
dup_free_list = []
|
||||
@@ -88,13 +88,13 @@ class PkgInstallComponent(component.Component):
|
||||
return dup_free_list
|
||||
|
||||
def _get_download_location(self):
|
||||
(section, key) = self._get_download_config()
|
||||
if not section or not key:
|
||||
return (None, None)
|
||||
uri = self.cfg.getdefaulted(section, key).strip()
|
||||
key = self._get_download_config()
|
||||
if not key:
|
||||
return None
|
||||
uri = self.get_option(key, '').strip()
|
||||
if not uri:
|
||||
raise ValueError(("Could not find uri in config to download "
|
||||
"from at section %s for option %s") % (section, key))
|
||||
"from at section %s for option %s") % (section, key))
|
||||
return (uri, self.get_option('app_dir'))
|
||||
|
||||
def download(self):
|
||||
@@ -166,7 +166,8 @@ class PkgInstallComponent(component.Component):
|
||||
|
||||
@property
|
||||
def link_dir(self):
|
||||
return sh.joinpths(self.distro.get_command_config('base_link_dir'), self.name)
|
||||
link_dir_base = self.distro.get_command_config('base_link_dir')
|
||||
return sh.joinpths(link_dir_base, self.name)
|
||||
|
||||
@property
|
||||
def symlinks(self):
|
||||
@@ -236,7 +237,7 @@ class PythonInstallComponent(PkgInstallComponent):
|
||||
]
|
||||
|
||||
def _get_download_config(self):
|
||||
return ('download_from', self.name.replace("-", "_").lower().strip())
|
||||
return 'get_from'
|
||||
|
||||
@property
|
||||
def python_directories(self):
|
||||
@@ -488,7 +489,7 @@ class PythonRuntime(ProgramRuntime):
|
||||
# Anything to start?
|
||||
am_started = 0
|
||||
# Select how we are going to start it
|
||||
run_type = self.cfg.getdefaulted("DEFAULT", "run_type", 'anvil.runners.fork:ForkRunner')
|
||||
run_type = self.get_option("run_type", 'anvil.runners.fork:ForkRunner')
|
||||
starter_cls = importer.import_entry_point(run_type)
|
||||
starter = starter_cls(self)
|
||||
for i, app_info in enumerate(self.apps_to_start):
|
||||
|
@@ -40,10 +40,6 @@ SQL_RESET_PW_LINKS = [
|
||||
# Copies from helper
|
||||
BASE_ERROR = dbhelper.BASE_ERROR
|
||||
|
||||
# PW keys we warm up so u won't be prompted later
|
||||
PASSWORD_PROMPT = dbhelper.PASSWORD_PROMPT
|
||||
WARMUP_PWS = [('sql', PASSWORD_PROMPT)]
|
||||
|
||||
|
||||
class DBUninstaller(comp.PkgUninstallComponent):
|
||||
|
||||
@@ -52,11 +48,10 @@ class DBUninstaller(comp.PkgUninstallComponent):
|
||||
self.runtime = self.siblings.get('running')
|
||||
|
||||
def warm_configs(self):
|
||||
for key, prompt in WARMUP_PWS:
|
||||
self.cfg.get_password(key, prompt)
|
||||
dbhelper.get_shared_passwords(self)
|
||||
|
||||
def pre_uninstall(self):
|
||||
dbtype = self.cfg.get("db", "type")
|
||||
dbtype = self.get_option("type")
|
||||
dbactions = self.distro.get_command_config(dbtype, quiet=True)
|
||||
try:
|
||||
if dbactions:
|
||||
@@ -67,10 +62,10 @@ class DBUninstaller(comp.PkgUninstallComponent):
|
||||
LOG.info("Ensuring your database is started before we operate on it.")
|
||||
self.runtime.restart()
|
||||
params = {
|
||||
'OLD_PASSWORD': self.cfg.get_password('sql', PASSWORD_PROMPT),
|
||||
'OLD_PASSWORD': dbhelper.get_shared_passwords(self)['pw'],
|
||||
'NEW_PASSWORD': RESET_BASE_PW,
|
||||
'USER': self.cfg.getdefaulted("db", "sql_user", 'root'),
|
||||
}
|
||||
'USER': self.get_option("user", 'root'),
|
||||
}
|
||||
cmds = [{'cmd': pwd_cmd}]
|
||||
utils.execute_template(*cmds, params=params)
|
||||
except IOError:
|
||||
@@ -91,19 +86,17 @@ class DBInstaller(comp.PkgInstallComponent):
|
||||
# This dictionary will be used for parameter replacement
|
||||
# In pre-install and post-install sections
|
||||
mp = comp.PkgInstallComponent.config_params(self, config_fn)
|
||||
adds = {
|
||||
'PASSWORD': self.cfg.get_password("sql", PASSWORD_PROMPT),
|
||||
'BOOT_START': ("%s" % (True)).lower(),
|
||||
'USER': self.cfg.getdefaulted("db", "sql_user", 'root'),
|
||||
'SERVICE_HOST': self.cfg.get('host', 'ip'),
|
||||
'HOST_IP': self.cfg.get('host', 'ip'),
|
||||
}
|
||||
mp.update(adds)
|
||||
mp.update({
|
||||
'PASSWORD': dbhelper.get_shared_passwords(self)['pw'],
|
||||
'BOOT_START': "true",
|
||||
'USER': self.get_option("user", 'root'),
|
||||
'SERVICE_HOST': self.get_option('ip'),
|
||||
'HOST_IP': self.get_option('ip'),
|
||||
})
|
||||
return mp
|
||||
|
||||
def warm_configs(self):
|
||||
for key, prompt in WARMUP_PWS:
|
||||
self.cfg.get_password(key, prompt)
|
||||
dbhelper.get_shared_passwords(self)
|
||||
|
||||
@abc.abstractmethod
|
||||
def _configure_db_confs(self):
|
||||
@@ -116,7 +109,7 @@ class DBInstaller(comp.PkgInstallComponent):
|
||||
self._configure_db_confs()
|
||||
|
||||
# Extra actions to ensure we are granted access
|
||||
dbtype = self.cfg.get("db", "type")
|
||||
dbtype = self.get_option("type")
|
||||
dbactions = self.distro.get_command_config(dbtype, quiet=True)
|
||||
|
||||
# Set your password
|
||||
@@ -129,10 +122,10 @@ class DBInstaller(comp.PkgInstallComponent):
|
||||
LOG.info("Ensuring your database is started before we operate on it.")
|
||||
self.runtime.restart()
|
||||
params = {
|
||||
'NEW_PASSWORD': self.cfg.get_password("sql", PASSWORD_PROMPT),
|
||||
'USER': self.cfg.getdefaulted("db", "sql_user", 'root'),
|
||||
'NEW_PASSWORD': dbhelper.get_shared_passwords(self)['pw'],
|
||||
'USER': self.get_option("user", 'root'),
|
||||
'OLD_PASSWORD': RESET_BASE_PW,
|
||||
}
|
||||
}
|
||||
cmds = [{'cmd': pwd_cmd}]
|
||||
utils.execute_template(*cmds, params=params)
|
||||
except IOError:
|
||||
@@ -140,18 +133,20 @@ class DBInstaller(comp.PkgInstallComponent):
|
||||
"set by a previous process."))
|
||||
|
||||
# Ensure access granted
|
||||
user = self.cfg.getdefaulted("db", "sql_user", 'root')
|
||||
dbhelper.grant_permissions(self.cfg, self.distro, user,
|
||||
restart_func=self.runtime.restart)
|
||||
dbhelper.grant_permissions(dbtype,
|
||||
distro=self.distro,
|
||||
user=self.get_option("user", 'root'),
|
||||
restart_func=self.runtime.restart,
|
||||
**dbhelper.get_shared_passwords(self))
|
||||
|
||||
|
||||
class DBRuntime(comp.ProgramRuntime):
|
||||
def __init__(self, *args, **kargs):
|
||||
comp.ProgramRuntime.__init__(self, *args, **kargs)
|
||||
self.wait_time = max(self.cfg.getint('DEFAULT', 'service_wait_seconds'), 1)
|
||||
self.wait_time = max(int(self.get_option('service_wait_seconds')), 1)
|
||||
|
||||
def _get_run_actions(self, act, exception_cls):
|
||||
db_type = self.cfg.get("db", "type")
|
||||
db_type = self.get_option("type")
|
||||
distro_options = self.distro.get_command_config(db_type)
|
||||
if distro_options is None:
|
||||
raise NotImplementedError(BASE_ERROR % (act, db_type))
|
||||
@@ -194,7 +189,7 @@ class DBRuntime(comp.ProgramRuntime):
|
||||
combined.find('unrecognized') != -1:
|
||||
st = comp.STATUS_STOPPED
|
||||
return [
|
||||
comp.ProgramStatus(name=self.cfg.get("db", "type"),
|
||||
comp.ProgramStatus(name=self.get_option("type"),
|
||||
status=st,
|
||||
details=(sysout + stderr).strip()),
|
||||
]
|
||||
|
@@ -94,8 +94,16 @@ class GlanceInstaller(GlanceMixin, comp.PythonInstallComponent):
|
||||
self._setup_db()
|
||||
|
||||
def _setup_db(self):
|
||||
dbhelper.drop_db(self.cfg, self.distro, DB_NAME)
|
||||
dbhelper.create_db(self.cfg, self.distro, DB_NAME)
|
||||
dbhelper.drop_db(distro=self.distro,
|
||||
dbtype=self.get_option('db.type'),
|
||||
dbname=DB_NAME,
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self)))
|
||||
dbhelper.create_db(distro=self.distro,
|
||||
dbtype=self.get_option('db.type'),
|
||||
dbname=DB_NAME,
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self)))
|
||||
|
||||
def source_config(self, config_fn):
|
||||
real_fn = config_fn
|
||||
@@ -105,22 +113,28 @@ class GlanceInstaller(GlanceMixin, comp.PythonInstallComponent):
|
||||
return (fn, sh.load_file(fn))
|
||||
|
||||
def _config_adjust_registry(self, contents, fn):
|
||||
params = ghelper.get_shared_params(self.cfg)
|
||||
params = ghelper.get_shared_params(**self.options)
|
||||
with io.BytesIO(contents) as stream:
|
||||
config = cfg.RewritableConfigParser()
|
||||
config.readfp(stream)
|
||||
config.set('DEFAULT', 'debug', True)
|
||||
config.set('DEFAULT', 'verbose', True)
|
||||
config.set('DEFAULT', 'bind_port', params['endpoints']['registry']['port'])
|
||||
config.set('DEFAULT', 'sql_connection',
|
||||
dbhelper.fetch_dbdsn(self.cfg, DB_NAME, utf8=True))
|
||||
config.set('DEFAULT', 'sql_connection', dbhelper.fetch_dbdsn(dbname=DB_NAME,
|
||||
utf8=True,
|
||||
dbtype=self.get_option('db.type'),
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self))))
|
||||
config.remove_option('DEFAULT', 'log_file')
|
||||
config.set('paste_deploy', 'flavor', 'keystone')
|
||||
config.set('paste_deploy', 'flavor', self.get_option('paste_flavor'))
|
||||
return config.stringify(fn)
|
||||
return contents
|
||||
|
||||
def _config_adjust_paste(self, contents, fn):
|
||||
params = khelper.get_shared_params(self.cfg, 'glance')
|
||||
params = khelper.get_shared_params(ip=self.get_option('ip'),
|
||||
service_user='glance',
|
||||
**utils.merge_dicts(self.get_option('keystone'),
|
||||
khelper.get_shared_passwords(self)))
|
||||
with io.BytesIO(contents) as stream:
|
||||
config = cfg.RewritableConfigParser()
|
||||
config.readfp(stream)
|
||||
@@ -139,7 +153,7 @@ class GlanceInstaller(GlanceMixin, comp.PythonInstallComponent):
|
||||
return contents
|
||||
|
||||
def _config_adjust_api(self, contents, fn):
|
||||
params = ghelper.get_shared_params(self.cfg)
|
||||
params = ghelper.get_shared_params(**self.options)
|
||||
with io.BytesIO(contents) as stream:
|
||||
config = cfg.RewritableConfigParser()
|
||||
config.readfp(stream)
|
||||
@@ -149,10 +163,13 @@ class GlanceInstaller(GlanceMixin, comp.PythonInstallComponent):
|
||||
config.set('DEFAULT', 'default_store', 'file')
|
||||
config.set('DEFAULT', 'filesystem_store_datadir', img_store_dir)
|
||||
config.set('DEFAULT', 'bind_port', params['endpoints']['public']['port'])
|
||||
config.set('DEFAULT', 'sql_connection',
|
||||
dbhelper.fetch_dbdsn(self.cfg, DB_NAME, utf8=True))
|
||||
config.set('DEFAULT', 'sql_connection', dbhelper.fetch_dbdsn(dbname=DB_NAME,
|
||||
utf8=True,
|
||||
dbtype=self.get_option('db.type'),
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self))))
|
||||
config.remove_option('DEFAULT', 'log_file')
|
||||
config.set('paste_deploy', 'flavor', 'keystone')
|
||||
config.set('paste_deploy', 'flavor', self.get_option('paste_flavor'))
|
||||
LOG.debug("Ensuring file system store directory %r exists and is empty." % (img_store_dir))
|
||||
sh.deldir(img_store_dir)
|
||||
self.tracewriter.dirs_made(*sh.mkdirslist(img_store_dir))
|
||||
@@ -192,7 +209,7 @@ class GlanceRuntime(GlanceMixin, comp.PythonRuntime):
|
||||
def __init__(self, *args, **kargs):
|
||||
comp.PythonRuntime.__init__(self, *args, **kargs)
|
||||
self.bin_dir = sh.joinpths(self.get_option('app_dir'), 'bin')
|
||||
self.wait_time = max(self.cfg.getint('DEFAULT', 'service_wait_seconds'), 1)
|
||||
self.wait_time = max(int(self.get_option('service_wait_seconds')), 1)
|
||||
|
||||
@property
|
||||
def apps_to_start(self):
|
||||
@@ -212,7 +229,7 @@ class GlanceRuntime(GlanceMixin, comp.PythonRuntime):
|
||||
return APP_OPTIONS.get(app)
|
||||
|
||||
def _get_image_urls(self):
|
||||
uris = self.cfg.getdefaulted('glance', 'image_urls', '').split(",")
|
||||
uris = self.get_option('image_urls', [])
|
||||
return [u.strip() for u in uris if len(u.strip())]
|
||||
|
||||
def post_start(self):
|
||||
@@ -222,6 +239,9 @@ class GlanceRuntime(GlanceMixin, comp.PythonRuntime):
|
||||
LOG.info("Waiting %s seconds so that glance can start up before image install." % (self.wait_time))
|
||||
sh.sleep(self.wait_time)
|
||||
params = {}
|
||||
params['glance'] = ghelper.get_shared_params(self.cfg)
|
||||
params['keystone'] = khelper.get_shared_params(self.cfg, 'glance')
|
||||
params['glance'] = ghelper.get_shared_params(**self.options)
|
||||
params['keystone'] = khelper.get_shared_params(ip=self.get_option('ip'),
|
||||
service_user='glance',
|
||||
**utils.merge_dicts(self.get_option('keystone'),
|
||||
khelper.get_shared_passwords(self)))
|
||||
ghelper.UploadService(params).install(self._get_image_urls())
|
||||
|
@@ -29,14 +29,19 @@ BASE_ERROR = 'Currently we do not know how to %r for database type %r'
|
||||
PASSWORD_PROMPT = 'the database user'
|
||||
|
||||
|
||||
def drop_db(cfg, distro, dbname):
|
||||
dbtype = cfg.get("db", "type")
|
||||
def get_shared_passwords(component):
|
||||
mp = {}
|
||||
mp['pw'] = component.get_password('sql', PASSWORD_PROMPT)
|
||||
return mp
|
||||
|
||||
|
||||
def drop_db(distro, dbtype, user, pw, dbname, **kwargs):
|
||||
dropcmd = distro.get_command(dbtype, 'drop_db', silent=True)
|
||||
if dropcmd:
|
||||
LOG.info('Dropping %s database: %s', colorizer.quote(dbtype), colorizer.quote(dbname))
|
||||
params = dict()
|
||||
params['PASSWORD'] = cfg.get_password("sql", PASSWORD_PROMPT)
|
||||
params['USER'] = cfg.getdefaulted("db", "sql_user", 'root')
|
||||
params['PASSWORD'] = pw
|
||||
params['USER'] = user
|
||||
params['DB'] = dbname
|
||||
cmds = list()
|
||||
cmds.append({
|
||||
@@ -49,14 +54,14 @@ def drop_db(cfg, distro, dbname):
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
def create_db(cfg, distro, dbname, charset='utf8'):
|
||||
dbtype = cfg.get("db", "type")
|
||||
def create_db(distro, dbtype, user, pw, dbname, **kwargs):
|
||||
createcmd = distro.get_command(dbtype, 'create_db', silent=True)
|
||||
if createcmd:
|
||||
charset = kwargs.get('charset', 'utf8')
|
||||
LOG.info('Creating %s database: %s (%s)', colorizer.quote(dbtype), colorizer.quote(dbname), charset)
|
||||
params = dict()
|
||||
params['PASSWORD'] = cfg.get_password("sql", PASSWORD_PROMPT)
|
||||
params['USER'] = cfg.getdefaulted("db", "sql_user", 'root')
|
||||
params['PASSWORD'] = pw
|
||||
params['USER'] = user
|
||||
params['DB'] = dbname
|
||||
params['CHARACTER_SET'] = charset
|
||||
cmds = list()
|
||||
@@ -70,11 +75,10 @@ def create_db(cfg, distro, dbname, charset='utf8'):
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
def grant_permissions(cfg, distro, user, restart_func=None):
|
||||
def grant_permissions(dbtype, distro, user, pw, restart_func=None):
|
||||
"""
|
||||
Grant permissions on the database.
|
||||
"""
|
||||
dbtype = cfg.get("db", "type")
|
||||
dbactions = distro.get_command_config(dbtype, quiet=True)
|
||||
if dbactions:
|
||||
grant_cmd = distro.get_command(dbtype, 'grant_all')
|
||||
@@ -83,7 +87,7 @@ def grant_permissions(cfg, distro, user, restart_func=None):
|
||||
LOG.info("Ensuring the database is started.")
|
||||
restart_func()
|
||||
params = {
|
||||
'PASSWORD': cfg.get_password("sql", PASSWORD_PROMPT),
|
||||
'PASSWORD': pw,
|
||||
'USER': user,
|
||||
}
|
||||
cmds = [{'cmd': grant_cmd}]
|
||||
@@ -92,19 +96,15 @@ def grant_permissions(cfg, distro, user, restart_func=None):
|
||||
return
|
||||
|
||||
|
||||
def fetch_dbdsn(cfg, dbname, utf8=False):
|
||||
def fetch_dbdsn(dbtype, user, host, port, pw, dbname, **kwargs):
|
||||
"""Return the database connection string, including password."""
|
||||
user = cfg.get("db", "sql_user")
|
||||
host = cfg.get("db", "sql_host")
|
||||
port = cfg.get("db", "port")
|
||||
pw = cfg.get_password("sql", PASSWORD_PROMPT)
|
||||
# Form the dsn (from components we have...)
|
||||
# dsn = "<driver>://<username>:<password>@<host>:<port>/<database>"
|
||||
# See: http://en.wikipedia.org/wiki/Data_Source_Name
|
||||
if not host:
|
||||
msg = "Unable to fetch a database dsn - no sql host found"
|
||||
raise excp.BadParamException(msg)
|
||||
driver = cfg.get("db", "type")
|
||||
driver = dbtype
|
||||
if not driver:
|
||||
msg = "Unable to fetch a database dsn - no db driver type found"
|
||||
raise excp.BadParamException(msg)
|
||||
@@ -120,7 +120,7 @@ def fetch_dbdsn(cfg, dbname, utf8=False):
|
||||
dsn += ":" + str(port)
|
||||
if dbname:
|
||||
dsn += "/" + str(dbname)
|
||||
if utf8:
|
||||
if kwargs.get('utf8'):
|
||||
dsn += "?charset=utf8"
|
||||
else:
|
||||
dsn += "/"
|
||||
|
@@ -391,18 +391,14 @@ class UploadService:
|
||||
return am_installed
|
||||
|
||||
|
||||
def get_shared_params(cfg):
|
||||
mp = dict()
|
||||
def get_shared_params(ip, api_port=9292, protocol='http', reg_port=9191, **kwargs):
|
||||
mp = {}
|
||||
mp['service_host'] = ip
|
||||
|
||||
host_ip = cfg.get('host', 'ip')
|
||||
mp['service_host'] = host_ip
|
||||
|
||||
glance_host = cfg.getdefaulted('glance', 'glance_host', host_ip)
|
||||
glance_port = cfg.getdefaulted('glance', 'glance_port', '9292')
|
||||
glance_protocol = cfg.getdefaulted('glance', 'glance_protocol', 'http')
|
||||
|
||||
# Registry should be on the same host
|
||||
glance_registry_port = cfg.getdefaulted('glance', 'glance_registry_port', '9191')
|
||||
glance_host = ip
|
||||
glance_port = api_port
|
||||
glance_protocol = protocol
|
||||
glance_registry_port = reg_port
|
||||
|
||||
# Uri's of the http/https endpoints
|
||||
mp['endpoints'] = {
|
||||
|
@@ -24,12 +24,10 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
class Initializer(object):
|
||||
|
||||
def __init__(self, cfg):
|
||||
def __init__(self, service_token, admin_uri):
|
||||
# Late load since its using a client lib that is only avail after install...
|
||||
self.cfg = cfg
|
||||
client_cls = importer.import_entry_point("keystoneclient.v2_0.client:Client")
|
||||
self.client = client_cls(token=self.cfg['service_token'],
|
||||
endpoint=self.cfg['endpoints']['admin']['uri'])
|
||||
self.client = client_cls(token=service_token, endpoint=admin_uri)
|
||||
|
||||
def _create_tenants(self, tenants):
|
||||
tenants_made = dict()
|
||||
@@ -134,9 +132,30 @@ class Initializer(object):
|
||||
self._create_endpoints(endpoints, services_made)
|
||||
|
||||
|
||||
def get_shared_params(cfg, service_user=None):
|
||||
def get_shared_passwords(component):
|
||||
mp = {}
|
||||
mp['service_token'] = component.get_password(
|
||||
"service_token",
|
||||
'the service admin token',
|
||||
)
|
||||
mp['admin_password'] = component.get_password(
|
||||
'horizon_keystone_admin',
|
||||
'the horizon and keystone admin',
|
||||
length=20,
|
||||
)
|
||||
mp['demo_password'] = mp['admin_password']
|
||||
mp['service_password'] = component.get_password(
|
||||
'service_password',
|
||||
'service authentication',
|
||||
)
|
||||
return mp
|
||||
|
||||
mp = dict()
|
||||
|
||||
def get_shared_params(ip, service_token, admin_password, demo_password, service_password,
|
||||
auth_host, auth_port, auth_proto, service_host, service_port, service_proto,
|
||||
**kwargs):
|
||||
|
||||
mp = {}
|
||||
|
||||
# Tenants and users
|
||||
mp['tenants'] = ['admin', 'service', 'demo']
|
||||
@@ -149,42 +168,32 @@ def get_shared_params(cfg, service_user=None):
|
||||
mp['admin_user'] = 'admin'
|
||||
|
||||
mp['service_tenant'] = 'service'
|
||||
if service_user:
|
||||
mp['users'].append(service_user)
|
||||
mp['service_user'] = service_user
|
||||
if 'service_user' in kwargs:
|
||||
mp['users'].append(kwargs['service_user'])
|
||||
mp['service_user'] = kwargs['service_user']
|
||||
|
||||
# Tokens and passwords
|
||||
mp['service_token'] = cfg.get_password(
|
||||
"service_token",
|
||||
'the service admin token',
|
||||
)
|
||||
mp['admin_password'] = cfg.get_password(
|
||||
'horizon_keystone_admin',
|
||||
'the horizon and keystone admin',
|
||||
length=20,
|
||||
)
|
||||
mp['demo_password'] = mp['admin_password']
|
||||
mp['service_password'] = cfg.get_password(
|
||||
'service_password',
|
||||
'service authentication',
|
||||
)
|
||||
mp['service_token'] = service_token
|
||||
mp['admin_password'] = admin_password
|
||||
mp['demo_password'] = demo_password
|
||||
mp['service_password'] = service_password
|
||||
|
||||
host_ip = cfg.get('host', 'ip')
|
||||
host_ip = ip
|
||||
mp['service_host'] = host_ip
|
||||
|
||||
# Components of the admin endpoint
|
||||
keystone_auth_host = cfg.getdefaulted('keystone', 'keystone_auth_host', host_ip)
|
||||
keystone_auth_port = cfg.getdefaulted('keystone', 'keystone_auth_port', '35357')
|
||||
keystone_auth_proto = cfg.getdefaulted('keystone', 'keystone_auth_protocol', 'http')
|
||||
keystone_auth_host = auth_host
|
||||
keystone_auth_port = auth_port
|
||||
keystone_auth_proto = auth_proto
|
||||
keystone_auth_uri = utils.make_url(keystone_auth_proto,
|
||||
keystone_auth_host, keystone_auth_port, path="v2.0")
|
||||
keystone_auth_host, keystone_auth_port, path="v2.0")
|
||||
|
||||
# Components of the public+internal endpoint
|
||||
keystone_service_host = cfg.getdefaulted('keystone', 'keystone_service_host', host_ip)
|
||||
keystone_service_port = cfg.getdefaulted('keystone', 'keystone_service_port', '5000')
|
||||
keystone_service_proto = cfg.getdefaulted('keystone', 'keystone_service_protocol', 'http')
|
||||
keystone_service_host = service_host
|
||||
keystone_service_port = service_port
|
||||
keystone_service_proto = service_proto
|
||||
keystone_service_uri = utils.make_url(keystone_service_proto,
|
||||
keystone_service_host, keystone_service_port, path="v2.0")
|
||||
keystone_service_host, keystone_service_port, path="v2.0")
|
||||
|
||||
mp['endpoints'] = {
|
||||
'admin': {
|
||||
|
@@ -64,45 +64,46 @@ def canon_virt_driver(virt_driver):
|
||||
return virt_driver
|
||||
|
||||
|
||||
def get_shared_params(cfgobj):
|
||||
mp = dict()
|
||||
|
||||
host_ip = cfgobj.get('host', 'ip')
|
||||
mp['service_host'] = host_ip
|
||||
nova_host = cfgobj.getdefaulted('nova', 'nova_host', host_ip)
|
||||
nova_protocol = cfgobj.getdefaulted('nova', 'nova_protocol', 'http')
|
||||
def get_shared_params(ip, protocol,
|
||||
api_host, api_port,
|
||||
s3_host, s3_port,
|
||||
volume_host, volume_port,
|
||||
ec2_host, ec2_port,
|
||||
ec2_admin_host, ec2_admin_port):
|
||||
mp = {}
|
||||
mp['service_host'] = ip
|
||||
|
||||
# Uri's of the various nova endpoints
|
||||
mp['endpoints'] = {
|
||||
'ec2_admin': {
|
||||
'uri': utils.make_url(nova_protocol, nova_host, 8773, "services/Admin"),
|
||||
'port': 8773,
|
||||
'host': host_ip,
|
||||
'protocol': nova_protocol,
|
||||
'uri': utils.make_url(protocol, ec2_admin_host, ec2_admin_port, "services/Admin"),
|
||||
'port': ec2_admin_port,
|
||||
'host': ec2_admin_host,
|
||||
'protocol': protocol,
|
||||
},
|
||||
'ec2_cloud': {
|
||||
'uri': utils.make_url(nova_protocol, nova_host, 8773, "services/Cloud"),
|
||||
'port': 8773,
|
||||
'host': host_ip,
|
||||
'protocol': nova_protocol,
|
||||
'uri': utils.make_url(protocol, ec2_host, ec2_port, "services/Cloud"),
|
||||
'port': ec2_port,
|
||||
'host': ec2_host,
|
||||
'protocol': protocol,
|
||||
},
|
||||
'volume': {
|
||||
'uri': utils.make_url(nova_protocol, host_ip, 8776, "v1"),
|
||||
'port': 8776,
|
||||
'host': host_ip,
|
||||
'protocol': nova_protocol,
|
||||
'uri': utils.make_url(protocol, volume_host, volume_port, "v1"),
|
||||
'port': volume_port,
|
||||
'host': volume_host,
|
||||
'protocol': protocol,
|
||||
},
|
||||
's3': {
|
||||
'uri': utils.make_url('http', host_ip, 3333),
|
||||
'port': 3333,
|
||||
'host': host_ip,
|
||||
'protocol': nova_protocol,
|
||||
'uri': utils.make_url(protocol, s3_host, s3_port),
|
||||
'port': s3_port,
|
||||
'host': s3_host,
|
||||
'protocol': protocol,
|
||||
},
|
||||
'api': {
|
||||
'uri': utils.make_url('http', host_ip, 8774, "v2"),
|
||||
'port': 8774,
|
||||
'host': host_ip,
|
||||
'protocol': nova_protocol,
|
||||
'uri': utils.make_url(protocol, api_host, api_port, "v2"),
|
||||
'port': api_port,
|
||||
'host': api_host,
|
||||
'protocol': protocol,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -117,16 +118,12 @@ class ConfConfigurator(object):
|
||||
|
||||
def __init__(self, installer):
|
||||
self.installer = weakref.proxy(installer)
|
||||
self.cfg = installer.cfg
|
||||
self.instances = installer.instances
|
||||
self.tracewriter = installer.tracewriter
|
||||
self.distro = installer.distro
|
||||
|
||||
def _getbool(self, name):
|
||||
return self.cfg.getboolean('nova', name)
|
||||
return bool(self.installer.get_option(name))
|
||||
|
||||
def _getstr(self, name, default=''):
|
||||
return self.cfg.getdefaulted('nova', name, default)
|
||||
return str(self.installer.get_option(name, default))
|
||||
|
||||
def verify(self):
|
||||
# Do a little check to make sure actually have that interface/s
|
||||
@@ -157,7 +154,7 @@ class ConfConfigurator(object):
|
||||
nova_conf = Conf(fn)
|
||||
|
||||
# Used more than once so we calculate it ahead of time
|
||||
hostip = self.cfg.get('host', 'ip')
|
||||
hostip = self._getstr('ip')
|
||||
|
||||
if self._getbool('verbose'):
|
||||
nova_conf.add('verbose', True)
|
||||
@@ -181,8 +178,14 @@ class ConfConfigurator(object):
|
||||
# The ip of where we are running
|
||||
nova_conf.add('my_ip', hostip)
|
||||
|
||||
dbdsn = dbhelper.fetch_dbdsn(dbname=DB_NAME,
|
||||
utf8=True,
|
||||
dbtype=self._getstr('db.type'),
|
||||
**utils.merge_dicts(self.installer.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self.installer)))
|
||||
|
||||
# Setup your sql connection
|
||||
nova_conf.add('sql_connection', dbhelper.fetch_dbdsn(self.cfg, DB_NAME))
|
||||
nova_conf.add('sql_connection', dbdsn)
|
||||
|
||||
# Configure anything libvirt related?
|
||||
virt_driver = canon_virt_driver(self._getstr('virt_driver'))
|
||||
@@ -223,7 +226,7 @@ class ConfConfigurator(object):
|
||||
nova_conf.add('s3_host', hostip)
|
||||
|
||||
# How is your message queue setup?
|
||||
mq_type = canon_mq_type(self.installer.get_option('mq'))
|
||||
mq_type = canon_mq_type(self._getstr('mq'))
|
||||
if mq_type == 'rabbit':
|
||||
nova_conf.add('rabbit_host', self.cfg.getdefaulted('rabbit', 'rabbit_host', hostip))
|
||||
nova_conf.add('rabbit_password', self.cfg.get("passwords", "rabbit"))
|
||||
@@ -358,7 +361,7 @@ class ConfConfigurator(object):
|
||||
def _configure_instances_path(self, instances_path, nova_conf):
|
||||
nova_conf.add('instances_path', instances_path)
|
||||
LOG.debug("Attempting to create instance directory: %r", instances_path)
|
||||
self.tracewriter.dirs_made(*sh.mkdirslist(instances_path))
|
||||
self.installer.tracewriter.dirs_made(*sh.mkdirslist(instances_path))
|
||||
LOG.debug("Adjusting permissions of instance directory: %r", instances_path)
|
||||
sh.chmod(instances_path, 0777)
|
||||
|
||||
|
@@ -21,3 +21,9 @@ LOG = log.getLogger(__name__)
|
||||
|
||||
# Partial of rabbit user prompt
|
||||
PW_USER_PROMPT = 'the rabbit user'
|
||||
|
||||
def get_shared_passwords(component):
|
||||
mp = {}
|
||||
mp['pw'] = component.get_password('rabbit', PW_USER_PROMPT)
|
||||
return mp
|
||||
|
||||
|
@@ -19,6 +19,7 @@ from anvil import components as comp
|
||||
from anvil import exceptions as excp
|
||||
from anvil import log as logging
|
||||
from anvil import shell as sh
|
||||
from anvil import utils
|
||||
|
||||
from anvil.components.helpers import db as dbhelper
|
||||
|
||||
@@ -132,22 +133,20 @@ class HorizonInstaller(comp.PythonInstallComponent):
|
||||
sh.execute(*DB_SYNC_CMD, cwd=self.get_option('app_dir'))
|
||||
|
||||
def _setup_db(self):
|
||||
dbhelper.drop_db(self.cfg, self.distro, DB_NAME)
|
||||
dbhelper.create_db(self.cfg, self.distro, DB_NAME)
|
||||
dbhelper.drop_db(distro=self.distro,
|
||||
dbtype=self.get_option('db.type'),
|
||||
dbname=DB_NAME,
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self)))
|
||||
dbhelper.create_db(distro=self.distro,
|
||||
dbtype=self.get_option('db.type'),
|
||||
dbname=DB_NAME,
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self)))
|
||||
|
||||
def pre_install(self):
|
||||
comp.PythonInstallComponent.pre_install(self)
|
||||
self.tracewriter.dirs_made(*sh.mkdirslist(self.log_dir))
|
||||
if self.cfg.getboolean('horizon', 'eliminate_pip_gits'):
|
||||
fn = sh.joinpths(self.get_option('app_dir'), 'tools', 'pip-requires')
|
||||
if sh.isfile(fn):
|
||||
new_lines = []
|
||||
for line in sh.load_file(fn).splitlines():
|
||||
if line.find("git://") != -1:
|
||||
new_lines.append("# %s" % (line))
|
||||
else:
|
||||
new_lines.append(line)
|
||||
sh.write_file(fn, "\n".join(new_lines))
|
||||
|
||||
def _config_fixups(self):
|
||||
pass
|
||||
@@ -162,9 +161,7 @@ class HorizonInstaller(comp.PythonInstallComponent):
|
||||
self._config_fixups()
|
||||
|
||||
def _get_apache_user_group(self):
|
||||
user = self.cfg.getdefaulted('horizon', 'apache_user', sh.getuser())
|
||||
group = self.cfg.getdefaulted('horizon', 'apache_group', sh.getgroupname())
|
||||
return (user, group)
|
||||
return (self.get_option('apache_user'), self.get_option('apache_group'))
|
||||
|
||||
def config_params(self, config_fn):
|
||||
# This dict will be used to fill in the configuration
|
||||
@@ -177,15 +174,15 @@ class HorizonInstaller(comp.PythonInstallComponent):
|
||||
mp['ACCESS_LOG'] = sh.joinpths(self.log_dir, APACHE_ACCESS_LOG_FN)
|
||||
mp['ERROR_LOG'] = sh.joinpths(self.log_dir, APACHE_ERROR_LOG_FN)
|
||||
mp['HORIZON_DIR'] = self.get_option('app_dir')
|
||||
mp['HORIZON_PORT'] = self.cfg.getdefaulted('horizon', 'port', APACHE_DEF_PORT)
|
||||
mp['HORIZON_PORT'] = self.get_option('port', APACHE_DEF_PORT)
|
||||
mp['VPN_DIR'] = sh.joinpths(self.get_option('app_dir'), "vpn")
|
||||
else:
|
||||
mp['OPENSTACK_HOST'] = self.cfg.get('host', 'ip')
|
||||
mp['OPENSTACK_HOST'] = self.get_option('ip')
|
||||
mp['DB_NAME'] = DB_NAME
|
||||
mp['DB_USER'] = self.cfg.getdefaulted('db', 'sql_user', 'root')
|
||||
mp['DB_PASSWORD'] = self.cfg.get_password('sql', dbhelper.PASSWORD_PROMPT)
|
||||
mp['DB_HOST'] = self.cfg.get("db", "sql_host")
|
||||
mp['DB_PORT'] = self.cfg.get("db", "port")
|
||||
mp['DB_USER'] = self.get_option('db.user')
|
||||
mp['DB_PASSWORD'] = dbhelper.get_shared_passwords(self)
|
||||
mp['DB_HOST'] = self.get_option("db.host")
|
||||
mp['DB_PORT'] = self.get_option("db.port")
|
||||
return mp
|
||||
|
||||
|
||||
|
@@ -101,8 +101,9 @@ class KeystoneInstaller(comp.PythonInstallComponent):
|
||||
|
||||
@property
|
||||
def env_exports(self):
|
||||
params = khelper.get_shared_params(self.cfg)
|
||||
to_set = dict()
|
||||
params = khelper.get_shared_params(**utils.merge_dicts(self.options,
|
||||
khelper.get_shared_passwords(self)))
|
||||
to_set = {}
|
||||
to_set['OS_PASSWORD'] = params['admin_password']
|
||||
to_set['OS_TENANT_NAME'] = params['demo_tenant']
|
||||
to_set['OS_USERNAME'] = params['demo_user']
|
||||
@@ -115,8 +116,17 @@ class KeystoneInstaller(comp.PythonInstallComponent):
|
||||
return list(CONFIGS)
|
||||
|
||||
def _setup_db(self):
|
||||
dbhelper.drop_db(self.cfg, self.distro, DB_NAME)
|
||||
dbhelper.create_db(self.cfg, self.distro, DB_NAME)
|
||||
dbhelper.drop_db(distro=self.distro,
|
||||
dbtype=self.get_option('db.type'),
|
||||
dbname=DB_NAME,
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self)))
|
||||
dbhelper.create_db(distro=self.distro,
|
||||
dbtype=self.get_option('db.type'),
|
||||
dbname=DB_NAME,
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self)))
|
||||
|
||||
|
||||
def source_config(self, config_fn):
|
||||
real_fn = config_fn
|
||||
@@ -144,7 +154,8 @@ class KeystoneInstaller(comp.PythonInstallComponent):
|
||||
return comp.PythonInstallComponent._config_param_replace(self, config_fn, contents, parameters)
|
||||
|
||||
def _config_adjust_root(self, contents, fn):
|
||||
params = khelper.get_shared_params(self.cfg)
|
||||
params = khelper.get_shared_params(**utils.merge_dicts(self.options,
|
||||
khelper.get_shared_passwords(self)))
|
||||
with io.BytesIO(contents) as stream:
|
||||
config = cfg.RewritableConfigParser()
|
||||
config.readfp(stream)
|
||||
@@ -155,7 +166,11 @@ class KeystoneInstaller(comp.PythonInstallComponent):
|
||||
config.set('DEFAULT', 'debug', True)
|
||||
config.set('catalog', 'driver', 'keystone.catalog.backends.sql.Catalog')
|
||||
config.remove_option('DEFAULT', 'log_config')
|
||||
config.set('sql', 'connection', dbhelper.fetch_dbdsn(self.cfg, DB_NAME, utf8=True))
|
||||
config.set('sql', 'connection', dbhelper.fetch_dbdsn(dbname=DB_NAME,
|
||||
utf8=True,
|
||||
dbtype=self.get_option('db.type'),
|
||||
**utils.merge_dicts(self.get_option('db'),
|
||||
dbhelper.get_shared_passwords(self))))
|
||||
config.set('ec2', 'driver', "keystone.contrib.ec2.backends.sql.Ec2")
|
||||
contents = config.stringify(fn)
|
||||
return contents
|
||||
@@ -169,7 +184,7 @@ class KeystoneInstaller(comp.PythonInstallComponent):
|
||||
return contents
|
||||
|
||||
def warm_configs(self):
|
||||
khelper.get_shared_params(self.cfg)
|
||||
khelper.get_shared_passwords(self)
|
||||
|
||||
def config_params(self, config_fn):
|
||||
# These be used to fill in the configuration params
|
||||
@@ -183,7 +198,7 @@ class KeystoneRuntime(comp.PythonRuntime):
|
||||
def __init__(self, *args, **kargs):
|
||||
comp.PythonRuntime.__init__(self, *args, **kargs)
|
||||
self.bin_dir = sh.joinpths(self.get_option('app_dir'), 'bin')
|
||||
self.wait_time = max(self.cfg.getint('DEFAULT', 'service_wait_seconds'), 1)
|
||||
self.wait_time = max(int(self.get_option('service_wait_seconds')), 1)
|
||||
self.init_fn = sh.joinpths(self.get_option('trace_dir'), INIT_WHAT_HAPPENED)
|
||||
|
||||
def post_start(self):
|
||||
@@ -193,13 +208,15 @@ class KeystoneRuntime(comp.PythonRuntime):
|
||||
LOG.info("Running commands to initialize keystone.")
|
||||
(fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
|
||||
LOG.debug("Initializing with contents of %s", fn)
|
||||
cfg = {
|
||||
'glance': ghelper.get_shared_params(self.cfg),
|
||||
'keystone': khelper.get_shared_params(self.cfg),
|
||||
'nova': nhelper.get_shared_params(self.cfg),
|
||||
}
|
||||
cfg = {}
|
||||
cfg['keystone'] = khelper.get_shared_params(**utils.merge_dicts(self.options, khelper.get_shared_passwords(self)))
|
||||
cfg['glance'] = ghelper.get_shared_params(ip=self.get_option('ip'),
|
||||
**self.get_option('glance'))
|
||||
cfg['nova'] = nhelper.get_shared_params(ip=self.get_option('ip'),
|
||||
**self.get_option('nova'))
|
||||
init_what = utils.param_replace_deep(copy.deepcopy(yaml.load(contents)), cfg)
|
||||
khelper.Initializer(cfg['keystone']).initialize(**init_what)
|
||||
khelper.Initializer(cfg['keystone']['service_token'],
|
||||
cfg['keystone']['endpoints']['admin']['uri']).initialize(**init_what)
|
||||
# Writing this makes sure that we don't init again
|
||||
sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
|
||||
LOG.info("If you wish to re-run initialization, delete %s", colorizer.quote(self.init_fn))
|
||||
|
@@ -137,7 +137,8 @@ class NovaMixin(object):
|
||||
class NovaUninstaller(NovaMixin, comp.PythonUninstallComponent):
|
||||
def __init__(self, *args, **kargs):
|
||||
comp.PythonUninstallComponent.__init__(self, *args, **kargs)
|
||||
self.virsh = lv.Virsh(self.cfg, self.distro)
|
||||
self.virsh = lv.Virsh(int(self.get_option('service_wait_seconds')),
|
||||
self.distro)
|
||||
|
||||
def pre_uninstall(self):
|
||||
self._clear_libvirt_domains()
|
||||
@@ -156,15 +157,16 @@ class NovaUninstaller(NovaMixin, comp.PythonUninstallComponent):
|
||||
LOG.info("Cleaning up your system by running nova cleaner script: %s", colorizer.quote(cleaner_fn))
|
||||
# These environment additions are important
|
||||
# in that they eventually affect how this script runs
|
||||
env = dict()
|
||||
env['ENABLED_SERVICES'] = ",".join(self._filter_subsystems())
|
||||
env = {
|
||||
'ENABLED_SERVICES': ",".join(self._filter_subsystems()),
|
||||
}
|
||||
sh.execute(cleaner_fn, run_as_root=True, env_overrides=env)
|
||||
|
||||
def _clear_libvirt_domains(self):
|
||||
virt_driver = nhelper.canon_virt_driver(self.cfg.get('nova', 'virt_driver'))
|
||||
virt_driver = nhelper.canon_virt_driver(self.get_option('virt_driver'))
|
||||
if virt_driver == 'libvirt':
|
||||
inst_prefix = self.cfg.getdefaulted('nova', 'instance_name_prefix', 'instance-')
|
||||
libvirt_type = lv.canon_libvirt_type(self.cfg.get('nova', 'libvirt_type'))
|
||||
inst_prefix = self.get_option('instance_name_prefix', 'instance-')
|
||||
libvirt_type = lv.canon_libvirt_type(self.get_option('libvirt_type'))
|
||||
self.virsh.clear_domains(libvirt_type, inst_prefix)
|
||||
|
||||
|
||||
@@ -182,9 +184,9 @@ class NovaInstaller(NovaMixin, comp.PythonInstallComponent):
|
||||
|
||||
@property
|
||||
def env_exports(self):
|
||||
to_set = dict()
|
||||
to_set['NOVA_VERSION'] = self.cfg.get('nova', 'nova_version')
|
||||
to_set['COMPUTE_API_VERSION'] = to_set['NOVA_VERSION']
|
||||
to_set = {}
|
||||
to_set['NOVA_VERSION'] = self.get_option('nova_version')
|
||||
to_set['COMPUTE_API_VERSION'] = self.get_option('nova_version')
return to_set

def verify(self):
@@ -195,10 +197,10 @@ class NovaInstaller(NovaMixin, comp.PythonInstallComponent):
warm_pws = list()
mq_type = nhelper.canon_mq_type(self.get_option('mq'))
if mq_type == 'rabbit':
warm_pws.append(['rabbit', rhelper.PW_USER_PROMPT])
driver_canon = nhelper.canon_virt_driver(self.cfg.get('nova', 'virt_driver'))
for pw_key, pw_prompt in warm_pws:
self.cfg.get_password(pw_key, pw_prompt)
rhelper.get_shared_passwords(self)
driver_canon = nhelper.canon_virt_driver(self.get_option('virt_driver'))
for (pw_key, pw_prompt) in warm_pws:
self.get_password(pw_key, pw_prompt)

def _sync_db(self):
LOG.info("Syncing nova to database named: %s", colorizer.quote(DB_NAME))
@@ -222,11 +224,20 @@ class NovaInstaller(NovaMixin, comp.PythonInstallComponent):
self.tracewriter.file_touched(tgt_fn)

def _setup_db(self):
dbhelper.drop_db(self.cfg, self.distro, DB_NAME)
dbhelper.drop_db(distro=self.distro,
dbtype=self.get_option('db.type'),
dbname=DB_NAME,
**utils.merge_dicts(self.get_option('db'),
dbhelper.get_shared_passwords(self)))
# Explicitly use latin1: to avoid lp#829209, nova expects the database to
# use latin1 by default, and then upgrades the database to utf8 (see the
# 082_essex.py in nova)
dbhelper.create_db(self.cfg, self.distro, DB_NAME, charset='latin1')
dbhelper.create_db(distro=self.distro,
dbtype=self.get_option('db.type'),
dbname=DB_NAME,
charset='latin1',
**utils.merge_dicts(self.get_option('db'),
dbhelper.get_shared_passwords(self)))

def _generate_nova_conf(self, fn):
LOG.debug("Generating dynamic content for nova: %s.", (fn))
@@ -243,7 +254,11 @@ class NovaInstaller(NovaMixin, comp.PythonInstallComponent):
return (fn, sh.load_file(fn))

def _config_adjust_paste(self, contents, fn):
params = khelper.get_shared_params(self.cfg, 'nova')
params = khelper.get_shared_params(ip=self.get_option('ip'),
service_user='nova',
**utils.merge_dicts(self.get_option('keystone'),
khelper.get_shared_passwords(self)))

with io.BytesIO(contents) as stream:
config = cfg.RewritableConfigParser()
config.readfp(stream)
@@ -299,8 +314,8 @@ class NovaInstaller(NovaMixin, comp.PythonInstallComponent):
class NovaRuntime(NovaMixin, comp.PythonRuntime):
def __init__(self, *args, **kargs):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.wait_time = max(self.cfg.getint('DEFAULT', 'service_wait_seconds'), 1)
self.virsh = lv.Virsh(self.cfg, self.distro)
self.wait_time = max(int(self.get_option('service_wait_seconds')), 1)
self.virsh = lv.Virsh(int(self.get_option('service_wait_seconds')), self.distro)
self.config_path = sh.joinpths(self.get_option('cfg_dir'), API_CONF)
self.bin_dir = sh.joinpths(self.get_option('app_dir'), BIN_DIR)
self.net_init_fn = sh.joinpths(self.get_option('trace_dir'), NET_INITED_FN)
@@ -315,17 +330,17 @@ class NovaRuntime(NovaMixin, comp.PythonRuntime):
'BIN_DIR': self.bin_dir
}
mp['BIN_DIR'] = self.bin_dir
if self.cfg.getboolean('nova', 'enable_fixed'):
if self.get_option('enable_fixed'):
# Create a fixed network
mp['FIXED_NETWORK_SIZE'] = self.cfg.getdefaulted('nova', 'fixed_network_size', '256')
mp['FIXED_RANGE'] = self.cfg.getdefaulted('nova', 'fixed_range', '10.0.0.0/24')
mp['FIXED_NETWORK_SIZE'] = self.get_option('fixed_network_size', '256')
mp['FIXED_RANGE'] = self.get_option('fixed_range', '10.0.0.0/24')
cmds.extend(FIXED_NET_CMDS)
if self.cfg.getboolean('nova', 'enable_floating'):
if self.get_option('enable_floating'):
# Create a floating network + test floating pool
cmds.extend(FLOATING_NET_CMDS)
mp['FLOATING_RANGE'] = self.cfg.getdefaulted('nova', 'floating_range', '172.24.4.224/28')
mp['TEST_FLOATING_RANGE'] = self.cfg.getdefaulted('nova', 'test_floating_range', '192.168.253.0/29')
mp['TEST_FLOATING_POOL'] = self.cfg.getdefaulted('nova', 'test_floating_pool', 'test')
mp['FLOATING_RANGE'] = self.get_option('floating_range', '172.24.4.224/28')
mp['TEST_FLOATING_RANGE'] = self.get_option('test_floating_range', '192.168.253.0/29')
mp['TEST_FLOATING_POOL'] = self.get_option('test_floating_pool', 'test')
# Anything to run??
if cmds:
LOG.info("Creating your nova network to be used with instances.")
@@ -355,9 +370,9 @@ class NovaRuntime(NovaMixin, comp.PythonRuntime):
def pre_start(self):
# Let the parent class do its thing
comp.PythonRuntime.pre_start(self)
virt_driver = nhelper.canon_virt_driver(self.cfg.get('nova', 'virt_driver'))
virt_driver = nhelper.canon_virt_driver(self.get_option('virt_driver'))
if virt_driver == 'libvirt':
virt_type = lv.canon_libvirt_type(self.cfg.get('nova', 'libvirt_type'))
virt_type = lv.canon_libvirt_type(self.get_option('libvirt_type'))
LOG.info("Checking that your selected libvirt virtualization type %s is working and running.", colorizer.quote(virt_type))
try:
self.virsh.check_virt(virt_type)
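Note on the lookups above: option names like 'db.type' reach into nested component options, and a second argument acts as a default (for example get_option('fixed_network_size', '256')). A minimal sketch of that behavior, assuming the merged options are plain nested dicts; the helper below is illustrative only, not the component base class's actual implementation:

def get_option(options, name, default=None):
    # Walk 'a.b.c' style keys through nested dicts, falling back to the default.
    current = options
    for part in name.split('.'):
        if isinstance(current, dict) and part in current:
            current = current[part]
        else:
            return default
    return current

opts = {'db': {'type': 'mysql', 'user': 'root'}, 'fixed_range': '10.0.0.0/24'}
assert get_option(opts, 'db.type') == 'mysql'
assert get_option(opts, 'fixed_network_size', '256') == '256'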
@@ -34,7 +34,7 @@ class Installer(comp.PythonInstallComponent):
return {}

def _get_download_config(self):
return (None, None)
return None


class Uninstaller(comp.PythonUninstallComponent):
@@ -28,12 +28,6 @@ LOG = logging.getLogger(__name__)
# Default password (guest)
RESET_BASE_PW = ''

# Config keys we warm up so u won't be prompted later
WARMUP_PWS = ['rabbit']

# Copies from helpers
PW_USER_PROMPT = rhelper.PW_USER_PROMPT


class RabbitUninstaller(comp.PkgUninstallComponent):
def __init__(self, *args, **kargs):
@@ -57,15 +51,14 @@ class RabbitInstaller(comp.PkgInstallComponent):
self.runtime = self.siblings.get('running')

def warm_configs(self):
for pw_key in WARMUP_PWS:
self.cfg.get_password(pw_key, PW_USER_PROMPT)
rhelper.get_shared_passwords(self)

def _setup_pw(self):
user_id = self.cfg.get('rabbit', 'rabbit_userid')
user_id = self.get_option('user_id')
LOG.info("Setting up your rabbit-mq %s password.", colorizer.quote(user_id))
self.runtime.restart()
passwd = self.cfg.get_password("rabbit", PW_USER_PROMPT)
cmd = self.distro.get_command('rabbit-mq', 'change_password') + [user_id, passwd]
cmd = list(self.distro.get_command('rabbit-mq', 'change_password'))
cmd += [user_id, rhelper.get_shared_passwords(self)['pw']]
sh.execute(*cmd, run_as_root=True)
LOG.info("Restarting so that your rabbit-mq password is reflected.")
self.runtime.restart()
@@ -78,7 +71,7 @@ class RabbitInstaller(comp.PkgInstallComponent):
class RabbitRuntime(comp.ProgramRuntime):
def __init__(self, *args, **kargs):
comp.ProgramRuntime.__init__(self, *args, **kargs)
self.wait_time = max(self.cfg.getint('DEFAULT', 'service_wait_seconds'), 1)
self.wait_time = max(int(self.get_option('service_wait_seconds')), 1)

def start(self):
if self.status()[0].status != comp.STATUS_STARTED:
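The rabbit helper's get_shared_passwords is used both to warm the password cache (warm_configs) and to fetch the actual value, keyed by 'pw' above. A minimal sketch of what such a helper could look like, assuming components expose get_password as shown in this diff; the prompt text and the dict layout here are assumptions, not the helper's real code:

PW_USER_PROMPT = 'the rabbit user'  # assumed prompt text, for illustration only

def get_shared_passwords(component):
    # Resolve (and cache, via the component's password proxy) the shared rabbit password.
    return {
        'pw': component.get_password('rabbit', PW_USER_PROMPT),
    }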
247 anvil/env_rc.py
@@ -1,247 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from urlparse import urlunparse
import re

from anvil import cfg
from anvil import colorizer
from anvil import env
from anvil import log as logging
from anvil import settings
from anvil import shell as sh
from anvil import utils

LOG = logging.getLogger(__name__)

# General extraction cfg keys + sections
CFG_MAKE = {
'FLAT_INTERFACE': ('nova', 'flat_interface'),
'HOST_IP': ('host', 'ip'),
}

# PW sections
PASSWORDS_MAKES = {
'ADMIN_PASSWORD': (cfg.PW_SECTION, 'horizon_keystone_admin'),
'SERVICE_PASSWORD': (cfg.PW_SECTION, 'service_password'),
'RABBIT_PASSWORD': (cfg.PW_SECTION, 'rabbit'),
'SERVICE_TOKEN': (cfg.PW_SECTION, 'service_token'),
'MYSQL_PASSWORD': (cfg.PW_SECTION, 'sql'),
}

# Install root output name and env variable name
INSTALL_ROOT = 'INSTALL_ROOT'

# Default ports
EC2_PORT = 8773
S3_PORT = 3333

# How we know if a line is an export or if it isn't (simple edition)
EXP_PAT = re.compile("^\s*export\s+(.*?)=(.*?)$", re.IGNORECASE)

# How we unquote a string (simple edition)
QUOTED_PAT = re.compile(r"^\s*[\"](.*)[\"]\s*$")

# Allow external includes via this template
EXTERN_TPL = """
# Allow local overrides of env variables using {fn}
if [ -f "{fn}" ]; then
source "{fn}"
fi
"""

# Attempt to use them from other installs (devstack and such)
EXTERN_INCLUDES = ['localrc', 'eucarc']


class RcWriter(object):

def __init__(self, cfg, root_dir, components):
self.cfg = cfg
self.root_dir = root_dir
self.components = components
self.lines = None
self.created = 0

def _make_export(self, export_name, value):
self.created += 1
return "export %s=%s" % (export_name, sh.shellquote(value))

def _make_dict_export(self, kvs):
lines = list()
for var_name in sorted(kvs.keys()):
var_value = kvs.get(var_name)
if var_value is not None:
lines.append(self._make_export(var_name, str(var_value)))
return lines

def _get_ec2_envs(self):
to_set = {}
ip = self.cfg.get('host', 'ip')
ec2_url_default = urlunparse(('http', "%s:%s" % (ip, EC2_PORT), "services/Cloud", '', '', ''))
to_set['EC2_URL'] = self.cfg.getdefaulted('extern', 'ec2_url', ec2_url_default)
s3_url_default = urlunparse(('http', "%s:%s" % (ip, S3_PORT), "services/Cloud", '', '', ''))
to_set['S3_URL'] = self.cfg.getdefaulted('extern', 's3_url', s3_url_default)
return to_set

def _generate_ec2_env(self):
lines = []
lines.append('# EC2 and/or S3 stuff')
lines.extend(self._make_dict_export(self._get_ec2_envs()))
lines.append("")
return lines

def _get_general_envs(self):
to_set = {}
for (out_name, cfg_data) in CFG_MAKE.items():
(section, key) = (cfg_data)
to_set[out_name] = self.cfg.get(section, key)
to_set[INSTALL_ROOT] = self.root_dir
return to_set

def _get_password_envs(self):
to_set = {}
for (out_name, cfg_data) in PASSWORDS_MAKES.items():
(section, key) = cfg_data
to_set[out_name] = self.cfg.get(section, key)
return to_set

def _generate_passwords(self):
lines = []
lines.append('# Password stuff')
lines.extend(self._make_dict_export(self._get_password_envs()))
lines.append("")
return lines

def _generate_general(self):
lines = []
lines.append('# General stuff')
lines.extend(self._make_dict_export(self._get_general_envs()))
lines.append("")
return lines

def _generate_lines(self):
lines = []
lines.extend(self._generate_general())
lines.extend(self._generate_passwords())
lines.extend(self._generate_ec2_env())
lines.extend(self._generate_extern_inc())
lines.extend(self._generate_components())
return lines

def _generate_components(self):
lines = []
for (c, component) in self.components:
there_envs = component.env_exports
if there_envs:
lines.append('# %s stuff' % (c.title().strip()))
lines.extend(self._make_dict_export(there_envs))
lines.append('')
return lines

def write(self, fn):
if self.lines is None:
self.lines = self._generate_lines()
out_lines = list(self.lines)
if sh.isfile(fn):
out_lines.insert(0, '')
out_lines.insert(0, '# Updated on %s' % (utils.rcf8222date()))
out_lines.insert(0, '')
else:
out_lines.insert(0, '')
out_lines.insert(0, '# Created on %s' % (utils.rcf8222date()))
# Don't use sh 'lib' here so that we always
# read this (even if dry-run)
with open(fn, 'a') as fh:
fh.write(utils.joinlinesep(*out_lines))

def _generate_extern_inc(self):
lines = []
lines.append('# External includes stuff')
for inc_fn in EXTERN_INCLUDES:
extern_inc = EXTERN_TPL.format(fn=inc_fn)
lines.append(extern_inc.strip())
lines.append('')
return lines


class RcReader(object):

def _is_comment(self, line):
if line.lstrip().startswith("#"):
return True
return False

def extract(self, fn):
contents = ''
LOG.debug("Loading bash 'style' resource file %r", fn)
try:
# Don't use sh here so that we always
# read this (even if dry-run)
with open(fn, 'r') as fh:
contents = fh.read()
except IOError as e:
return {}
return self._dict_convert(contents)

def _unescape_string(self, text):
return text.decode('string_escape').strip()

def _dict_convert(self, contents):
extracted_vars = {}
for line in contents.splitlines():
if self._is_comment(line):
continue
m = EXP_PAT.search(line)
if m:
key = m.group(1).strip()
value = m.group(2).strip()
quoted_mtch = QUOTED_PAT.match(value)
if quoted_mtch:
value = self._unescape_string(quoted_mtch.group(1))
extracted_vars[key] = value
return extracted_vars

def load(self, fn):
kvs = self.extract(fn)
for (key, value) in kvs.items():
env.set(key, value)
return len(kvs)


def load(read_fns=None):
if not read_fns:
read_fns = [
settings.gen_rc_filename('core'),
]
loaded_am = 0
for fn in read_fns:
am_loaded = RcReader().load(fn)
loaded_am += am_loaded
return (loaded_am, read_fns)


def write(action, write_fns=None, components=None):
if not components:
components = []
if not write_fns:
write_fns = [
settings.gen_rc_filename('core'),
]
writer = RcWriter(action.cfg, action.root_dir, components)
for fn in write_fns:
writer.write(fn)
return (writer.created, write_fns)
@@ -54,10 +54,9 @@ def canon_libvirt_type(virt_type):

class Virsh(object):

def __init__(self, config, distro):
self.cfg = config
def __init__(self, service_wait, distro):
self.distro = distro
self.wait_time = max(self.cfg.getint('DEFAULT', 'service_wait_seconds'), 1)
self.wait_time = max(int(service_wait), 1)

def _service_status(self):
cmd = self.distro.get_command('libvirt', 'status')
@@ -25,10 +25,39 @@ from anvil import cfg
LOG = logging.getLogger(__name__)


class InputPassword(object):
def __init__(self, cfg):
self.cfg = cfg
class ProxyPassword(object):
def __init__(self, cache=None):
if not cache:
self.cache = {}
else:
self.cache = cache
self.resolvers = []

def _valid_password(self, pw):
if pw is None:
return False
if len(pw) > 0:
return True
return False

def get_password(self, option, prompt_text='', length=8, **kwargs):
if option in self.cache:
return self.cache[option]
password = ''
for resolver in self.resolvers:
found_password = resolver.get_password(option,
prompt_text=prompt_text,
length=length, **kwargs)
if self._valid_password(found_password):
password = found_password
break
if len(password) == 0:
LOG.warn("Password provided for %r is empty", option)
self.cache[option] = password
return password


class InputPassword(object):
def _valid_password(self, pw):
cleaned_pw = pw.strip()
if len(cleaned_pw) == 0:
@@ -55,18 +84,7 @@ class InputPassword(object):
return self._prompt_user(kargs.get('prompt_text', '??'))


class ConfigPassword(object):
def __init__(self, cfg):
self.cfg = cfg

def get_password(self, option, **kargs):
return self.cfg.get(cfg.PW_SECTION, option)


class RandomPassword(object):
def __init__(self, cfg):
self.cfg = cfg

def generate_random(self, length):
"""Returns a randomly generated password of the specified length."""
LOG.debug("Generating a pseudo-random password of %d characters",
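ProxyPassword above is a cache plus an ordered resolver chain: the first resolver that returns a non-empty value wins, and the result is memoized per option. A small usage sketch with a stand-in resolver (the real chain appends InputPassword and RandomPassword; the stub below exists only for illustration):

class StaticPassword(object):
    # Illustrative resolver that always returns a fixed value.
    def __init__(self, value):
        self.value = value

    def get_password(self, option, **kwargs):
        return self.value

pws = ProxyPassword()
pws.resolvers.append(StaticPassword(''))        # empty value is invalid, so it is skipped
pws.resolvers.append(StaticPassword('s3cr3t'))  # first valid value wins

print(pws.get_password('rabbit'))  # 's3cr3t'
print(pws.get_password('rabbit'))  # served from the cache, resolvers not consulted again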
@@ -23,10 +23,10 @@ RC_FN_TEMPL = "%s.rc"
# Where the configs and templates should be at...
BIN_DIR = os.path.abspath(os.path.dirname(sys.argv[0]))
CONFIG_DIR = os.path.join(BIN_DIR, "conf")
COMPONENT_CONF_DIR = os.path.join(CONFIG_DIR, "components")
DISTRO_DIR = os.path.join(CONFIG_DIR, "distros")
TEMPLATE_DIR = os.path.join(CONFIG_DIR, "templates")
PERSONA_DIR = os.path.join(CONFIG_DIR, "personas")
CONFIG_LOCATION = os.path.join(CONFIG_DIR, 'anvil.ini')
TEMPLATE_DIR = os.path.join(CONFIG_DIR, "templates")


def gen_rc_filename(root_name):
@@ -116,6 +116,13 @@ def rcf8222date():
return strftime("%a, %d %b %Y %H:%M:%S", localtime())


def merge_dicts(*dicts):
merged = {}
for mp in dicts:
merged.update(mp)
return merged


def make_url(scheme, host, port=None,
path='', params='', query='', fragment=''):
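merge_dicts is what lets call sites such as dbhelper.create_db(..., **utils.merge_dicts(self.get_option('db'), dbhelper.get_shared_passwords(self))) combine component options with resolved passwords before expanding them as keyword arguments; later dicts win on key collisions. For example:

defaults = {'host': 'localhost', 'port': 3306, 'user': 'root'}
overrides = {'host': '10.0.0.5'}
print(merge_dicts(defaults, overrides))
# host comes from overrides, the rest from defaults -- later dicts win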
287 conf/anvil.ini
@@ -1,287 +0,0 @@
# Anvil's local configuration

# When a value looks like a bash variable + default then it is parsed like a bash
# variable and will perform similar lookups. Ie ${SQL_HOST:-localhost} will
# look in environment variable SQL_HOST and if that does not exist then
# localhost will be used instead.
#
# We also allow for simple referencing of other variables, similar to bash
# variables to occur when a keys value like the following format are found:
#
# web_host = ${RUNNING_HOST:-http://$(X:Y)}
#
# For this example, the RUNNING_HOST enviroment variable will be referenced.
# If it is not found (no value exists), then "http://$(X:Y)" will be
# examined and found to be contain a expression (denoted by "$(X:Y)").
#
# Then in that expression there are components of the format "X:Y" which the
# configuration class will attempt to resolve those values by looking up in the
# configuration file for a value in section "X" with option "Y" and replacing the
# retrieved value for what was previously "$(X:Y)". Multiple of these "expressions"
# are allowed and each will have its expression "text" replaced with the resolved
# value before the final value for the original variable is determined.
#
# For this example if the section X with option Y contained value "1.2.3.4" then
# the final string would be "http://1.2.3.4" which would then be cached as the value
# for option web_host.

[DEFAULT]

# Which run type to use.
run_type = ${RUN_TYPE:-anvil.runners.fork:ForkRunner}

# How many seconds to wait until a service comes online before using it.
# For example, before uploading to glance we need keystone and glance to be online.
# Sometimes this takes 5 to 10 seconds to start these up....
service_wait_seconds = ${SERVICE_WAIT_SECONDS:-5}

[host]

# Set api host endpoint
# If this is empty in code we will try to determine your network ip.
ip = ${HOST_IP:-}

[rabbit]

# Where is rabbit located?
rabbit_host = ${RABBIT_HOST:-$(host:ip)}

# Which rabbit user should be used
rabbit_userid = ${RABBIT_USER:-guest}

[db]

# Where you db is located at and how to access it.
sql_host = ${SQL_HOST:-localhost}
sql_user = ${SQL_USER:-root}
port = ${SQL_PORT:-3306}

# What type of database is this?
type = ${SQL_TYPE:-mysql}

[keystone]

# Where is the keystone auth host at?
keystone_auth_host = ${KEYSTONE_AUTH_HOST:-$(host:ip)}
keystone_auth_port = ${KEYSTONE_AUTH_PORT:-35357}
keystone_auth_protocol = ${KEYSTONE_AUTH_PROTOCOL:-http}

# Where is the keystone service host at?
keystone_service_host = ${KEYSTONE_SERVICE_HOST:-$(host:ip)}
keystone_service_port = ${KEYSTONE_SERVICE_PORT:-5000}
keystone_service_protocol = ${KEYSTONE_SERVICE_PROTOCOL:-http}

[glance]

glance_host = ${GLANCE_HOST:-$(host:ip)}
glance_port = ${GLANCE_HOSTPORT:-9292}
glance_protocol = ${GLANCE_PROTOCOL:-http}

# Specify a comma-separated list of images to download and install into glance.
image_urls = http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz,
http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz,

[nova]

# Should nova be in verbose mode?
verbose = ${NOVA_VERBOSE:-1}

# Force backing images to raw format?
force_raw_images = 1

# Set api_rate_limit = 0 (or blank) to turn OFF rate limiting
api_rate_limit = ${API_RATE_LIMIT:-}

# Currently novaclient needs you to specify the *compute api* version.
nova_version = ${NOVA_VERSION:-1.1}

# Which scheduler will nova be running with?
# Nova supports pluggable schedulers. FilterScheduler should work in most cases.
scheduler = ${NOVA_SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}

# Network settings
# Very useful to read over:
# http://docs.openstack.org/trunk/openstack-compute/admin/content/configuring-networking-on-the-compute-node.html

# A fixed network will be created for you (unless disabled)
enable_fixed = 1
fixed_range = ${NOVA_FIXED_RANGE:-10.0.0.0/24}
fixed_network_size = ${NOVA_FIXED_NETWORK_SIZE:-256}

# Which network manager and which interface should be used
network_manager = ${NET_MAN:-nova.network.manager.FlatDHCPManager}
public_interface = ${PUBLIC_INTERFACE:-eth0}

# DHCP Warning: If your flat interface device uses DHCP, there will be a hiccup while the network
# is moved from the flat interface to the flat network bridge. This will happen when you launch
# your first instance. Upon launch you will lose all connectivity to the node, and the vm launch will probably fail.
#
# If you are running on a single node and don't need to access the VMs from devices other than
# that node, you can set the flat interface to the same value as FLAT_NETWORK_BRIDGE.
# This will stop the network hiccup from occurring.
flat_interface = ${FLAT_INTERFACE:-eth0}
vlan_interface = ${VLAN_INTERFACE:-$(nova:public_interface)}
flat_network_bridge = ${FLAT_NETWORK_BRIDGE:-br100}

# If using a flat manager (not dhcp) then you probably want this on
flat_injected = 0

# A floating network will be created for you (unless disabled)
enable_floating = 1
floating_range = ${FLOATING_RANGE:-172.24.4.224/28}
test_floating_pool = ${TEST_FLOATING_POOL:-test}
test_floating_range = ${TEST_FLOATING_RANGE:-192.168.253.0/29}

# TODO document these
vncproxy_url = ${VNCPROXY_URL:-http://$(host:ip):6080/vnc_auto.html}
xvpvncproxy_url = ${XVPVNCPROXY_URL:-http://$(host:ip):6081/console}
vncserver_proxyclient_address = ${VNCSERVER_PROXYCLIENT_ADDRESS:-}
ec2_dmz_host = ${EC2_DMZ_HOST:-$(host:ip)}

# Adjust this if you want to have libvirt's vnc accessible
# Ie, making it 0.0.0.0 will allow easier access from external machines.
vncserver_listen = ${VNCSERVER_LISTEN:-127.0.0.1}

# This decides which firewall driver to use:
# The default here should work with linux + iptables + libvirt special sauce...
libvirt_firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver

# This is just a firewall based on iptables, for non-libvirt usage
basic_firewall_driver = nova.virt.firewall.IptablesFirewallDriver

# Volume settings
volume_group = ${VOLUME_GROUP:-nova-volumes}
volume_backing_file = ${VOLUME_BACKING_FILE:-}
volume_backing_file_size =${VOLUME_BACKING_FILE_SIZE:-2052M}
volume_name_prefix = ${VOLUME_NAME_PREFIX:-volume-}
volume_name_postfix = ${VOLUME_NAME_POSTFIX:-%08x}

# How instances will be named
instance_name_prefix = ${INSTANCE_NAME_PREFIX:-instance-}
instance_name_postfix = ${INSTANCE_NAME_POSTFIX:-%08x}

# Where instances will be stored, defaults to $NOVA_DIR/instances
instances_path = ${INSTANCES_PATH:-}

# Are we setup in multihost mode?
# Multi-host is a mode where each compute node runs its own network node.
# This allows network operations and routing for a VM to occur on the server
# that is running the VM - removing a SPOF and bandwidth bottleneck.
multi_host = ${MULTI_HOST:-0}

# Virtualization settings
# Drivers known (libvirt, xensever, vmware, baremetal)
# Defaults to libvirt (the most compatible) if unknown.
virt_driver = ${VIRT_DRIVER:-libvirt}

# Only useful if above libvirt_type is "libvirt"
# Types known (qemu, kvm, xen, uml, lxc)
# Defaults to qemu (the most compatible) if unknown (or blank).
libvirt_type = ${LIBVIRT_TYPE:-}

# What type of image service will be used?
img_service = ${IMG_SERVICE:-nova.image.glance.GlanceImageService}

# Ensure base images checksummed
checksum_base_images = ${CHECKSUM_BASE_IMAGES:-1}

# Only applicable if using glance...
glance_server = ${GLANCE_SERVER:-$(glance:glance_host):$(glance:glance_port)}

# Used however you want - ensure you know nova's conf file format if you use this!
extra_flags = ${NOVA_EXTRA_FLAGS:-}

[extern]

# Set the ec2 url so euca2ools works
# Typically like http://localhost:8773/services/Cloud
# If blank we will generate this.
ec2_url = ${EC2_URL:-}

# Set the s3 url so euca2ools works
# Typically like http://localhost:3333/services/Cloud
# If blank we will generate this.
s3_url = ${S3_URL:-}

# Not used (currently)??
ec2_user_id = 42
ec2_cert_fn = ~/cert.pm

[download_from]

# These values can be either git://repo+branch or a uri with ".git" in it
# or http:// or https:// for a file (ie on launchpad)
# which contains a given release (ie a tarball)

# Compute service
nova = git://github.com/openstack/nova.git?branch=master

# Compute service client library
nova_client = git://github.com/openstack/python-novaclient.git?branch=master

# Image catalog service
glance = git://github.com/openstack/glance.git?branch=master

# Image catalog service client library
glance_client = git://github.com/openstack/python-glanceclient.git?branch=master

# Unified auth system (manages accounts/tokens)
keystone = git://github.com/openstack/keystone.git?branch=master

# Unified auth system (manages accounts/tokens) client library
keystone_client = git://github.com/openstack/python-keystoneclient.git?branch=master

# Django powered web control panel for openstack
horizon = git://github.com/openstack/horizon.git?branch=master

# Unified openstack client library
openstack_client = git://github.com/openstack/python-openstackclient.git?branch=master

# A websockets/html5 or flash powered VNC console for vm instances
no_vnc = git://github.com/kanaka/noVNC.git?branch=master

# Client for the new volume api
cinder_client = git://github.com/openstack/python-cinderclient.git?branch=master

# Client for the network connectivity as a service api
quantum_client = git://github.com/openstack/python-quantumclient.git?branch=master

# Client for the openstack distributed object store swift
swift_client = git://github.com/openstack/python-swiftclient.git?branch=master

[horizon]

# What user will apache be serving from.
#
# Root will typically not work (for apache on most distros)
# sudo adduser <username> then sudo adduser <username> admin will be what you want to set this up (in ubuntu)
# I typically use user "horizon" for ubuntu and the runtime user (who will have sudo access) for RHEL.
#
# NOTE: If blank the currently executing user will be used.
apache_user = ${APACHE_USER:-}

# This is the group of the previous user (adjust as needed)
apache_group = ${APACHE_GROUP:-$(horizon:apache_user)}

# Port horizon should run on
port = ${HORIZON_PORT:-80}

[passwords]

# This section is where passwords could be stored. This section also has special meaning
# in code in that the configuration class we use will look in this section for passwords
# and if no password is found (ie an empty string) then the user will be prompted to enter
# a password, if they do not enter one (or its blank) then one will be generated for the user.

# NOTE: You will need to send the same MYSQL_PASSWORD to every host if you are doing a multi-node openstack installation.
sql = ${MYSQL_PASSWORD:-}

# Change the rabbit password since the default is "guest"
rabbit = ${RABBIT_PASSWORD:-}

# This password will be used by horizon and keystone as the admin password
horizon_keystone_admin = ${ADMIN_PASSWORD:-}

# Openstack components need to have an admin token to validate user tokens.
service_token = ${SERVICE_TOKEN:-}
service_password = ${SERVICE_PASSWORD:-}
7 conf/components/cinder-client.yaml Normal file
@@ -0,0 +1,7 @@
# Settings for component cinder-client
---

# Where we download this from...
get_from: "git://github.com/openstack/python-cinderclient.git?branch=master"

...
8 conf/components/db.yaml Normal file
@@ -0,0 +1,8 @@
# Settings for component db
---
# Where your db is located at and how to access it.
host: localhost
port: 3306
type: mysql
user: root
...
12 conf/components/general.yaml Normal file
@@ -0,0 +1,12 @@
# Settings for component general
---
# Python component run type to use (defaults to forking)
run_type: "anvil.runners.fork:ForkRunner"

ip: "$(auto:ip)"

# How many seconds to wait until a service comes online before using it.
# For example, before uploading to glance we need keystone and glance to be online.
# Sometimes this takes 5 to 10 seconds to start these up....
service_wait_seconds: 5
...
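These component files use $(section:option) style references ("$(auto:ip)" above, "$(db:type)" and "$(keystone:auth_host)" in the files below), which are resolved against the other extracted sections when the configs are merged. A minimal sketch of how such references could be expanded, assuming the YAML files have already been loaded into a dict of sections; this is illustrative only, not the project's actual YamlInterpolator:

import re

REF_PAT = re.compile(r"\$\(([\w-]+):([\w-]+)\)")

def interpolate(value, sections):
    # Expand $(section:option) references, recursing so references can chain.
    if not isinstance(value, str):
        return value
    def _lookup(match):
        section, option = match.group(1), match.group(2)
        return str(interpolate(sections[section][option], sections))
    return REF_PAT.sub(_lookup, value)

sections = {
    'auto': {'ip': '10.0.0.5'},
    'db': {'type': 'mysql', 'host': '$(auto:ip)'},
}
print(interpolate('$(db:host)', sections))  # 10.0.0.5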
7 conf/components/glance-client.yaml Normal file
@@ -0,0 +1,7 @@
# Settings for component glance-client
---

# Where we download this from...
get_from: git://github.com/openstack/python-glanceclient.git?branch=master

...
28 conf/components/glance.yaml Normal file
@@ -0,0 +1,28 @@
# Settings for component glance
---
# Where we download this from...
get_from: "git://github.com/openstack/glance.git?branch=master"
host: "$(auto:ip)"
api_port: 9292
reg_port: 9191
protocol: http
# List of images to download and install into glance.
image_urls:
  - http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz
  - http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz
# Needed for setting up your database
db:
  type: "$(db:type)"
  user: "$(db:user)"
  host: "$(db:host)"
  port: "$(db:port)"
# Interactions with keystone are via the following settings
paste_flavor: 'keystone'
keystone:
  auth_host: "$(keystone:auth_host)"
  auth_port: "$(keystone:auth_port)"
  auth_proto: "$(keystone:auth_proto)"
  service_host: "$(keystone:service_host)"
  service_port: "$(keystone:service_port)"
  service_proto: "$(keystone:service_proto)"
...
25 conf/components/horizon.yaml Normal file
@@ -0,0 +1,25 @@
# Settings for component horizon
---
# Where we download this from...
get_from: "git://github.com/openstack/horizon.git?branch=master"

# This is the group of the user (adjust as needed)
apache_group: "$(auto:user)"

# What user will apache be serving from.
#
# Root will typically not work (for apache on most distros)
# sudo adduser <username> then sudo adduser <username> admin will be what you want to set this up (in ubuntu)
# I typically use user "horizon" for ubuntu and the runtime user (who will have sudo access) for RHEL.
apache_user: "$(auto:user)"

# Port horizon should run on
port: 80

# Needed for setting up your database
db:
  type: "$(db:type)"
  user: "$(db:user)"
  host: "$(db:host)"
  port: "$(db:port)"
...
7 conf/components/keystone-client.yaml Normal file
@@ -0,0 +1,7 @@
# Settings for component keystone-client
---

# Where we download this from...
get_from: "git://github.com/openstack/python-keystoneclient.git?branch=master"

...
28 conf/components/keystone.yaml Normal file
@@ -0,0 +1,28 @@
# Settings for component keystone
---
# Where we download this from...
get_from: "git://github.com/openstack/keystone.git?branch=master"

# Where is the keystone auth host at?
auth_host: "$(auto:ip)"
auth_port: 35357
auth_proto: http

# Where is the keystone service host at?
service_host: "$(auto:ip)"
service_port: 5000
service_proto: http

# Needed for setting up your database
db:
  type: "$(db:type)"
  user: "$(db:user)"
  host: "$(db:host)"
  port: "$(db:port)"

# Needed when running to setup the right roles/endpoints...
glance:
  api_port: "$(glance:api_port)"
  protocol: "$(glance:protocol)"
  reg_port: "$(glance:reg_port)"
...
7 conf/components/no-vnc.yaml Normal file
@@ -0,0 +1,7 @@
# Settings for component no-vnc
---

# Where we download this from...
get_from: "git://github.com/kanaka/noVNC.git?branch=master"

...
7 conf/components/nova-client.yaml Normal file
@@ -0,0 +1,7 @@
# Settings for component nova-client
---

# Where we download this from...
get_from: "git://github.com/openstack/python-novaclient.git?branch=master"

...
139 conf/components/nova.yaml Normal file
@@ -0,0 +1,139 @@
# Settings for component nova
---

# Where we download this from...
get_from: "git://github.com/openstack/nova.git?branch=master"

# Host and ports for the different nova services
api_host: "$(auto:ip)"
api_port: 8774
s3_host: "$(auto:ip)"
s3_port: 3333
volume_host: "$(auto:ip)"
volume_port: 8776
ec2_host: "$(auto:ip)"
ec2_port: 8773
ec2_admin_host: "$(auto:ip)"
ec2_admin_port: 8773
protocol: http

# Very useful to read over the following
#
# http://docs.openstack.org/trunk/openstack-compute/admin/content/configuring-networking-on-the-compute-node.html
# https://github.com/openstack/nova/blob/master/etc/nova/nova.conf.sample

# Set api_rate_limit = 0 (or blank) to turn OFF rate limiting
api_rate_limit: False

# The internal ip of the ec2 api server
ec2_dmz_host: "$(auto:ip)"

# A fixed network will be created for you (unless disabled)
enable_fixed: True
fixed_network_size: 256
fixed_range: "10.0.0.0/24"

# Used however you want - ensure you know nova's conf file format if you use this!
extra_flags: ""

# DHCP Warning: If your flat interface device uses DHCP, there will be a hiccup while the network
# is moved from the flat interface to the flat network bridge. This will happen when you launch
# your first instance. Upon launch you will lose all connectivity to the node, and the vm launch will probably fail.
#
# If you are running on a single node and don't need to access the VMs from devices other than
# that node, you can set the flat interface to the same value as FLAT_NETWORK_BRIDGE.
# This will stop the network hiccup from occurring.

# If using a flat manager (not dhcp) then you probably want this on
flat_injected: False
flat_interface: eth0
flat_network_bridge: br100

# A floating network will be created for you (unless disabled)
enable_floating: True
floating_range: "172.24.4.224/28"
test_floating_pool: test
test_floating_range: "192.168.253.0/29"

# Force backing images to raw format?
force_raw_images: True
checksum_base_images: True
glance_server: "$(glance:host):$(glance:api_port)"
img_service: nova.image.glance.GlanceImageService

# How instances will be named and where
instance_name_postfix: "%08x"
instance_name_prefix: "instance-"

# Defaults to $NOVA_DIR/instances if empty
instances_path: ''

# This decides which firewall driver to use:
# The default here should work with linux + iptables + libvirt special sauce...
libvirt_firewall_driver: nova.virt.libvirt.firewall.IptablesFirewallDriver

# Only useful if above virt_driver is "libvirt"
# Types known (qemu, kvm, xen, uml, lxc)
# Defaults to qemu (the most compatible) if unknown (or blank).
libvirt_type: "qemu"

# This is just a firewall based on iptables, for non-libvirt usage
basic_firewall_driver: nova.virt.firewall.IptablesFirewallDriver

# Multi-host is a mode where each compute node runs its own network node.
# This allows network operations and routing for a VM to occur on the server
# that is running the VM - removing a SPOF and bandwidth bottleneck.
multi_host: False

# Which network manager and which interface should be used
network_manager: nova.network.manager.FlatDHCPManager

# Interface for public IP addresses
public_interface: eth0

# Currently novaclient needs you to specify the *compute api* version.
nova_version: "1.1"

# Which scheduler will nova be running with?
# Nova supports pluggable schedulers. FilterScheduler should work in most cases.
scheduler: nova.scheduler.filter_scheduler.FilterScheduler

# Should nova be in verbose mode?
verbose: True

# Virtualization settings
# Drivers known (libvirt, xenserver, vmware, baremetal)
# Defaults to libvirt (the most compatible) if unknown.
virt_driver: libvirt
vlan_interface: $(nova:public_interface)

# Vnc server settings
vncproxy_url: "http://$(auto:ip):6080/vnc_auto.html"
vncserver_listen: 127.0.0.1
vncserver_proxyclient_address: ""
xvpvncproxy_url: "http://$(auto:ip):6081/console"

# Not currently working (to be replaced by cinder)
volume_backing_file: ""
volume_backing_file_size: 2052M
volume_group: nova-volumes
volume_name_postfix: "%08x"
volume_name_prefix: "volume-"

# Needed for setting up your database
db:
  type: "$(db:type)"
  user: "$(db:user)"
  host: "$(db:host)"
  port: "$(db:port)"

# Interactions with keystone are via the following settings
keystone:
  auth_host: "$(keystone:auth_host)"
  auth_port: "$(keystone:auth_port)"
  auth_proto: "$(keystone:auth_proto)"
  service_host: "$(keystone:service_host)"
  service_port: "$(keystone:service_port)"
  service_proto: "$(keystone:service_proto)"

...
6 conf/components/openstack-client.yaml Normal file
@@ -0,0 +1,6 @@
# Settings for component openstack-client
---

get_from: "git://github.com/openstack/python-openstackclient.git?branch=master"

...
6 conf/components/quantum-client.yaml Normal file
@@ -0,0 +1,6 @@
# Settings for component quantum-client
---

get_from: "git://github.com/openstack/python-quantumclient.git?branch=master"

...
8 conf/components/rabbit-mq.yaml Normal file
@@ -0,0 +1,8 @@
# Settings for component rabbit-mq
---
# Where is rabbit located?
host: "$(auto:ip)"

# Which rabbit user should be used
user_id: guest
...
6 conf/components/swift-client.yaml Normal file
@@ -0,0 +1,6 @@
# Settings for component swift-client
---

get_from: "git://github.com/openstack/python-swiftclient.git?branch=master"

...
86 smithy
@@ -51,7 +51,6 @@ from anvil import actions
from anvil import cfg
from anvil import colorizer
from anvil import distro
from anvil import env_rc
from anvil import log as logging
from anvil import opts
from anvil import passwords
@@ -68,65 +67,6 @@ from ordereddict import OrderedDict
LOG = logging.getLogger()


def get_config_locations(start_locations=None):
locs = []
if start_locations:
locs.extend(start_locations)
locs.append(settings.CONFIG_LOCATION)
locs.append(sh.joinpths("/etc", 'anvil', 'anvil.ini'))
return locs


def find_config(locations=None):
"""
Finds the potential anvil configuration files.
"""
if not locations:
locations = get_config_locations()
real_paths = []
for path in locations:
LOG.debug("Looking for configuration in: %r", path)
if sh.isfile(path):
LOG.debug("Found a 'possible' configuration in: %r", path)
real_paths.append(path)
return real_paths


def establish_config(args):
"""
Creates the stack configuration object using the set of
desired configuration resolvers+password resolvers to be used and returns
the wrapper that knows how to activate those resolvers.

Arguments:
args: command line args
"""

config = cfg.ProxyConfig()
config.add_read_resolver(cfg.CliResolver.create(args['cli_overrides']))
config.add_read_resolver(cfg.EnvResolver())
start_configs = []
if 'config_fn' in args and args['config_fn']:
start_configs.append(args['config_fn'])
else:
start_configs.extend(get_config_locations())
real_configs = find_config(start_configs)
config.add_read_resolver(cfg.ConfigResolver(cfg.RewritableConfigParser(fns=real_configs)))
utils.log_iterable(utils.get_class_names(config.read_resolvers),
header="Config lookup will use the following resolvers:",
logger=LOG)

config.add_password_resolver(passwords.ConfigPassword(config))
if args.get('prompt_for_passwords', True):
config.add_password_resolver(passwords.InputPassword(config))
config.add_password_resolver(passwords.RandomPassword(config))
utils.log_iterable(utils.get_class_names(config.pw_resolvers),
header="Password finding will use the following lookups:",
logger=LOG)

return config


def backup_persona(install_dir, action, persona_fn):
(name, ext) = os.path.splitext(os.path.basename(persona_fn))
ext = ext.lstrip(".")
@@ -157,7 +97,6 @@ def run(args):
# Determine + setup the root directory...
# If not provided attempt to locate it via the environment control files
args_root_dir = args.pop("dir")
env_rc.load()
root_dir = env.get_key('INSTALL_ROOT')
if not root_dir:
root_dir = args_root_dir
@@ -190,14 +129,9 @@ def run(args):
except Exception as e:
raise RuntimeError("Error loading persona file: %s due to %s" % (person_fn, e))

# Get the config reader (which is a combination
# of many configs..)
config = establish_config(args)

# Get the object we will be running with...
runner_cls = actions.class_for(action)
runner = runner_cls(dist,
config,
runner = runner_cls(distro=dist,
root_dir=root_dir,
name=action,
**args)
@@ -222,24 +156,6 @@ def run(args):
LOG.info("It took %s seconds or %s minutes to complete action %s.",
colorizer.quote(pretty_time['seconds']), colorizer.quote(pretty_time['minutes']), colorizer.quote(action))

if config.opts_cache:
LOG.info("After action %s your settings which were applied are:", colorizer.quote(action))
table = OrderedDict()
all_read_set = {}
all_read_set.update(config.opts_read)
all_read_set.update(config.opts_set)
for section in sorted(list(all_read_set.keys())):
options = set()
if section in config.opts_read:
options.update(list(config.opts_read[section]))
if section in config.opts_set:
options.update(list(config.opts_set[section]))
option_values = {}
for option in options:
option_values[option] = config.opts_cache[cfg.make_id(section, option)]
table[section] = option_values
utils.log_object(table, item_max_len=80)

LOG.debug("Final environment settings:")
utils.log_object(env.get(), logger=LOG, level=logging.DEBUG, item_max_len=64)