Build OpenStack packages from custom specs

Maintain spec files for OpenStack packages. Start OpenStack daemons as
native system services under different users. Write configuration to /etc.

Implements: blueprint different-openstack-users
Implements: blueprint purge-config
Implements: blueprint package-novnc
Change-Id: I454c1e88011c75997d879bf8b90cd87c8db3f123

parent b9699a75ad
commit 052daddfd7
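
At a high level, the runtime side of this change replaces anvil's fork-based process
management with the distro's native service tooling: each daemon is packaged and
addressed as "openstack-<component>-<subsystem>" and is driven through
"service NAME start|stop|status", while database syncs run as the matching service
user. A minimal standalone sketch of that pattern (illustrative only; the helper
names, the use of subprocess, and the hard-coded "service" command are assumptions,
not anvil's actual API, which builds the command line from the distro definition):

import subprocess

def daemon_name(component, subsystem):
    # Packaged daemons follow the "openstack-<component>-<subsystem>" convention,
    # e.g. ("nova", "api") -> "openstack-nova-api".
    return "openstack-%s-%s" % (component, subsystem)

def service_action(daemon, action):
    # Drive the native init system; exit code 0 means the action succeeded
    # (for "status" it means the daemon is currently running).
    return subprocess.call(["service", daemon, action]) == 0

def start_if_stopped(component, subsystem):
    daemon = daemon_name(component, subsystem)
    if not service_action(daemon, "status"):
        return service_action(daemon, "start")
    return False
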
@@ -122,10 +122,8 @@ class Action(object):
        component_dir = sh.joinpths(self.root_dir, component)
        trace_dir = sh.joinpths(component_dir, 'traces')
        app_dir = sh.joinpths(component_dir, 'app')
        cfg_dir = sh.joinpths(component_dir, 'config')
        return {
            'app_dir': app_dir,
            'cfg_dir': cfg_dir,
            'component_dir': component_dir,
            'root_dir': self.root_dir,
            'trace_dir': trace_dir,
@@ -60,20 +60,7 @@ class InstallAction(action.Action):
                           logger=LOG)

    def _run(self, persona, component_order, instances):
        removals = ['unconfigure']
        self._run_phase(
            action.PhaseFunctors(
                start=lambda i: LOG.info('Configuring %s.', colorizer.quote(i.name)),
                run=lambda i: i.configure(),
                end=None,
            ),
            component_order,
            instances,
            "configure",
            *removals
        )

        removals += ['pre-uninstall', 'post-uninstall']
        removals = ['pre-uninstall', 'post-uninstall']
        self._run_phase(
            action.PhaseFunctors(
                start=lambda i: LOG.info('Preinstalling %s.', colorizer.quote(i.name)),
@@ -104,6 +91,19 @@ class InstallAction(action.Action):
            *removals
        )

        removals += ['unconfigure']
        self._run_phase(
            action.PhaseFunctors(
                start=lambda i: LOG.info('Configuring %s.', colorizer.quote(i.name)),
                run=lambda i: i.configure(),
                end=None,
            ),
            component_order,
            instances,
            "configure",
            *removals
        )

        self._run_phase(
            action.PhaseFunctors(
                start=lambda i: LOG.info('Post-installing %s.', colorizer.quote(i.name)),
@ -57,31 +57,23 @@ class PrepareAction(action.Action):
|
||||
"download-patch",
|
||||
*removals
|
||||
)
|
||||
self._run_phase(
|
||||
action.PhaseFunctors(
|
||||
start=lambda i: LOG.info('Preparing %s.', colorizer.quote(i.name)),
|
||||
run=lambda i: i.prepare(),
|
||||
end=None,
|
||||
),
|
||||
component_order,
|
||||
instances,
|
||||
"prepare",
|
||||
*removals
|
||||
)
|
||||
removals += ["package-destroy"]
|
||||
dependency_handler_class = self.distro.dependency_handler_class
|
||||
dependency_handler = dependency_handler_class(self.distro,
|
||||
self.root_dir,
|
||||
instances.values())
|
||||
|
||||
general_package = "general"
|
||||
dependency_handler.package_start()
|
||||
self._run_phase(
|
||||
action.PhaseFunctors(
|
||||
start=lambda i: LOG.info("Packing OpenStack and its dependencies"),
|
||||
run=lambda i: dependency_handler.package(),
|
||||
start=lambda i: LOG.info("Packing %s", colorizer.quote(i.name)),
|
||||
run=dependency_handler.package_instance,
|
||||
end=None,
|
||||
),
|
||||
[general_package],
|
||||
{general_package: instances[general_package]},
|
||||
component_order,
|
||||
instances,
|
||||
"package",
|
||||
*removals
|
||||
)
|
||||
dependency_handler.package_finish()
|
||||
|
@@ -170,8 +170,6 @@ class YamlInterpolator(object):
        self.base = base
        self.auto_specials = {
            'ip': utils.get_host_ip,
            'user': sh.getuser,
            'group': sh.getgroupname,
            'home': sh.gethomedir,
            'hostname': sh.hostname,
        }
@@ -50,6 +50,7 @@ class Component(object):
        self.passwords = passwords

        self.bin_dir = "/usr/bin"
        self.cfg_dir = "/etc/%s" % self.name

    def get_password(self, option):
        pw_val = self.passwords.get(option)
@@ -90,7 +91,6 @@ class Component(object):
        return {
            'APP_DIR': self.get_option('app_dir'),
            'COMPONENT_DIR': self.get_option('component_dir'),
            'CONFIG_DIR': self.get_option('cfg_dir'),
            'TRACE_DIR': self.get_option('trace_dir'),
        }

@@ -99,3 +99,24 @@ class Component(object):
        # Warm up the configs you might use (e.g. for prompting for passwords
        # earlier rather than later).
        pass

    def subsystem_names(self):
        return self.subsystems.keys()

    def package_names(self):
        """Return a set of names of all packages for this component."""
        names = set()
        try:
            for pack in self.packages:
                names.add(pack["name"])
        except (AttributeError, KeyError):
            pass
        daemon_to_package = self.distro._components[self.name].get(
            "daemon_to_package", {})
        for key in self.subsystem_names():
            try:
                names.add(daemon_to_package[key])
            except KeyError:
                names.add("openstack-%s-%s" % (self.name, key))
        return names
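
For context, package_names() above merges three sources: explicitly listed package
entries, an optional daemon_to_package map from the distro definition, and the
"openstack-<component>-<subsystem>" naming fallback. A small self-contained sketch
of that merge, with invented sample data for illustration:

def derive_package_names(component, packages, subsystems, daemon_to_package):
    # Explicitly listed packages first.
    names = set(pack["name"] for pack in packages if "name" in pack)
    for subsystem in subsystems:
        # Prefer an explicit mapping, otherwise fall back to the naming convention.
        names.add(daemon_to_package.get(subsystem,
                                        "openstack-%s-%s" % (component, subsystem)))
    return names

# derive_package_names("glance", [{"name": "python-glanceclient"}],
#                      ["api", "registry"], {})
# -> {"python-glanceclient", "openstack-glance-api", "openstack-glance-registry"}
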
@ -12,7 +12,6 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from anvil import colorizer
|
||||
from anvil.components import base
|
||||
from anvil import downloader as down
|
||||
from anvil import log as logging
|
||||
@@ -123,39 +122,16 @@ class PkgInstallComponent(base.Component):
            sh.write_file(tgt_fn, contents, tracewriter=self.tracewriter)
        return len(config_fns)

    def _configure_symlinks(self):
        links = self.configurator.symlinks
        if not links:
            return 0
        # Sort so that we link in the correct order (it might not matter, but
        # this ensures the /etc/blah link is created before /etc/blah/blah).
        link_srcs = sorted(links.keys())
        link_srcs.reverse()
        link_nice = []
        for source in link_srcs:
            links_to_be = links[source]
            for link in links_to_be:
                link_nice.append("%s => %s" % (link, source))
        utils.log_iterable(link_nice, logger=LOG,
                           header="Creating %s sym-links" % (len(link_nice)))
        links_made = 0
        for source in link_srcs:
            links_to_be = links[source]
            for link in links_to_be:
                try:
                    LOG.debug("Symlinking %s to %s.", link, source)
                    sh.symlink(source, link, tracewriter=self.tracewriter)
                    links_made += 1
                except (IOError, OSError) as e:
                    LOG.warn("Symlinking %s to %s failed: %s", colorizer.quote(link), colorizer.quote(source), e)
        return links_made

    def prepare(self):
        pass

    def configure(self):
        return self._configure_files() + self._configure_symlinks()
        files = self._configure_files()
        conf_dir = "/etc/%s" % self.name
        if sh.isdir(conf_dir):
            sh.execute(
                ["chown", "-R",
                 "%s:%s" % (self.name, self.name),
                 conf_dir],
                check_exit_code=False)
        return files
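
Note that configure() now hands ownership of /etc/<component> to the matching
service user so the daemon can read its configuration once it runs unprivileged.
A rough standalone equivalent of that step (os/subprocess are used here as
stand-ins for anvil's sh wrappers; the recursive chown and the ignore-failure
behaviour mirror the diff above):

import os
import subprocess

def chown_config(component):
    conf_dir = "/etc/%s" % component
    if os.path.isdir(conf_dir):
        # Best effort: failures are ignored, matching check_exit_code=False above.
        subprocess.call(["chown", "-R", "%s:%s" % (component, component), conf_dir])
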
class PythonInstallComponent(PkgInstallComponent):
|
||||
@ -183,15 +159,7 @@ class PkgUninstallComponent(base.Component):
|
||||
self.tracereader = tr.TraceReader(trace_fn)
|
||||
|
||||
def unconfigure(self):
|
||||
self._unconfigure_links()
|
||||
|
||||
def _unconfigure_links(self):
|
||||
sym_files = self.tracereader.symlinks_made()
|
||||
if sym_files:
|
||||
utils.log_iterable(sym_files, logger=LOG,
|
||||
header="Removing %s symlink files" % (len(sym_files)))
|
||||
for fn in sym_files:
|
||||
sh.unlink(fn)
|
||||
pass
|
||||
|
||||
def post_uninstall(self):
|
||||
self._uninstall_files()
|
||||
|
@ -16,17 +16,13 @@
|
||||
|
||||
from anvil import colorizer
|
||||
from anvil import exceptions as excp
|
||||
from anvil import importer
|
||||
from anvil import log as logging
|
||||
from anvil import shell as sh
|
||||
from anvil import trace as tr
|
||||
from anvil import utils
|
||||
|
||||
from anvil.components import base
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_RUNNER = 'anvil.runners.fork:ForkRunner'
|
||||
|
||||
####
|
||||
#### STATUS CONSTANTS
|
||||
@ -99,9 +95,7 @@ class ProgramRuntime(base.Component):
|
||||
# Attempt to wait until all potentially started applications
|
||||
# are actually started (for whatever definition of started is applicable)
|
||||
# for up to a given amount of attempts and wait time between attempts.
|
||||
num_started = len(self.applications)
|
||||
if not num_started:
|
||||
raise excp.StatusException("No %r programs started, can not wait for them to become active..." % (self.name))
|
||||
num_started = len(self.subsystems)
|
||||
|
||||
def waiter(try_num):
|
||||
LOG.info("Waiting %s seconds for component %s programs to start.", between_wait, colorizer.quote(self.name))
|
||||
@ -132,135 +126,82 @@ class EmptyRuntime(ProgramRuntime):
|
||||
pass
|
||||
|
||||
|
||||
class PythonRuntime(ProgramRuntime):
|
||||
def __init__(self, *args, **kargs):
|
||||
ProgramRuntime.__init__(self, *args, **kargs)
|
||||
start_trace = tr.trace_filename(self.get_option('trace_dir'), 'start')
|
||||
self.tracewriter = tr.TraceWriter(start_trace, break_if_there=True)
|
||||
self.tracereader = tr.TraceReader(start_trace)
|
||||
class ServiceRuntime(ProgramRuntime):
|
||||
def get_command(self, command, program):
|
||||
program = self.daemon_name(program)
|
||||
return [(arg if arg != "NAME" else program)
|
||||
for arg in self.distro.get_command("service", command)]
|
||||
|
||||
def app_params(self, program):
|
||||
params = dict(self.params)
|
||||
if program and program.name:
|
||||
params['APP_NAME'] = str(program.name)
|
||||
return params
|
||||
def daemon_name(self, program):
|
||||
return program
|
||||
|
||||
def start(self):
|
||||
# Perform a check just to make sure said programs aren't already started and bail out
|
||||
# so that we don't unintentionally start new ones and cause confusion for all
|
||||
# involved...
|
||||
what_may_already_be_started = []
|
||||
try:
|
||||
what_may_already_be_started = self.tracereader.apps_started()
|
||||
except excp.NoTraceException:
|
||||
pass
|
||||
if what_may_already_be_started:
|
||||
msg = "%s programs of component %s may already be running, did you forget to stop those?"
|
||||
raise excp.StartException(msg % (len(what_may_already_be_started), self.name))
|
||||
|
||||
# Select how we are going to start it and get on with the show...
|
||||
runner_entry_point = self.get_option("run_type", default_value=DEFAULT_RUNNER)
|
||||
starter_args = [self, runner_entry_point]
|
||||
starter = importer.construct_entry_point(runner_entry_point, *starter_args)
|
||||
amount_started = 0
|
||||
amount = 0
|
||||
for program in self.applications:
|
||||
self._start_app(program, starter)
|
||||
amount_started += 1
|
||||
return amount_started
|
||||
if not self.status_app(program):
|
||||
if self.start_app(program):
|
||||
amount += 1
|
||||
return amount
|
||||
|
||||
def _start_app(self, program, starter):
|
||||
app_working_dir = program.working_dir
|
||||
if not app_working_dir:
|
||||
app_working_dir = self.get_option('app_dir')
|
||||
def start_app(self, program):
|
||||
LOG.info("Starting program %s under component %s.",
|
||||
colorizer.quote(program), self.name)
|
||||
|
||||
# Un-templatize whatever argv (program options) the program has specified
|
||||
# with whatever program params were retrieved to create the 'real' set
|
||||
# of program options (if applicable)
|
||||
app_params = self.app_params(program)
|
||||
if app_params:
|
||||
app_argv = [utils.expand_template(arg, app_params) for arg in program.argv]
|
||||
else:
|
||||
app_argv = program.argv
|
||||
LOG.debug("Starting %r using a %r", program.name, starter)
|
||||
|
||||
# TODO(harlowja): clean this function params up (should just take a program)
|
||||
details_path = starter.start(program.name,
|
||||
app_pth=program.path,
|
||||
app_dir=app_working_dir,
|
||||
opts=app_argv)
|
||||
|
||||
# This trace is used to locate details about what/how to stop
|
||||
LOG.info("Started program %s under component %s.", colorizer.quote(program.name), self.name)
|
||||
self.tracewriter.app_started(program.name, details_path, starter.name)
|
||||
|
||||
def _locate_investigators(self, applications_started):
|
||||
# Recreate the runners that can be used to dive deeper into the applications list
|
||||
# that was started (a 3 tuple of (name, trace, who_started)).
|
||||
investigators_created = {}
|
||||
to_investigate = []
|
||||
for (name, _trace, who_started) in applications_started:
|
||||
investigator = investigators_created.get(who_started)
|
||||
if investigator is None:
|
||||
try:
|
||||
investigator_args = [self, who_started]
|
||||
investigator = importer.construct_entry_point(who_started, *investigator_args)
|
||||
investigators_created[who_started] = investigator
|
||||
except RuntimeError as e:
|
||||
LOG.warn("Could not load class %s which should be used to investigate %s: %s",
|
||||
colorizer.quote(who_started), colorizer.quote(name), e)
|
||||
continue
|
||||
to_investigate.append((name, investigator))
|
||||
return to_investigate
|
||||
start_cmd = self.get_command("start", program)
|
||||
try:
|
||||
sh.execute(start_cmd, shell=True)
|
||||
except excp.ProcessExecutionError:
|
||||
LOG.error("Failed to start program %s under component %s.",
|
||||
colorizer.quote(program), self.name)
|
||||
return False
|
||||
return True
|
||||
|
||||
def stop(self):
|
||||
# Anything to stop in the first place??
|
||||
what_was_started = []
|
||||
amount = 0
|
||||
for program in self.applications:
|
||||
if self.status_app(program):
|
||||
if self.stop_app(program):
|
||||
amount += 1
|
||||
return amount
|
||||
|
||||
def stop_app(self, program):
|
||||
LOG.info("Stopping program %s under component %s.",
|
||||
colorizer.quote(program), self.name)
|
||||
stop_cmd = self.get_command("stop", program)
|
||||
try:
|
||||
what_was_started = self.tracereader.apps_started()
|
||||
except excp.NoTraceException:
|
||||
pass
|
||||
if not what_was_started:
|
||||
return 0
|
||||
sh.execute(stop_cmd, shell=True)
|
||||
except excp.ProcessExecutionError:
|
||||
LOG.error("Failed to stop program %s under component %s.",
|
||||
colorizer.quote(program), self.name)
|
||||
return False
|
||||
return True
|
||||
|
||||
# Get the investigators/runners which can be used
|
||||
# to actually do the stopping and attempt to perform said stop.
|
||||
applications_stopped = []
|
||||
for (name, handler) in self._locate_investigators(what_was_started):
|
||||
handler.stop(name)
|
||||
applications_stopped.append(name)
|
||||
if applications_stopped:
|
||||
utils.log_iterable(applications_stopped,
|
||||
header="Stopped %s programs started under %s component" % (len(applications_stopped), self.name),
|
||||
logger=LOG)
|
||||
|
||||
# Only if we stopped the amount which was supposedly started can
|
||||
# we actually remove the trace where those applications have been
|
||||
# marked as started in (ie the connection back to how they were started)
|
||||
if len(applications_stopped) < len(what_was_started):
|
||||
diff = len(what_was_started) - len(applications_stopped)
|
||||
LOG.warn(("%s less applications were stopped than were started, please check out %s"
|
||||
" to stop these program manually."), diff, colorizer.quote(self.tracereader.filename(), quote_color='yellow'))
|
||||
else:
|
||||
sh.unlink(self.tracereader.filename())
|
||||
|
||||
return len(applications_stopped)
|
||||
def status_app(self, program):
|
||||
status_cmd = self.get_command("status", program)
|
||||
try:
|
||||
sh.execute(status_cmd, shell=True)
|
||||
except excp.ProcessExecutionError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def statii(self):
|
||||
# Anything to get status on in the first place??
|
||||
what_was_started = []
|
||||
try:
|
||||
what_was_started = self.tracereader.apps_started()
|
||||
except excp.NoTraceException:
|
||||
pass
|
||||
if not what_was_started:
|
||||
return []
|
||||
|
||||
# Get the investigators/runners which can be used
|
||||
# to actually do the status inquiry and attempt to perform said inquiry.
|
||||
statii = []
|
||||
for (name, handler) in self._locate_investigators(what_was_started):
|
||||
(status, details) = handler.status(name)
|
||||
statii.append(ProgramStatus(name=name,
|
||||
for program in self.applications:
|
||||
status = (STATUS_STARTED
|
||||
if self.status_app(program)
|
||||
else STATUS_STOPPED)
|
||||
statii.append(ProgramStatus(name=program,
|
||||
status=status,
|
||||
details=details))
|
||||
details={}))
|
||||
return statii
|
||||
|
||||
|
||||
class OpenStackRuntime(ServiceRuntime):
|
||||
@property
|
||||
def applications(self):
|
||||
return self.subsystem_names()
|
||||
|
||||
def daemon_name(self, program):
|
||||
return "openstack-%s-%s" % (self.name, program)
|
||||
|
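
The reworked statii() above no longer consults a start trace; it simply asks each
service for its status and maps the exit code onto STARTED/STOPPED. A reduced sketch
of that logic (the constants and the ProgramStatus stand-in below are assumptions
made so the example is self-contained):

import collections
import subprocess

STATUS_STARTED = "started"
STATUS_STOPPED = "stopped"
ProgramStatus = collections.namedtuple("ProgramStatus", ["name", "status", "details"])

def collect_statii(daemons):
    # One ProgramStatus per daemon, based purely on the "service status" exit code.
    statii = []
    for daemon in daemons:
        running = subprocess.call(["service", daemon, "status"]) == 0
        statii.append(ProgramStatus(name=daemon,
                                    status=STATUS_STARTED if running else STATUS_STOPPED,
                                    details={}))
    return statii
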
@@ -16,18 +16,16 @@

from anvil import colorizer
from anvil import log as logging
from anvil import shell as sh
from anvil import utils

from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime

from anvil.components.configurators import cinder as cconf

LOG = logging.getLogger(__name__)

# Sync db command
SYNC_DB_CMD = [sh.joinpths('$BIN_DIR', 'cinder-manage'),
SYNC_DB_CMD = ['sudo', '-u', 'cinder', '/usr/bin/cinder-manage',
               # Available commands:
               'db', 'sync']
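
The same pattern repeats for glance, keystone, nova and quantum below: the manage
commands drop the $BIN_DIR/$CFG_FILE templating and instead call the packaged
console script directly as the service user. A generic sketch of that invocation
(subprocess stands in for anvil's execute helpers; the exact sub-command varies per
project, e.g. 'db sync' for cinder/nova vs 'db_sync' for glance/keystone):

import subprocess

def sync_db(service, subcommand=("db", "sync")):
    # e.g. sync_db("cinder") -> sudo -u cinder /usr/bin/cinder-manage db sync
    cmd = ["sudo", "-u", service, "/usr/bin/%s-manage" % service] + list(subcommand)
    subprocess.check_call(cmd)
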
@ -47,34 +45,3 @@ class CinderInstaller(binstall.PythonInstallComponent):
|
||||
LOG.info("Syncing cinder to database: %s", colorizer.quote(self.configurator.DB_NAME))
|
||||
cmds = [{'cmd': SYNC_DB_CMD}]
|
||||
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
|
||||
|
||||
def config_params(self, config_fn):
|
||||
mp = binstall.PythonInstallComponent.config_params(self, config_fn)
|
||||
mp['BIN_DIR'] = self.bin_dir
|
||||
return mp
|
||||
|
||||
|
||||
class CinderRuntime(bruntime.PythonRuntime):
|
||||
def __init__(self, *args, **kargs):
|
||||
bruntime.PythonRuntime.__init__(self, *args, **kargs)
|
||||
self.config_path = sh.joinpths(self.get_option('cfg_dir'), cconf.API_CONF)
|
||||
|
||||
@property
|
||||
def applications(self):
|
||||
apps = []
|
||||
for (name, _values) in self.subsystems.items():
|
||||
name = "cinder-%s" % (name.lower())
|
||||
path = sh.joinpths(self.bin_dir, name)
|
||||
if sh.is_executable(path):
|
||||
apps.append(bruntime.Program(name, path, argv=self._fetch_argv(name)))
|
||||
return apps
|
||||
|
||||
def app_params(self, program):
|
||||
params = bruntime.PythonRuntime.app_params(self, program)
|
||||
params['CFG_FILE'] = self.config_path
|
||||
return params
|
||||
|
||||
def _fetch_argv(self, name):
|
||||
return [
|
||||
'--config-file', '$CFG_FILE',
|
||||
]
|
||||
|
@ -40,14 +40,6 @@ class Configurator(object):
|
||||
def config_files(self):
|
||||
return list(self.configs)
|
||||
|
||||
@property
|
||||
def symlinks(self):
|
||||
links = {}
|
||||
for fn in self.config_files:
|
||||
source_fn = self.target_config(fn)
|
||||
links[source_fn] = [sh.joinpths(self.link_dir, fn)]
|
||||
return links
|
||||
|
||||
@property
|
||||
def link_dir(self):
|
||||
link_dir_base = self.installer.distro.get_command_config('base_link_dir')
|
||||
@ -82,7 +74,7 @@ class Configurator(object):
|
||||
return contents
|
||||
|
||||
def target_config(self, config_fn):
|
||||
return sh.joinpths(self.installer.get_option('cfg_dir'), config_fn)
|
||||
return sh.joinpths(self.installer.cfg_dir, config_fn)
|
||||
|
||||
def setup_rpc(self, conf, rpc_backend=None):
|
||||
# How is your message queue setup?
|
||||
|
@ -43,6 +43,7 @@ class CinderConfigurator(base.Configurator):
|
||||
config.add_with_section('filter:authtoken', k, v)
|
||||
|
||||
def _config_adjust_api(self, config):
|
||||
config.add('log_dir', '/var/log/cinder')
|
||||
self.setup_rpc(config)
|
||||
# Setup your sql connection
|
||||
config.add('sql_connection', self.fetch_dbdsn())
|
||||
|
@ -56,7 +56,6 @@ class GlanceConfigurator(base.Configurator):
|
||||
config.add('debug', self.installer.get_bool_option('verbose'))
|
||||
config.add('verbose', self.installer.get_bool_option('verbose'))
|
||||
config.add('sql_connection', self.fetch_dbdsn())
|
||||
config.remove('DEFAULT', 'log_file')
|
||||
config.add_with_section('paste_deploy', 'flavor', self.installer.get_option('paste_flavor'))
|
||||
for (k, v) in self._fetch_keystone_params().items():
|
||||
config.add_with_section('keystone_authtoken', k, v)
|
||||
@ -66,13 +65,8 @@ class GlanceConfigurator(base.Configurator):
|
||||
gparams = ghelper.get_shared_params(**self.installer.options)
|
||||
config.add('bind_port', gparams['endpoints']['public']['port'])
|
||||
|
||||
config.add( 'default_store', 'file')
|
||||
img_store_dir = sh.joinpths(self.installer.get_option('component_dir'), 'images')
|
||||
config.add('filesystem_store_datadir', img_store_dir)
|
||||
LOG.debug("Ensuring file system store directory %r exists and is empty." % (img_store_dir))
|
||||
if sh.isdir(img_store_dir):
|
||||
sh.deldir(img_store_dir)
|
||||
sh.mkdirslist(img_store_dir, tracewriter=self.installer.tracewriter)
|
||||
config.add('default_store', 'file')
|
||||
config.add('filesystem_store_datadir', "/var/lib/glance/images")
|
||||
|
||||
def _config_adjust_reg(self, config):
|
||||
self._config_adjust_api_reg(config)
|
||||
|
@ -1,63 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from anvil import shell as sh
|
||||
from anvil.components.configurators import base
|
||||
|
||||
# Config files messed with...
|
||||
HORIZON_LOCAL_SETTINGS_CONF = "local_settings.py"
|
||||
HORIZON_APACHE_CONF = 'horizon_apache.conf'
|
||||
CONFIGS = [HORIZON_LOCAL_SETTINGS_CONF, HORIZON_APACHE_CONF]
|
||||
|
||||
class HorizonConfigurator(base.Configurator):
|
||||
|
||||
def __init__(self, installer):
|
||||
super(HorizonConfigurator, self).__init__(installer, CONFIGS)
|
||||
|
||||
@property
|
||||
def symlinks(self):
|
||||
links = super(HorizonConfigurator, self).symlinks
|
||||
links[self.installer.access_log] = [sh.joinpths(self.link_dir,
|
||||
'access.log')]
|
||||
links[self.installer.error_log] = [sh.joinpths(self.link_dir,
|
||||
'error.log')]
|
||||
return links
|
||||
|
||||
def target_config(self, config_name):
|
||||
if config_name == HORIZON_LOCAL_SETTINGS_CONF:
|
||||
return sh.joinpths(self.installer.get_option('app_dir'),
|
||||
'openstack_dashboard',
|
||||
'local',
|
||||
config_name)
|
||||
else:
|
||||
return super(HorizonConfigurator, self).target_config(config_name)
|
||||
|
||||
class HorizonRhelConfigurator(HorizonConfigurator):
|
||||
|
||||
def __init__(self, installer):
|
||||
super(HorizonRhelConfigurator, self).__init__(installer)
|
||||
|
||||
@property
|
||||
def symlinks(self):
|
||||
links = super(HorizonRhelConfigurator, self).symlinks
|
||||
apache_conf_tgt = self.target_config(HORIZON_APACHE_CONF)
|
||||
if apache_conf_tgt not in links:
|
||||
links[apache_conf_tgt] = []
|
||||
links[apache_conf_tgt].append(sh.joinpths(
|
||||
'/etc/',
|
||||
self.installer.distro.get_command_config('apache', 'name'),
|
||||
'conf.d', HORIZON_APACHE_CONF))
|
||||
return links
|
@ -54,6 +54,8 @@ class KeystoneConfigurator(base.Configurator):
|
||||
config.add_with_section('logger_root', 'handlers', "devel,production")
|
||||
|
||||
def _config_adjust_root(self, config):
|
||||
config.add('log_dir', '/var/log/keystone')
|
||||
config.add('log_file', 'keystone-all.log')
|
||||
params = khelper.get_shared_params(**utils.merge_dicts(self.installer.options,
|
||||
khelper.get_shared_passwords(self.installer)))
|
||||
config.add('admin_token', params['service_token'])
|
||||
|
@ -66,6 +66,8 @@ class NovaConfigurator(base.Configurator):
|
||||
hostip = self.installer.get_option('ip')
|
||||
|
||||
nova_conf.add('verbose', self.installer.get_bool_option('log_verbose'))
|
||||
nova_conf.add('state_path', '/var/lib/nova')
|
||||
nova_conf.add('log_dir', '/var/log/nova')
|
||||
|
||||
# Allow destination machine to match source for resize.
|
||||
nova_conf.add('allow_resize_to_same_host', True)
|
||||
@ -119,11 +121,7 @@ class NovaConfigurator(base.Configurator):
|
||||
nova_conf.add('checksum_base_images', self.installer.get_bool_option('checksum_base_images'))
|
||||
|
||||
# Setup the interprocess locking directory (don't put me on shared storage)
|
||||
lock_path = self.installer.get_option('lock_path')
|
||||
if not lock_path:
|
||||
lock_path = sh.joinpths(self.installer.get_option('component_dir'), 'locks')
|
||||
sh.mkdirslist(lock_path, tracewriter=self.tracewriter)
|
||||
nova_conf.add('lock_path', lock_path)
|
||||
nova_conf.add('lock_path', '/var/lock/nova')
|
||||
|
||||
# Vnc settings setup
|
||||
self._configure_vnc(nova_conf)
|
||||
@ -146,12 +144,6 @@ class NovaConfigurator(base.Configurator):
|
||||
# the CPU usage of an idle VM tenfold.
|
||||
nova_conf.add('use_usb_tablet', False)
|
||||
|
||||
# Where instances will be stored
|
||||
instances_path = self.installer.get_option('instances_path')
|
||||
if not instances_path:
|
||||
instances_path = sh.joinpths(self.installer.get_option('component_dir'), 'instances')
|
||||
self._configure_instances_path(instances_path, nova_conf)
|
||||
|
||||
# Is this a multihost setup?
|
||||
self._configure_multihost(nova_conf)
|
||||
|
||||
@ -292,7 +284,7 @@ class NovaConfigurator(base.Configurator):
|
||||
|
||||
# Configs dhcp bridge stuff???
|
||||
# TODO(harlowja) why is this the same as the nova.conf?
|
||||
nova_conf.add('dhcpbridge_flagfile', sh.joinpths(self.installer.get_option('cfg_dir'), API_CONF))
|
||||
nova_conf.add('dhcpbridge_flagfile', sh.joinpths(self.installer.cfg_dir, API_CONF))
|
||||
|
||||
# Network prefix for the IP network that all the projects for future VM guests reside on. Example: 192.168.0.0/12
|
||||
nova_conf.add('fixed_range', self.installer.get_option('fixed_range'))
|
||||
@ -320,15 +312,6 @@ class NovaConfigurator(base.Configurator):
|
||||
nova_conf.add('multi_host', True)
|
||||
nova_conf.add('send_arp_for_ha', True)
|
||||
|
||||
# Ensures the place where instances will be stored is usable
|
||||
def _configure_instances_path(self, instances_path, nova_conf):
|
||||
nova_conf.add('instances_path', instances_path)
|
||||
if not sh.isdir(instances_path):
|
||||
LOG.debug("Attempting to create instance directory: %r", instances_path)
|
||||
sh.mkdirslist(instances_path, tracewriter=self.tracewriter)
|
||||
LOG.debug("Adjusting permissions of instance directory: %r", instances_path)
|
||||
sh.chmod(instances_path, 0777)
|
||||
|
||||
# Any special libvirt configurations go here
|
||||
def _configure_libvirt(self, virt_type, nova_conf):
|
||||
nova_conf.add('libvirt_type', virt_type)
|
||||
|
@ -117,6 +117,8 @@ class QuantumConfigurator(base.Configurator):
|
||||
config.add("api_paste_config", self.target_config(PASTE_CONF))
|
||||
# TODO(aababilov): add debug to other services conf files
|
||||
config.add('debug', self.installer.get_bool_option("debug"))
|
||||
config.add("log_file", "quantum-server.log")
|
||||
config.add("log_dir", "/var/log/quantum")
|
||||
|
||||
# Setup the interprocess locking directory
|
||||
# (don't put me on shared storage)
|
||||
@ -128,6 +130,9 @@ class QuantumConfigurator(base.Configurator):
|
||||
|
||||
self.setup_rpc(config, 'quantum.openstack.common.rpc.impl_kombu')
|
||||
|
||||
config.current_section = "AGENT"
|
||||
config.add("root_helper", "sudo quantum-rootwrap /etc/quantum/rootwrap.conf")
|
||||
|
||||
config.current_section = "keystone_authtoken"
|
||||
for (k, v) in self._fetch_keystone_params().items():
|
||||
config.add(k, v)
|
||||
|
@ -16,7 +16,6 @@
|
||||
|
||||
from anvil import colorizer
|
||||
from anvil import log as logging
|
||||
from anvil import shell as sh
|
||||
from anvil import utils
|
||||
|
||||
from anvil.components import base_install as binstall
|
||||
@ -136,58 +135,6 @@ class DBInstaller(binstall.PkgInstallComponent):
|
||||
|
||||
|
||||
class DBRuntime(bruntime.ProgramRuntime):
|
||||
def _get_command(self, action):
|
||||
db_type = self.get_option("type")
|
||||
distro_options = self.distro.get_command_config(db_type)
|
||||
if distro_options is None:
|
||||
raise NotImplementedError(BASE_ERROR % (action, db_type))
|
||||
return self.distro.get_command(db_type, action)
|
||||
|
||||
@property
|
||||
def applications(self):
|
||||
db_type = self.get_option("type")
|
||||
return [
|
||||
bruntime.Program(db_type),
|
||||
]
|
||||
|
||||
def _run_action(self, action, check_exit_code=True):
|
||||
cmd = self._get_command(action)
|
||||
if not cmd:
|
||||
raise NotImplementedError("No distro command provided to perform action %r" % (action))
|
||||
return sh.execute(cmd, check_exit_code=check_exit_code)
|
||||
|
||||
def start(self):
|
||||
if self.statii()[0].status != bruntime.STATUS_STARTED:
|
||||
self._run_action('start')
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def stop(self):
|
||||
if self.statii()[0].status != bruntime.STATUS_STOPPED:
|
||||
self._run_action('stop')
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def restart(self):
|
||||
LOG.info("Restarting your database.")
|
||||
self._run_action('restart')
|
||||
return 1
|
||||
|
||||
def statii(self):
|
||||
(sysout, stderr) = self._run_action('status', False)
|
||||
combined = (sysout + stderr).lower()
|
||||
st = bruntime.STATUS_UNKNOWN
|
||||
if combined.find("running") != -1:
|
||||
st = bruntime.STATUS_STARTED
|
||||
elif utils.has_any(combined, 'stop', 'unrecognized'):
|
||||
st = bruntime.STATUS_STOPPED
|
||||
return [
|
||||
bruntime.ProgramStatus(name=self.applications[0].name,
|
||||
status=st,
|
||||
details={
|
||||
'STDOUT': sysout,
|
||||
'STDERR': stderr,
|
||||
}),
|
||||
]
|
||||
return [self.distro.get_command(self.get_option("type"), "daemon")[0]]
|
||||
|
@ -16,7 +16,6 @@
|
||||
|
||||
from anvil import colorizer
|
||||
from anvil import log as logging
|
||||
from anvil import shell as sh
|
||||
from anvil import utils
|
||||
|
||||
from anvil.utils import OrderedDict
|
||||
@ -33,10 +32,10 @@ from anvil.components.configurators import glance as gconf
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Sync db command
|
||||
SYNC_DB_CMD = [sh.joinpths('$BIN_DIR', 'glance-manage'),
|
||||
'--debug', '-v',
|
||||
# Available commands:
|
||||
'db_sync']
|
||||
SYNC_DB_CMD = ['sudo', '-u', 'glance', '/usr/bin/glance-manage',
|
||||
'--debug', '-v',
|
||||
# Available commands:
|
||||
'db_sync']
|
||||
|
||||
|
||||
class GlanceInstaller(binstall.PythonInstallComponent):
|
||||
@ -63,38 +62,14 @@ class GlanceInstaller(binstall.PythonInstallComponent):
|
||||
to_set[("GLANCE_%s_URI" % (endpoint.upper()))] = details['uri']
|
||||
return to_set
|
||||
|
||||
def config_params(self, config_fn):
|
||||
# These will be used to fill in the configuration params
|
||||
mp = binstall.PythonInstallComponent.config_params(self, config_fn)
|
||||
mp['BIN_DIR'] = self.bin_dir
|
||||
return mp
|
||||
|
||||
|
||||
class GlanceRuntime(bruntime.PythonRuntime):
|
||||
@property
|
||||
def applications(self):
|
||||
apps = []
|
||||
for (name, _values) in self.subsystems.items():
|
||||
name = "glance-%s" % (name.lower())
|
||||
path = sh.joinpths(self.bin_dir, name)
|
||||
if sh.is_executable(path):
|
||||
apps.append(bruntime.Program(name, path, argv=self._fetch_argv(name)))
|
||||
return apps
|
||||
|
||||
def _fetch_argv(self, name):
|
||||
if name.find('api') != -1:
|
||||
return ['--config-file', sh.joinpths('$CONFIG_DIR', gconf.API_CONF)]
|
||||
elif name.find('registry') != -1:
|
||||
return ['--config-file', sh.joinpths('$CONFIG_DIR', gconf.REG_CONF)]
|
||||
else:
|
||||
return []
|
||||
|
||||
class GlanceRuntime(bruntime.OpenStackRuntime):
|
||||
def _get_image_urls(self):
|
||||
uris = self.get_option('image_urls', default_value=[])
|
||||
return [u.strip() for u in uris if len(u.strip())]
|
||||
|
||||
def post_start(self):
|
||||
bruntime.PythonRuntime.post_start(self)
|
||||
bruntime.OpenStackRuntime.post_start(self)
|
||||
if self.get_bool_option('load-images'):
|
||||
# Install any images that need activating...
|
||||
self.wait_active()
|
||||
|
@ -14,154 +14,15 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from anvil import exceptions as excp
|
||||
from anvil import log as logging
|
||||
from anvil import shell as sh
|
||||
from anvil import utils
|
||||
|
||||
from anvil.components import base_install as binstall
|
||||
from anvil.components import base_runtime as bruntime
|
||||
|
||||
from anvil.components.configurators import horizon as hconf
|
||||
|
||||
import binascii
|
||||
import os
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# See https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY
|
||||
#
|
||||
# Needs to be a multiple of 2 for our usage...
|
||||
SECRET_KEY_LEN = 10
|
||||
|
||||
# Users which apache may not like starting as..
|
||||
BAD_APACHE_USERS = ['root']
|
||||
|
||||
|
||||
class HorizonUninstaller(binstall.PkgUninstallComponent):
|
||||
def __init__(self, *args, **kargs):
|
||||
binstall.PkgUninstallComponent.__init__(self, *args, **kargs)
|
||||
|
||||
|
||||
class HorizonInstaller(binstall.PythonInstallComponent):
|
||||
def __init__(self, *args, **kargs):
|
||||
binstall.PythonInstallComponent.__init__(self, *args, **kargs)
|
||||
self.blackhole_dir = sh.joinpths(self.get_option('app_dir'), '.blackhole')
|
||||
self.access_log = sh.joinpths('/var/log/',
|
||||
self.distro.get_command_config('apache', 'name'),
|
||||
'horizon_access.log')
|
||||
self.error_log = sh.joinpths('/var/log/',
|
||||
self.distro.get_command_config('apache', 'name'),
|
||||
'horizon_error.log')
|
||||
self.configurator = hconf.HorizonConfigurator(self)
|
||||
|
||||
def verify(self):
|
||||
binstall.PythonInstallComponent.verify(self)
|
||||
self._check_ug()
|
||||
|
||||
def _check_ug(self):
|
||||
(user, group) = self._get_apache_user_group()
|
||||
if not sh.user_exists(user):
|
||||
msg = "No user named %r exists on this system!" % (user)
|
||||
raise excp.ConfigException(msg)
|
||||
if not sh.group_exists(group):
|
||||
msg = "No group named %r exists on this system!" % (group)
|
||||
raise excp.ConfigException(msg)
|
||||
if user in BAD_APACHE_USERS:
|
||||
msg = ("You may want to adjust your configuration, "
|
||||
"(user=%s, group=%s) will not work with apache!"
|
||||
% (user, group))
|
||||
raise excp.ConfigException(msg)
|
||||
|
||||
def _setup_blackhole(self):
|
||||
# Create an empty directory that apache uses as docroot
|
||||
sh.mkdirslist(self.blackhole_dir, tracewriter=self.tracewriter)
|
||||
|
||||
def _setup_logs(self, clear=False):
|
||||
log_fns = [self.access_log, self.error_log]
|
||||
utils.log_iterable(log_fns, logger=LOG,
|
||||
header="Adjusting %s log files" % (len(log_fns)))
|
||||
for fn in log_fns:
|
||||
if clear:
|
||||
sh.unlink(fn, True)
|
||||
sh.touch_file(fn, die_if_there=False, tracewriter=self.tracewriter)
|
||||
sh.chmod(fn, 0666)
|
||||
return len(log_fns)
|
||||
|
||||
def _configure_files(self):
|
||||
am = binstall.PythonInstallComponent._configure_files(self)
|
||||
am += self._setup_logs(self.get_bool_option('clear-logs'))
|
||||
return am
|
||||
|
||||
def post_install(self):
|
||||
binstall.PythonInstallComponent.post_install(self)
|
||||
if self.get_bool_option('make-blackhole'):
|
||||
self._setup_blackhole()
|
||||
|
||||
def _get_apache_user_group(self):
|
||||
return (self.get_option('apache_user'), self.get_option('apache_group'))
|
||||
|
||||
def config_params(self, config_fn):
|
||||
# This dict will be used to fill in the configuration
|
||||
# params with actual values
|
||||
mp = binstall.PythonInstallComponent.config_params(self, config_fn)
|
||||
if config_fn == hconf.HORIZON_APACHE_CONF:
|
||||
(user, group) = self._get_apache_user_group()
|
||||
mp['GROUP'] = group
|
||||
mp['USER'] = user
|
||||
mp['HORIZON_DIR'] = self.get_option('app_dir')
|
||||
mp['HORIZON_PORT'] = self.get_int_option('port', default_value=80)
|
||||
mp['APACHE_NAME'] = self.distro.get_command_config('apache', 'name')
|
||||
mp['ERROR_LOG'] = self.error_log
|
||||
mp['ACCESS_LOG'] = self.access_log
|
||||
mp['BLACK_HOLE_DIR'] = self.blackhole_dir
|
||||
else:
|
||||
mp['OPENSTACK_HOST'] = self.get_option('ip')
|
||||
if SECRET_KEY_LEN <= 0:
|
||||
mp['SECRET_KEY'] = ''
|
||||
else:
|
||||
mp['SECRET_KEY'] = binascii.b2a_hex(os.urandom(SECRET_KEY_LEN / 2))
|
||||
return mp
|
||||
|
||||
|
||||
class HorizonRuntime(bruntime.ProgramRuntime):
|
||||
def start(self):
|
||||
if self.statii()[0].status != bruntime.STATUS_STARTED:
|
||||
self._run_action('start')
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def _run_action(self, action, check_exit_code=True):
|
||||
cmd = self.distro.get_command('apache', action)
|
||||
if not cmd:
|
||||
raise NotImplementedError("No distro command provided to perform action %r" % (action))
|
||||
return sh.execute(cmd, check_exit_code=check_exit_code)
|
||||
|
||||
def restart(self):
|
||||
self._run_action('restart')
|
||||
return 1
|
||||
|
||||
def stop(self):
|
||||
if self.statii()[0].status != bruntime.STATUS_STOPPED:
|
||||
self._run_action('stop')
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def statii(self):
|
||||
(sysout, stderr) = self._run_action('status', check_exit_code=False)
|
||||
combined = (sysout + stderr).lower()
|
||||
st = bruntime.STATUS_UNKNOWN
|
||||
if combined.find("is running") != -1:
|
||||
st = bruntime.STATUS_STARTED
|
||||
elif utils.has_any(combined, 'stopped', 'unrecognized', 'not running'):
|
||||
st = bruntime.STATUS_STOPPED
|
||||
return [
|
||||
bruntime.ProgramStatus(name='apache',
|
||||
status=st,
|
||||
details={
|
||||
'STDOUT': sysout,
|
||||
'STDERR': stderr,
|
||||
}),
|
||||
]
|
||||
class HorizonRuntime(bruntime.ServiceRuntime):
|
||||
@property
|
||||
def applications(self):
|
||||
return [self.distro.get_command("apache", "daemon")[0]]
|
||||
|
@ -42,9 +42,8 @@ INIT_WHAT_FN = 'init_what.yaml'
|
||||
INIT_WHAT_HAPPENED = "keystone.inited.yaml"
|
||||
|
||||
# Invoking the keystone manage command uses this template
|
||||
MANAGE_CMD = [sh.joinpths('$BIN_DIR', 'keystone-manage'),
|
||||
'--config-file=$CONFIG_FILE',
|
||||
'--debug', '-v']
|
||||
MANAGE_CMD = ['sudo', '-u', 'keystone', '/usr/bin/keystone-manage',
|
||||
'--debug', '-v']
|
||||
|
||||
|
||||
class KeystoneInstaller(binstall.PythonInstallComponent):
|
||||
@ -75,7 +74,6 @@ class KeystoneInstaller(binstall.PythonInstallComponent):
|
||||
to_set['OS_TENANT_NAME'] = params['admin_tenant']
|
||||
to_set['OS_USERNAME'] = params['admin_user']
|
||||
to_set['OS_AUTH_URL'] = params['endpoints']['public']['uri']
|
||||
to_set['SERVICE_ENDPOINT'] = params['endpoints']['admin']['uri']
|
||||
for (endpoint, details) in params['endpoints'].items():
|
||||
if endpoint.find('templated') != -1:
|
||||
continue
|
||||
@ -94,17 +92,10 @@ class KeystoneInstaller(binstall.PythonInstallComponent):
|
||||
def warm_configs(self):
|
||||
khelper.get_shared_passwords(self)
|
||||
|
||||
def config_params(self, config_fn):
|
||||
# These will be used to fill in the configuration params
|
||||
mp = binstall.PythonInstallComponent.config_params(self, config_fn)
|
||||
mp['BIN_DIR'] = self.bin_dir
|
||||
mp['CONFIG_FILE'] = sh.joinpths(self.get_option('cfg_dir'), kconf.ROOT_CONF)
|
||||
return mp
|
||||
|
||||
|
||||
class KeystoneRuntime(bruntime.PythonRuntime):
|
||||
class KeystoneRuntime(bruntime.OpenStackRuntime):
|
||||
def __init__(self, *args, **kargs):
|
||||
bruntime.PythonRuntime.__init__(self, *args, **kargs)
|
||||
bruntime.OpenStackRuntime.__init__(self, *args, **kargs)
|
||||
self.init_fn = sh.joinpths(self.get_option('trace_dir'), INIT_WHAT_HAPPENED)
|
||||
|
||||
def _filter_init(self, init_what):
|
||||
@ -145,25 +136,6 @@ class KeystoneRuntime(bruntime.PythonRuntime):
|
||||
sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
|
||||
LOG.info("If you wish to re-run initialization, delete %s", colorizer.quote(self.init_fn))
|
||||
|
||||
@property
|
||||
def applications(self):
|
||||
apps = []
|
||||
for (name, _values) in self.subsystems.items():
|
||||
name = "keystone-%s" % (name.lower())
|
||||
path = sh.joinpths(self.bin_dir, name)
|
||||
if sh.is_executable(path):
|
||||
apps.append(bruntime.Program(name, path, argv=self._fetch_argv(name)))
|
||||
return apps
|
||||
|
||||
def _fetch_argv(self, name):
|
||||
return [
|
||||
'--config-file=%s' % (sh.joinpths('$CONFIG_DIR', kconf.ROOT_CONF)),
|
||||
"--debug",
|
||||
'--verbose',
|
||||
'--nouse-syslog',
|
||||
'--log-config=%s' % (sh.joinpths('$CONFIG_DIR', kconf.LOGGING_CONF)),
|
||||
]
|
||||
|
||||
|
||||
class KeystoneTester(btesting.PythonTestingComponent):
|
||||
# Disable the keystone client integration tests
|
||||
|
@ -35,13 +35,13 @@ NET_INITED_FN = 'nova.network.inited.yaml'
|
||||
|
||||
# This makes the database be in sync with nova
|
||||
DB_SYNC_CMD = [
|
||||
{'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE', 'db', 'sync']},
|
||||
{'cmd': ['sudo', '-u', 'nova', '/usr/bin/nova-manage', 'db', 'sync']},
|
||||
]
|
||||
|
||||
# Used to create a fixed network when initializing nova
|
||||
FIXED_NET_CMDS = [
|
||||
{
|
||||
'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE',
|
||||
'cmd': ['sudo', '-u', 'nova', '/usr/bin/nova-manage',
|
||||
'network', 'create', 'private', '$FIXED_RANGE', '1', '$FIXED_NETWORK_SIZE'],
|
||||
},
|
||||
]
|
||||
@ -49,10 +49,11 @@ FIXED_NET_CMDS = [
|
||||
# Used to create a floating network + test floating pool
|
||||
FLOATING_NET_CMDS = [
|
||||
{
|
||||
'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE', 'floating', 'create', '$FLOATING_RANGE'],
|
||||
'cmd': ['sudo', '-u', 'nova', '/usr/bin/nova-manage',
|
||||
'floating', 'create', '$FLOATING_RANGE'],
|
||||
},
|
||||
{
|
||||
'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE',
|
||||
'cmd': ['sudo', '-u', 'nova', '/usr/bin/nova-manage',
|
||||
'floating', 'create', '--ip_range=$TEST_FLOATING_RANGE', '--pool=$TEST_FLOATING_POOL'],
|
||||
},
|
||||
]
|
||||
@ -136,19 +137,12 @@ class NovaInstaller(binstall.PythonInstallComponent):
|
||||
# Patch up your virtualization system
|
||||
self._fix_virt()
|
||||
|
||||
def config_params(self, config_fn):
|
||||
mp = binstall.PythonInstallComponent.config_params(self, config_fn)
|
||||
mp['CFG_FILE'] = sh.joinpths(self.get_option('cfg_dir'), nconf.API_CONF)
|
||||
mp['BIN_DIR'] = self.bin_dir
|
||||
return mp
|
||||
|
||||
|
||||
class NovaRuntime(bruntime.PythonRuntime):
|
||||
class NovaRuntime(bruntime.OpenStackRuntime):
|
||||
def __init__(self, *args, **kargs):
|
||||
bruntime.PythonRuntime.__init__(self, *args, **kargs)
|
||||
bruntime.OpenStackRuntime.__init__(self, *args, **kargs)
|
||||
self.wait_time = self.get_int_option('service_wait_seconds')
|
||||
self.virsh = lv.Virsh(self.wait_time, self.distro)
|
||||
self.config_path = sh.joinpths(self.get_option('cfg_dir'), nconf.API_CONF)
|
||||
self.net_init_fn = sh.joinpths(self.get_option('trace_dir'), NET_INITED_FN)
|
||||
|
||||
def _do_network_init(self):
|
||||
@ -156,11 +150,7 @@ class NovaRuntime(bruntime.PythonRuntime):
|
||||
if not sh.isfile(ran_fn) and self.get_bool_option('do-network-init'):
|
||||
# Figure out the commands to run
|
||||
cmds = []
|
||||
mp = {
|
||||
'CFG_FILE': self.config_path,
|
||||
'BIN_DIR': self.bin_dir
|
||||
}
|
||||
mp['BIN_DIR'] = self.bin_dir
|
||||
mp = {}
|
||||
if self.get_bool_option('enable_fixed'):
|
||||
# Create a fixed network
|
||||
mp['FIXED_NETWORK_SIZE'] = self.get_option('fixed_network_size', default_value='256')
|
||||
@ -187,19 +177,9 @@ class NovaRuntime(bruntime.PythonRuntime):
|
||||
def post_start(self):
|
||||
self._do_network_init()
|
||||
|
||||
@property
|
||||
def applications(self):
|
||||
apps = []
|
||||
for (name, _values) in self.subsystems.items():
|
||||
name = "nova-%s" % (name.lower())
|
||||
path = sh.joinpths(self.bin_dir, name)
|
||||
if sh.is_executable(path):
|
||||
apps.append(bruntime.Program(name, path, argv=self._fetch_argv(name)))
|
||||
return apps
|
||||
|
||||
def pre_start(self):
|
||||
# Let the parent class do its thing
|
||||
bruntime.PythonRuntime.pre_start(self)
|
||||
bruntime.OpenStackRuntime.pre_start(self)
|
||||
virt_driver = utils.canon_virt_driver(self.get_option('virt_driver'))
|
||||
if virt_driver == 'libvirt':
|
||||
virt_type = lv.canon_libvirt_type(self.get_option('libvirt_type'))
|
||||
@ -213,13 +193,3 @@ class NovaRuntime(bruntime.PythonRuntime):
|
||||
"perhaps you should be using %r instead: %s" %
|
||||
(virt_type, lv.DEF_VIRT_TYPE, e))
|
||||
raise excp.StartException(msg)
|
||||
|
||||
def app_params(self, program):
|
||||
params = bruntime.PythonRuntime.app_params(self, program)
|
||||
params['CFG_FILE'] = self.config_path
|
||||
return params
|
||||
|
||||
def _fetch_argv(self, name):
|
||||
return [
|
||||
'--config-file', '$CFG_FILE',
|
||||
]
|
||||
|
@ -1,43 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from anvil import shell as sh
|
||||
|
||||
from anvil.components import base_runtime as bruntime
|
||||
|
||||
# Where the application is really
|
||||
UTIL_DIR = 'utils'
|
||||
|
||||
VNC_PROXY_APP = 'nova-novncproxy'
|
||||
|
||||
|
||||
class NoVNCRuntime(bruntime.PythonRuntime):
|
||||
@property
|
||||
def applications(self):
|
||||
path = sh.joinpths(self.get_option('app_dir'), UTIL_DIR, VNC_PROXY_APP)
|
||||
argv = ['--config-file', self._get_nova_conf(), '--web', '.']
|
||||
return [
|
||||
bruntime.Program(VNC_PROXY_APP, path, argv=argv),
|
||||
]
|
||||
|
||||
def _get_nova_conf(self):
|
||||
nova_comp_name = self.get_option('nova-component')
|
||||
if nova_comp_name in self.instances:
|
||||
# FIXME(harlowja): Have to reach into the nova component to get the config path (puke)
|
||||
nova_runtime = self.instances[nova_comp_name]
|
||||
return nova_runtime.config_path
|
||||
else:
|
||||
raise RuntimeError("NoVNC can not be started without the location of the nova configuration file")
|
@ -16,21 +16,30 @@
|
||||
|
||||
from anvil import colorizer
|
||||
from anvil import log as logging
|
||||
from anvil import shell as sh
|
||||
|
||||
from anvil.components import base
|
||||
from anvil.components import base_install as binstall
|
||||
from anvil.components import base_runtime as bruntime
|
||||
|
||||
from anvil.components.configurators import quantum as qconf
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Sync db command
|
||||
# FIXME(aababilov)
|
||||
SYNC_DB_CMD = [sh.joinpths("$BIN_DIR", "quantum-db-manage"),
|
||||
SYNC_DB_CMD = ["sudo", "-u", "quantum", "/usr/bin/quantum-db-manage",
|
||||
"sync"]
|
||||
|
||||
|
||||
class QuantumInstaller(binstall.PythonInstallComponent):
|
||||
class QuantumPluginMixin(base.Component):
|
||||
def subsystem_names(self):
|
||||
core_plugin = self.get_option("core_plugin")
|
||||
return [(name if name != "agent" else "%s-agent" % (core_plugin))
|
||||
for name in self.subsystems.iterkeys()]
|
||||
|
||||
|
||||
class QuantumInstaller(binstall.PythonInstallComponent, QuantumPluginMixin):
|
||||
def __init__(self, *args, **kargs):
|
||||
super(QuantumInstaller, self).__init__(*args, **kargs)
|
||||
self.configurator = qconf.QuantumConfigurator(self)
|
||||
@ -47,40 +56,10 @@ class QuantumInstaller(binstall.PythonInstallComponent):
|
||||
#utils.execute_template(*cmds, cwd=self.bin_dir,
|
||||
# params=self.config_params(None))
|
||||
|
||||
def config_params(self, config_fn):
|
||||
# These will be used to fill in the configuration params
|
||||
mp = super(QuantumInstaller, self).config_params(config_fn)
|
||||
mp["BIN_DIR"] = self.bin_dir
|
||||
return mp
|
||||
|
||||
class QuantumUninstaller(binstall.PkgUninstallComponent, QuantumPluginMixin):
|
||||
pass
|
||||
|
||||
|
||||
class QuantumRuntime(bruntime.PythonRuntime):
|
||||
|
||||
system = "quantum"
|
||||
|
||||
def __init__(self, *args, **kargs):
|
||||
super(QuantumRuntime, self).__init__(*args, **kargs)
|
||||
|
||||
self.config_path = sh.joinpths(self.get_option("cfg_dir"), qconf.API_CONF)
|
||||
|
||||
# TODO(aababilov): move to base class
|
||||
@property
|
||||
def applications(self):
|
||||
apps = []
|
||||
for (name, _values) in self.subsystems.items():
|
||||
name = "%s-%s" % (self.system, name.lower())
|
||||
path = sh.joinpths(self.bin_dir, name)
|
||||
if sh.is_executable(path):
|
||||
apps.append(bruntime.Program(
|
||||
name, path, argv=self._fetch_argv(name)))
|
||||
return apps
|
||||
|
||||
def app_params(self, program):
|
||||
params = bruntime.PythonRuntime.app_params(self, program)
|
||||
params["CFG_FILE"] = self.config_path
|
||||
return params
|
||||
|
||||
def _fetch_argv(self, name):
|
||||
return [
|
||||
"--config-file", "$CFG_FILE",
|
||||
]
|
||||
class QuantumRuntime(bruntime.OpenStackRuntime, QuantumPluginMixin):
|
||||
pass
|
||||
|
@ -28,28 +28,11 @@ from anvil import shell as sh
|
||||
from anvil import utils
|
||||
|
||||
from anvil.components import db
|
||||
from anvil.components import horizon
|
||||
from anvil.components import nova
|
||||
from anvil.components import rabbit
|
||||
|
||||
from anvil.components.configurators import horizon as hconf
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# See: http://wiki.libvirt.org/page/SSHPolicyKitSetup
|
||||
# FIXME(harlowja) take from distro config??
|
||||
LIBVIRT_POLICY_FN = "/etc/polkit-1/localauthority/50-local.d/50-libvirt-access.pkla"
|
||||
LIBVIRT_POLICY_CONTENTS = """
|
||||
[libvirt Management Access]
|
||||
Identity=${idents}
|
||||
Action=org.libvirt.unix.manage
|
||||
ResultAny=yes
|
||||
ResultInactive=yes
|
||||
ResultActive=yes
|
||||
"""
|
||||
DEF_IDENT = 'unix-group:libvirtd'
|
||||
|
||||
|
||||
class DBInstaller(db.DBInstaller):
|
||||
|
||||
@ -64,39 +47,6 @@ class DBInstaller(db.DBInstaller):
|
||||
sh.write_file_and_backup(DBInstaller.MYSQL_CONF, my_cnf.stringify())
|
||||
|
||||
|
||||
class HorizonInstaller(horizon.HorizonInstaller):
|
||||
|
||||
HTTPD_CONF = '/etc/httpd/conf/httpd.conf'
|
||||
|
||||
def __init__(self, *args, **kargs):
|
||||
horizon.HorizonInstaller.__init__(self, *args, **kargs)
|
||||
self.configurator = hconf.HorizonRhelConfigurator(self)
|
||||
|
||||
def _config_fix_httpd(self):
|
||||
LOG.info("Fixing up: %s", colorizer.quote(HorizonInstaller.HTTPD_CONF))
|
||||
(user, group) = self._get_apache_user_group()
|
||||
new_lines = []
|
||||
for line in sh.load_file(HorizonInstaller.HTTPD_CONF).splitlines():
|
||||
# Directives in the configuration files are case-insensitive,
|
||||
# but arguments to directives are often case sensitive...
|
||||
# NOTE(harlowja): we aren't handling multi-line fixups...
|
||||
if re.match(r"^\s*User\s+(.*)$", line, re.I):
|
||||
line = "User %s" % (user)
|
||||
if re.match(r"^\s*Group\s+(.*)$", line, re.I):
|
||||
line = "Group %s" % (group)
|
||||
if re.match(r"^\s*Listen\s+(.*)$", line, re.I):
|
||||
line = "Listen 0.0.0.0:80"
|
||||
new_lines.append(line)
|
||||
sh.write_file_and_backup(HorizonInstaller.HTTPD_CONF, utils.joinlinesep(*new_lines))
|
||||
|
||||
def _config_fixups(self):
|
||||
self._config_fix_httpd()
|
||||
|
||||
def post_install(self):
|
||||
horizon.HorizonInstaller.post_install(self)
|
||||
self._config_fixups()
|
||||
|
||||
|
||||
class RabbitRuntime(rabbit.RabbitRuntime):
|
||||
|
||||
def _fix_log_dir(self):
|
||||
@ -124,34 +74,3 @@ class RabbitRuntime(rabbit.RabbitRuntime):
|
||||
def restart(self):
|
||||
self._fix_log_dir()
|
||||
return rabbit.RabbitRuntime.restart(self)
|
||||
|
||||
|
||||
class NovaInstaller(nova.NovaInstaller):
|
||||
|
||||
def _get_policy(self, ident_users):
|
||||
return utils.expand_template(LIBVIRT_POLICY_CONTENTS,
|
||||
params={
|
||||
'idents': (";".join(ident_users)),
|
||||
})
|
||||
|
||||
def _get_policy_users(self):
|
||||
ident_users = [
|
||||
DEF_IDENT,
|
||||
'unix-user:%s' % (sh.getuser()),
|
||||
]
|
||||
return ident_users
|
||||
|
||||
def configure(self):
|
||||
configs_made = nova.NovaInstaller.configure(self)
|
||||
driver_canon = utils.canon_virt_driver(self.get_option('virt_driver'))
|
||||
if driver_canon == 'libvirt':
|
||||
# Create a libvirtd user group
|
||||
if not sh.group_exists('libvirtd'):
|
||||
cmd = ['groupadd', 'libvirtd']
|
||||
sh.execute(cmd)
|
||||
if not sh.isfile(LIBVIRT_POLICY_FN):
|
||||
contents = self._get_policy(self._get_policy_users())
|
||||
sh.mkdirslist(sh.dirname(LIBVIRT_POLICY_FN))
|
||||
sh.write_file(LIBVIRT_POLICY_FN, contents)
|
||||
configs_made += 1
|
||||
return configs_made
|
||||
|
@@ -123,7 +123,7 @@ class DependencyHandler(object):
                            splitlines()[-1].strip())
        return python_names

    def package(self):
    def package_start(self):
        requires_files = []
        extra_pips = []
        for inst in self.instances:
@@ -138,6 +138,12 @@
        self.gather_pips_to_install(requires_files, extra_pips)
        self.clean_pip_requires(requires_files)

    def package_instance(self, instance):
        pass

    def package_finish(self):
        pass

    def install(self):
        for inst in self.instances:
            for pkg in inst.get_option("nopackages") or []:
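
The single package() entry point is split into a start/per-instance/finish lifecycle
so the prepare action can package each component (and the "general" dependency bundle)
separately. A sketch of how a caller drives it (the handler construction and the
instance objects are anvil-specific and omitted here):

def package_all(handler, instances):
    # instances: ordered mapping of component name -> component instance
    handler.package_start()                 # gather requirements once, up front
    for instance in instances.values():
        handler.package_instance(instance)  # e.g. build the RPMs for one component
    handler.package_finish()                # e.g. run createrepo over the results
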
@ -15,10 +15,10 @@
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
import pkg_resources
|
||||
import sys
|
||||
|
||||
from datetime import datetime
|
||||
import pkg_resources
|
||||
import rpm
|
||||
|
||||
from anvil import colorizer
|
||||
from anvil import env
|
||||
@ -26,6 +26,7 @@ from anvil import log as logging
|
||||
from anvil.packaging import base
|
||||
from anvil.packaging.helpers import pip_helper
|
||||
from anvil.packaging.helpers import yum_helper
|
||||
from anvil import settings
|
||||
from anvil import shell as sh
|
||||
from anvil import utils
|
||||
|
||||
@ -45,8 +46,16 @@ class YumInstallHelper(base.InstallHelper):
|
||||
|
||||
|
||||
class YumDependencyHandler(base.DependencyHandler):
|
||||
OPENSTACK_DEPS_PACKAGE_NAME = "openstack-deps"
|
||||
OPENSTACK_EPOCH = 2
|
||||
SPEC_TEMPLATE_DIR = "packaging/specs"
|
||||
API_NAMES = {
|
||||
"nova": "Compute",
|
||||
"glance": "Image",
|
||||
"keystone": "Identity",
|
||||
"cinder": "Volume",
|
||||
"quantum": "Networking",
|
||||
}
|
||||
SERVER_NAMES = ["nova", "glance", "keystone", "quantum", "cinder"]
|
||||
py2rpm_executable = sh.which("py2rpm", ["tools/"])
|
||||
REPO_FN = "anvil.repo"
|
||||
YUM_REPO_DIR = "/etc/yum.repos.d/"
|
||||
@ -54,6 +63,7 @@ class YumDependencyHandler(base.DependencyHandler):
|
||||
'distribute',
|
||||
'setuptools',
|
||||
]
|
||||
rpmbuild_executable = sh.which("rpmbuild")
|
||||
|
||||
def __init__(self, distro, root_dir, instances):
|
||||
super(YumDependencyHandler, self).__init__(distro, root_dir, instances)
|
||||
@ -62,6 +72,8 @@ class YumDependencyHandler(base.DependencyHandler):
|
||||
self.deps_src_repo_dir = sh.joinpths(self.deps_dir, "openstack-deps-sources")
|
||||
self.anvil_repo_filename = sh.joinpths(self.deps_dir, self.REPO_FN)
|
||||
self.helper = yum_helper.Helper()
|
||||
self.rpm_sources_dir = sh.joinpths(self.rpmbuild_dir, "SOURCES")
|
||||
self.anvil_repo_dir = sh.joinpths(self.root_dir, "repo")
|
||||
|
||||
def py2rpm_start_cmdline(self):
|
||||
cmdline = [
|
||||
@ -88,12 +100,57 @@ class YumDependencyHandler(base.DependencyHandler):
|
||||
] + arch_dependent
|
||||
return cmdline
|
||||
|
||||
def package(self):
|
||||
super(YumDependencyHandler, self).package()
|
||||
self._write_all_deps_package()
|
||||
self._build_dependencies()
|
||||
self._build_openstack()
|
||||
self._create_deps_repo()
|
||||
def package_instance(self, instance):
|
||||
# clear before...
|
||||
sh.deldir(self.rpmbuild_dir)
|
||||
for dirname in (sh.joinpths(self.rpmbuild_dir, "SPECS"),
|
||||
sh.joinpths(self.rpmbuild_dir, "SOURCES")):
|
||||
sh.mkdir(dirname, recurse=True)
|
||||
if instance.name == "general":
|
||||
self._build_dependencies()
|
||||
self._move_rpms("anvil-deps")
|
||||
self._create_repo("anvil-deps")
|
||||
else:
|
||||
app_dir = instance.get_option("app_dir")
|
||||
if sh.isdir(app_dir):
|
||||
self._build_openstack_package(app_dir)
|
||||
self._move_rpms("anvil")
|
||||
# ...and after
|
||||
sh.deldir(self.rpmbuild_dir)
|
||||
|
||||
def package_finish(self):
|
||||
self._create_repo("anvil")
|
||||
|
||||
def _move_rpms(self, repo_name):
|
||||
repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
|
||||
src_repo_dir = "%s-sources" % repo_dir
|
||||
sh.mkdir(repo_dir, recurse=True)
|
||||
sh.mkdir(src_repo_dir, recurse=True)
|
||||
for filename in sh.listdir(sh.joinpths(self.rpmbuild_dir, "RPMS"),
|
||||
recursive=True, files_only=True):
|
||||
sh.move(filename, repo_dir, force=True)
|
||||
for filename in sh.listdir(sh.joinpths(self.rpmbuild_dir, "SRPMS"),
|
||||
recursive=True, files_only=True):
|
||||
sh.move(filename, src_repo_dir, force=True)
|
||||
return repo_dir
|
||||
|
||||
def _create_repo(self, repo_name):
|
||||
repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
|
||||
src_repo_dir = "%s-sources" % repo_dir
|
||||
for a_dir in repo_dir, src_repo_dir:
|
||||
cmdline = ["createrepo", a_dir]
|
||||
LOG.info("Creating repo at %s" % a_dir)
|
||||
sh.execute(cmdline)
|
||||
repo_filename = sh.joinpths(self.anvil_repo_dir, "%s.repo" % repo_name)
|
||||
LOG.info("Writing %s" % repo_filename)
|
||||
(_fn, content) = utils.load_template("packaging", "common.repo")
|
||||
params = {
|
||||
"repo_name": repo_name,
|
||||
"baseurl_bin": "file://%s" % repo_dir,
|
||||
"baseurl_src": "file://%s" % src_repo_dir
|
||||
}
|
||||
sh.write_file(
|
||||
repo_filename, utils.expand_template(content, params))
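For reference, with repo_name "anvil-deps" and an illustrative root dir of /home/stack/openstack (a placeholder), the file written here would expand the common.repo template added later in this change to roughly:

    [anvil-deps]
    name=anvil-deps
    baseurl=file:///home/stack/openstack/repo/anvil-deps
    gpgcheck=0

    [anvil-deps-source]
    name=anvil-deps - source
    baseurl=file:///home/stack/openstack/repo/anvil-deps-sources
    gpgcheck=0
    enabled=0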
|
||||
|
||||
def _get_yum_available(self):
|
||||
yum_map = {}
|
||||
@ -149,109 +206,6 @@ class YumDependencyHandler(base.DependencyHandler):
|
||||
def _get_component_name(pkg_dir):
|
||||
return sh.basename(sh.dirname(pkg_dir))
|
||||
|
||||
def _write_all_deps_package(self):
|
||||
spec_filename = sh.joinpths(
|
||||
self.rpmbuild_dir,
|
||||
"SPECS",
|
||||
"%s.spec" % self.OPENSTACK_DEPS_PACKAGE_NAME)
|
||||
|
||||
# Clean out previous dirs.
|
||||
for dirname in (self.rpmbuild_dir, self.deps_repo_dir,
|
||||
self.deps_src_repo_dir):
|
||||
sh.deldir(dirname)
|
||||
sh.mkdirslist(dirname, tracewriter=self.tracewriter)
|
||||
|
||||
def get_version_release():
|
||||
right_now = datetime.now()
|
||||
components = [
|
||||
str(right_now.year),
|
||||
str(right_now.month),
|
||||
str(right_now.day),
|
||||
]
|
||||
return (".".join(components), right_now.strftime("%s"))
|
||||
|
||||
(version, release) = get_version_release()
|
||||
spec_content = """Name: %s
|
||||
Version: %s
|
||||
Release: %s
|
||||
License: Apache 2.0
|
||||
Summary: OpenStack dependencies
|
||||
BuildArch: noarch
|
||||
|
||||
""" % (self.OPENSTACK_DEPS_PACKAGE_NAME, version, release)
|
||||
|
||||
packages = {}
|
||||
for inst in self.instances:
|
||||
try:
|
||||
for pack in inst.packages:
|
||||
packages[pack["name"]] = pack
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
scripts = {}
|
||||
script_map = {
|
||||
"pre-install": "%pre",
|
||||
"post-install": "%post",
|
||||
"pre-uninstall": "%preun",
|
||||
"post-uninstall": "%postun",
|
||||
}
|
||||
for pack_name in sorted(packages.iterkeys()):
|
||||
pack = packages[pack_name]
|
||||
cont = [spec_content, "Requires: ", pack["name"]]
|
||||
version = pack.get("version")
|
||||
if version:
|
||||
cont.append(" ")
|
||||
cont.append(version)
|
||||
cont.append("\n")
|
||||
spec_content = "".join(cont)
|
||||
for script_name in script_map.iterkeys():
|
||||
try:
|
||||
script_list = pack[script_name]
|
||||
except (KeyError, ValueError):
|
||||
continue
|
||||
script_body = scripts.get(script_name, "")
|
||||
script_body = "%s\n# %s\n" % (script_body, pack_name)
|
||||
for script in script_list:
|
||||
try:
|
||||
line = " ".join(
|
||||
sh.shellquote(word)
|
||||
for word in script["cmd"])
|
||||
except (KeyError, ValueError):
|
||||
continue
|
||||
if script.get("ignore_failure"):
|
||||
ignore = " 2>/dev/null || true"
|
||||
else:
|
||||
ignore = ""
|
||||
script_body = "".join((
|
||||
script_body,
|
||||
line,
|
||||
ignore,
|
||||
"\n"))
|
||||
scripts[script_name] = script_body
|
||||
|
||||
spec_content += "\n%description\n\n"
|
||||
for script_name in sorted(script_map.iterkeys()):
|
||||
try:
|
||||
script_body = scripts[script_name]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
spec_content = "%s\n%s\n%s\n" % (
|
||||
spec_content,
|
||||
script_map[script_name],
|
||||
script_body)
|
||||
|
||||
spec_content += "\n%files\n"
|
||||
sh.write_file(spec_filename, spec_content,
|
||||
tracewriter=self.tracewriter)
|
||||
cmdline = [
|
||||
"rpmbuild", "-ba",
|
||||
"--define", "_topdir %s" % self.rpmbuild_dir,
|
||||
spec_filename,
|
||||
]
|
||||
LOG.info("Building %s RPM" % self.OPENSTACK_DEPS_PACKAGE_NAME)
|
||||
sh.execute(cmdline)
|
||||
|
||||
def _build_dependencies(self):
|
||||
(pips_downloaded, package_files) = self.download_dependencies()
|
||||
|
||||
@ -313,44 +267,157 @@ BuildArch: noarch
|
||||
quiet=True)
|
||||
p_bar.update(i + 1)
|
||||
|
||||
def _build_openstack(self):
|
||||
if not self.package_dirs:
|
||||
LOG.warn("No RPM packages of OpenStack installs to build")
|
||||
return
|
||||
component_names = [self._get_component_name(d)
|
||||
for d in self.package_dirs]
|
||||
utils.log_iterable(sorted(component_names), logger=LOG,
|
||||
header=("Building %s OpenStack RPM"
|
||||
" packages") % (len(self.package_dirs)))
|
||||
with utils.progress_bar(name='Building',
|
||||
max_am=len(self.package_dirs)) as p_bar:
|
||||
for (i, pkg_dir) in enumerate(sorted(self.package_dirs)):
|
||||
component_name = self._get_component_name(pkg_dir)
|
||||
cmdline = self.py2rpm_start_cmdline() + ["--", pkg_dir]
|
||||
out_filename = sh.joinpths(self.log_dir,
|
||||
"py2rpm.%s.out" % (component_name))
|
||||
sh.execute_save_output(cmdline, out_filename=out_filename,
|
||||
quiet=True)
|
||||
p_bar.update(i + 1)
|
||||
@staticmethod
|
||||
def _python_setup_py_get(pkg_dir, field):
|
||||
"""
|
||||
:param field: e.g., "name" or "version"
|
||||
"""
|
||||
cmdline = [sys.executable, "setup.py", "--%s" % field]
|
||||
value = sh.execute(cmdline, cwd=pkg_dir)[0].splitlines()[-1].strip()
|
||||
if not value:
|
||||
LOG.error("Cannot determine %s for %s", field, pkg_dir)
|
||||
return value
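A quick hedged example of what this helper yields, with an illustrative checkout path and return values:

    name = self._python_setup_py_get("/opt/stack/keystone", "name")        # e.g. 'keystone'
    version = self._python_setup_py_get("/opt/stack/keystone", "version")  # e.g. '2013.1'

It simply shells out to "python setup.py --name" / "python setup.py --version" in the package directory and takes the last line of output.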
|
||||
|
||||
def _create_deps_repo(self):
|
||||
for filename in sh.listdir(sh.joinpths(self.rpmbuild_dir, "RPMS"),
|
||||
recursive=True, files_only=True):
|
||||
sh.move(filename, self.deps_repo_dir, force=True)
|
||||
for filename in sh.listdir(sh.joinpths(self.rpmbuild_dir, "SRPMS"),
|
||||
recursive=True, files_only=True):
|
||||
sh.move(filename, self.deps_src_repo_dir, force=True)
|
||||
for repo_dir in self.deps_repo_dir, self.deps_src_repo_dir:
|
||||
cmdline = ["createrepo", repo_dir]
|
||||
LOG.info("Creating repo at %s" % repo_dir)
|
||||
sh.execute(cmdline)
|
||||
LOG.info("Writing %s to %s", self.REPO_FN, self.anvil_repo_filename)
|
||||
(_fn, content) = utils.load_template('packaging', self.REPO_FN)
|
||||
params = {"baseurl_bin": "file://%s" % self.deps_repo_dir,
|
||||
"baseurl_src": "file://%s" % self.deps_src_repo_dir}
|
||||
sh.write_file(self.anvil_repo_filename,
|
||||
utils.expand_template(content, params),
|
||||
tracewriter=self.tracewriter)
|
||||
def _write_spec_file(self, pkg_dir, rpm_name, template_name, params):
|
||||
if not params.setdefault("requires", []):
|
||||
requires_filename = "%s/tools/pip-requires" % pkg_dir
|
||||
if sh.isfile(requires_filename):
|
||||
requires_python = []
|
||||
with open(requires_filename, "r") as requires_file:
|
||||
for line in requires_file.readlines():
|
||||
line = line.split("#", 1)[0].strip()
|
||||
if line:
|
||||
requires_python.append(line)
|
||||
if requires_python:
|
||||
params["requires"] = self._convert_names_python2rpm(
|
||||
requires_python)
|
||||
params["epoch"] = self.OPENSTACK_EPOCH
|
||||
content = utils.load_template(self.SPEC_TEMPLATE_DIR, template_name)[1]
|
||||
spec_filename = sh.joinpths(
|
||||
self.rpmbuild_dir, "SPECS", "%s.spec" % rpm_name)
|
||||
sh.write_file(spec_filename, utils.expand_template(content, params))
|
||||
return spec_filename
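When the caller does not pass a requires list, it is seeded from the component's tools/pip-requires, with comments and blank lines dropped, and then converted to RPM names. A small sketch of that stripping pass (the example lines and the python-migrate mapping are illustrative; the mapping itself comes from the distro YAML's dependency_handler table, visible later in this change):

    requires_python = []
    for line in ["sqlalchemy-migrate>=0.7  # for db migrations", "", "pytz"]:
        line = line.split("#", 1)[0].strip()
        if line:
            requires_python.append(line)
    # requires_python == ['sqlalchemy-migrate>=0.7', 'pytz']
    # _convert_names_python2rpm() then asks py2rpm for the matching RPM names,
    # e.g. sqlalchemy-migrate -> python-migrate, pytz -> pytz.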
|
||||
|
||||
def _copy_startup_scripts(self, spec_filename):
|
||||
common_init_content = utils.load_template(
|
||||
"packaging", "common.init")[1]
|
||||
for src in rpm.spec(spec_filename).sources:
|
||||
script = sh.basename(src[0])
|
||||
if not (script.endswith(".init")):
|
||||
continue
|
||||
target_filename = sh.joinpths(self.rpm_sources_dir, script)
|
||||
if sh.isfile(target_filename):
|
||||
continue
|
||||
bin_name = utils.strip_prefix_suffix(
|
||||
script, "openstack-", ".init")
|
||||
params = {
|
||||
"bin": bin_name,
|
||||
"package": bin_name.split("-", 1)[0],
|
||||
}
|
||||
sh.write_file(
|
||||
target_filename,
|
||||
utils.expand_template(common_init_content, params))
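A hedged sketch of how a spec Source such as openstack-cinder-api.init (listed in the cinder spec later in this change) is turned into template parameters here:

    script = "openstack-cinder-api.init"
    bin_name = utils.strip_prefix_suffix(script, "openstack-", ".init")   # -> 'cinder-api'
    params = {
        "bin": bin_name,                        # the /usr/bin entry point
        "package": bin_name.split("-", 1)[0],   # 'cinder' -> /etc/cinder, /var/run/cinder
    }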
|
||||
|
||||
def _copy_sources(self, pkg_dir):
|
||||
component_name = self._get_component_name(pkg_dir)
|
||||
other_sources_dir = sh.joinpths(
|
||||
settings.TEMPLATE_DIR, "packaging/sources", component_name)
|
||||
if sh.isdir(other_sources_dir):
|
||||
for filename in sh.listdir(other_sources_dir, files_only=True):
|
||||
sh.copy(filename, self.rpm_sources_dir)
|
||||
|
||||
def _build_from_spec(self, pkg_dir, spec_filename):
|
||||
if sh.isfile(sh.joinpths(pkg_dir, "setup.py")):
|
||||
self._write_python_tarball(pkg_dir)
|
||||
else:
|
||||
self._write_git_tarball(pkg_dir, spec_filename)
|
||||
self._copy_sources(pkg_dir)
|
||||
self._copy_startup_scripts(spec_filename)
|
||||
cmdline = [
|
||||
self.rpmbuild_executable,
|
||||
"-ba",
|
||||
"--define", "_topdir %s" % self.rpmbuild_dir,
|
||||
spec_filename,
|
||||
]
|
||||
sh.execute_save_output(
|
||||
cmdline, sh.joinpths(self.log_dir, sh.basename(spec_filename)))
|
||||
|
||||
def _write_git_tarball(self, pkg_dir, spec_filename):
|
||||
cmdline = [
|
||||
"rpm",
|
||||
"-q",
|
||||
"--specfile", spec_filename,
|
||||
"--qf", "%{NAME}-%{VERSION}\n"
|
||||
]
|
||||
tar_base = sh.execute(cmdline, cwd=pkg_dir)[0].splitlines()[0].strip()
|
||||
# git 1.7.1 from RHEL doesn't understand --format=tar.gz
|
||||
output_filename = sh.joinpths(
|
||||
self.rpm_sources_dir, "%s.tar" % tar_base)
|
||||
cmdline = [
|
||||
"git",
|
||||
"archive",
|
||||
"--format=tar",
|
||||
"--prefix=%s/" % tar_base,
|
||||
"--output=%s" % output_filename,
|
||||
"HEAD",
|
||||
]
|
||||
sh.execute(cmdline, cwd=pkg_dir)
|
||||
cmdline = ["gzip", output_filename]
|
||||
sh.execute(cmdline)
|
||||
|
||||
def _write_python_tarball(self, pkg_dir):
|
||||
cmdline = [
|
||||
sys.executable,
|
||||
"setup.py",
|
||||
"sdist",
|
||||
"--formats", "gztar",
|
||||
"--dist-dir", self.rpm_sources_dir,
|
||||
]
|
||||
sh.execute(cmdline, cwd=pkg_dir)
|
||||
|
||||
def _build_openstack_package(self, pkg_dir):
|
||||
component_name = self._get_component_name(pkg_dir)
|
||||
params = {}
|
||||
rpm_name = None
|
||||
template_name = None
|
||||
if sh.isfile(sh.joinpths(pkg_dir, "setup.py")):
|
||||
name = self._python_setup_py_get(pkg_dir, "name")
|
||||
params["version"] = self._python_setup_py_get(pkg_dir, "version")
|
||||
if component_name.endswith("client"):
|
||||
clientname = utils.strip_prefix_suffix(
|
||||
name, "python-", "client")
|
||||
if not clientname:
|
||||
LOG.error("Bad client package name %s", name)
|
||||
return
|
||||
params["clientname"] = clientname
|
||||
params["apiname"] = self.API_NAMES.get(
|
||||
clientname, clientname.title())
|
||||
rpm_name = name
|
||||
template_name = "python-commonclient.spec"
|
||||
elif component_name in self.SERVER_NAMES:
|
||||
rpm_name = "openstack-%s" % name
|
||||
elif component_name == "horizon":
|
||||
rpm_name = "python-django-horizon"
|
||||
else:
|
||||
rpm_name = component_name
|
||||
template_name = "%s.spec" % rpm_name
|
||||
spec_filename = sh.joinpths(
|
||||
settings.TEMPLATE_DIR,
|
||||
self.SPEC_TEMPLATE_DIR,
|
||||
template_name)
|
||||
if not sh.isfile(spec_filename):
|
||||
rpm_name = None
|
||||
if rpm_name:
|
||||
template_name = template_name or "%s.spec" % rpm_name
|
||||
spec_filename = self._write_spec_file(
|
||||
pkg_dir, rpm_name, template_name, params)
|
||||
self._build_from_spec(pkg_dir, spec_filename)
|
||||
else:
|
||||
cmdline = self.py2rpm_start_cmdline() + ["--", pkg_dir]
|
||||
sh.execute_save_output(
|
||||
cmdline,
|
||||
cwd=pkg_dir,
|
||||
out_filename=sh.joinpths(self.log_dir, component_name))
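To summarize the branches above with illustrative components (the spec templates named are what the code looks for under conf/templates/packaging/specs):

    # component          rpm_name                  spec template looked up
    # keystone-client -> python-keystoneclient     python-commonclient.spec
    #                    (clientname 'keystone', apiname 'Identity')
    # nova            -> openstack-nova            openstack-nova.spec  (via "%s.spec" % rpm_name)
    # horizon         -> python-django-horizon     python-django-horizon.spec
    # components without a usable name/template fall through to the plain py2rpm build above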
|
||||
|
||||
def _convert_names_python2rpm(self, python_names):
|
||||
if not python_names:
|
||||
@ -360,24 +427,27 @@ BuildArch: noarch
|
||||
for name in sh.execute(cmdline)[0].splitlines():
|
||||
# name is "Requires: rpm-name"
|
||||
try:
|
||||
rpm_names.append(name.split(":")[1].strip())
|
||||
rpm_names.append(name.split(":", 1)[1].strip())
|
||||
except IndexError:
|
||||
pass
|
||||
return rpm_names
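The maxsplit of 1 preserves any later colons in the requirement, which matters once py2rpm emits a versioned dependency carrying an epoch (the exact output format here is an assumption):

    line = "Requires: python-nova = 2:2013.1"
    line.split(":")[1].strip()     # -> 'python-nova = 2'         (epoch truncated)
    line.split(":", 1)[1].strip()  # -> 'python-nova = 2:2013.1'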
|
||||
|
||||
def install(self):
|
||||
super(YumDependencyHandler, self).install()
|
||||
repo_filename = sh.joinpths(self.YUM_REPO_DIR, self.REPO_FN)
|
||||
|
||||
# Ensure we copy the local repo file name to the main repo so that
|
||||
# yum will find it when installing packages.
|
||||
sh.write_file(repo_filename, sh.load_file(self.anvil_repo_filename),
|
||||
tracewriter=self.tracewriter)
|
||||
for repo_name in "anvil", "anvil-deps":
|
||||
repo_filename = sh.joinpths(
|
||||
self.anvil_repo_dir, "%s.repo" % repo_name)
|
||||
if sh.isfile(repo_filename):
|
||||
sh.write_file(
|
||||
"%s/%s.repo" % (self.YUM_REPO_DIR, repo_name),
|
||||
sh.load_file(repo_filename),
|
||||
tracewriter=self.tracewriter)
|
||||
|
||||
# Erase it if its been previously installed.
|
||||
cmdline = []
|
||||
if self.helper.is_installed(self.OPENSTACK_DEPS_PACKAGE_NAME):
|
||||
cmdline.append(self.OPENSTACK_DEPS_PACKAGE_NAME)
|
||||
for p in self.nopackages:
|
||||
if self.helper.is_installed(p):
|
||||
cmdline.append(p)
|
||||
@ -389,13 +459,17 @@ BuildArch: noarch
|
||||
cmdline = ["yum", "clean", "all"]
|
||||
sh.execute(cmdline)
|
||||
|
||||
cmdline = ["yum", "install", "-y", self.OPENSTACK_DEPS_PACKAGE_NAME]
|
||||
sh.execute(cmdline, stdout_fh=sys.stdout, stderr_fh=sys.stderr)
|
||||
rpm_names = []
|
||||
for inst in self.instances:
|
||||
for p in inst.package_names():
|
||||
if p not in self.nopackages:
|
||||
rpm_names.append(p)
|
||||
|
||||
rpm_names = self._convert_names_python2rpm(self.python_names)
|
||||
if rpm_names:
|
||||
cmdline = ["yum", "install", "-y"] + rpm_names
|
||||
cmdline = ["yum", "install", "-y"] + sorted(set(rpm_names))
|
||||
sh.execute(cmdline, stdout_fh=sys.stdout, stderr_fh=sys.stderr)
|
||||
for name in rpm_names:
|
||||
self.tracewriter.package_installed(name)
|
||||
|
||||
def uninstall(self):
|
||||
super(YumDependencyHandler, self).uninstall()
|
||||
@ -404,13 +478,14 @@ BuildArch: noarch
|
||||
no_remove = env.get_key('REQUIRED_PACKAGES', '').split()
|
||||
no_remove = sorted(set(no_remove))
|
||||
rpm_names = []
|
||||
for name in self._convert_names_python2rpm(self.python_names):
|
||||
if self.helper.is_installed(name) and name not in no_remove:
|
||||
rpm_names.append(name)
|
||||
for inst in self.instances:
|
||||
for p in inst.package_names():
|
||||
if self.helper.is_installed(p) and p not in no_remove:
|
||||
rpm_names.append(p)
|
||||
|
||||
if rpm_names:
|
||||
cmdline = ["yum", "remove", "--remove-leaves", "-y"]
|
||||
for p in no_remove:
|
||||
cmdline.append("--exclude=%s" % (p))
|
||||
cmdline.extend(rpm_names)
|
||||
cmdline.extend(sorted(set(rpm_names)))
|
||||
sh.execute(cmdline, stdout_fh=sys.stdout, stderr_fh=sys.stderr)
|
||||
|
@ -1,40 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import weakref
|
||||
|
||||
from anvil.components.base_runtime import STATUS_UNKNOWN
|
||||
|
||||
|
||||
class Runner(object):
|
||||
__meta__ = abc.ABCMeta
|
||||
|
||||
def __init__(self, runtime, name):
|
||||
self.runtime = weakref.proxy(runtime)
|
||||
self.name = name
|
||||
|
||||
def start(self, app_name, app_pth, app_dir, opts):
|
||||
# Returns a file name that contains what was started
|
||||
pass
|
||||
|
||||
def stop(self, app_name):
|
||||
# Stops the given app
|
||||
pass
|
||||
|
||||
def status(self, app_name):
|
||||
# Attempt to give the status of a app + details
|
||||
return (STATUS_UNKNOWN, '')
|
@ -1,187 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import errno
|
||||
import json
|
||||
|
||||
from anvil import exceptions as excp
|
||||
from anvil import log as logging
|
||||
from anvil import runners as base
|
||||
from anvil import shell as sh
|
||||
from anvil import trace as tr
|
||||
from anvil import utils
|
||||
|
||||
from anvil.components.base_runtime import (STATUS_STARTED, STATUS_UNKNOWN)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
PID_FN = "PID_FN"
|
||||
STDOUT_FN = "STDOUT_FN"
|
||||
STDERR_FN = "STDERR_FN"
|
||||
ARGS = "ARGS"
|
||||
NAME = "NAME"
|
||||
FORK_TEMPL = "%s.fork"
|
||||
|
||||
|
||||
class ForkFiles(object):
|
||||
def __init__(self, pid, stdout, stderr, trace=None):
|
||||
self.pid = pid
|
||||
self.stdout = stdout
|
||||
self.stderr = stderr
|
||||
self.trace = trace
|
||||
|
||||
def extract_pid(self):
|
||||
# Load the pid file and take out the pid from it...
|
||||
#
|
||||
# Typically said file has a integer pid in it so load said file
|
||||
# and covert its contents to an int or fail trying...
|
||||
if self.pid:
|
||||
try:
|
||||
return int(sh.load_file(self.pid).strip())
|
||||
except (ValueError, TypeError):
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
|
||||
def as_list(self):
|
||||
possibles = [self.pid, self.stdout, self.stderr, self.trace]
|
||||
return [i for i in possibles if i is not None]
|
||||
|
||||
def as_dict(self):
|
||||
return {
|
||||
PID_FN: self.pid,
|
||||
STDOUT_FN: self.stdout,
|
||||
STDERR_FN: self.stderr,
|
||||
}
|
||||
|
||||
|
||||
class ForkRunner(base.Runner):
|
||||
def stop(self, app_name):
|
||||
# The location of the pid file should be in the attached
|
||||
# runtimes trace directory, so see if we can find said file
|
||||
# and then attempt to kill the pid that exists in that file
|
||||
# which if succesffully will signal to the rest of this code
|
||||
# that we can go through and cleanup the other remnants of said
|
||||
# pid such as the stderr/stdout files that were being written to...
|
||||
trace_dir = self.runtime.get_option('trace_dir')
|
||||
if not sh.isdir(trace_dir):
|
||||
msg = "No trace directory found from which to stop: %r" % (app_name)
|
||||
raise excp.StopException(msg)
|
||||
fork_fns = self._form_file_names(app_name)
|
||||
skip_kill = True
|
||||
pid = None
|
||||
try:
|
||||
pid = fork_fns.extract_pid()
|
||||
skip_kill = False
|
||||
except IOError as e:
|
||||
if e.errno == errno.ENOENT:
|
||||
pass
|
||||
else:
|
||||
skip_kill = False
|
||||
if not skip_kill and pid is None:
|
||||
msg = "Could not extract a valid pid from %r" % (fork_fns.pid)
|
||||
raise excp.StopException(msg)
|
||||
# Bother trying to kill said process?
|
||||
if not skip_kill:
|
||||
(killed, attempts) = sh.kill(pid)
|
||||
else:
|
||||
(killed, attempts) = (True, 0)
|
||||
# Trash the files if it worked
|
||||
if killed:
|
||||
if not skip_kill:
|
||||
LOG.debug("Killed pid '%s' after %s attempts.", pid, attempts)
|
||||
for leftover_fn in fork_fns.as_list():
|
||||
if sh.exists(leftover_fn):
|
||||
LOG.debug("Removing forking related file %r", (leftover_fn))
|
||||
sh.unlink(leftover_fn)
|
||||
else:
|
||||
msg = "Could not stop %r after %s attempts" % (app_name, attempts)
|
||||
raise excp.StopException(msg)
|
||||
|
||||
def status(self, app_name):
|
||||
# Attempt to find the status of a given app by finding where that apps
|
||||
# pid file is and loading said pids details (from stderr/stdout) files
|
||||
# that should exist as well as by using shell utilities to determine
|
||||
# if said pid is still running...
|
||||
trace_dir = self.runtime.get_option('trace_dir')
|
||||
if not sh.isdir(trace_dir):
|
||||
return (STATUS_UNKNOWN, '')
|
||||
fork_fns = self._form_file_names(app_name)
|
||||
pid = fork_fns.extract_pid()
|
||||
stderr = ''
|
||||
try:
|
||||
stderr = sh.load_file(fork_fns.stderr)
|
||||
except (IOError, ValueError, TypeError):
|
||||
pass
|
||||
stdout = ''
|
||||
try:
|
||||
stdout = sh.load_file(fork_fns.stdout)
|
||||
except (IOError, ValueError, TypeError):
|
||||
pass
|
||||
details = {
|
||||
'STDOUT': stdout,
|
||||
'STDERR': stderr,
|
||||
}
|
||||
if pid is not None and sh.is_running(pid):
|
||||
return (STATUS_STARTED, details)
|
||||
else:
|
||||
return (STATUS_UNKNOWN, details)
|
||||
|
||||
def _form_file_names(self, app_name):
|
||||
# Form all files names which should be connected to the given forked application name
|
||||
fork_fn = FORK_TEMPL % (app_name)
|
||||
trace_dir = self.runtime.get_option('trace_dir')
|
||||
trace_fn = None
|
||||
if trace_dir:
|
||||
trace_fn = tr.trace_filename(trace_dir, fork_fn)
|
||||
base_fork_fn = sh.joinpths(trace_dir, fork_fn)
|
||||
return ForkFiles(pid=base_fork_fn + ".pid",
|
||||
stdout=base_fork_fn + ".stdout",
|
||||
stderr=base_fork_fn + ".stderr",
|
||||
trace=trace_fn)
|
||||
|
||||
def _begin_start(self, app_name, app_pth, app_wkdir, args):
|
||||
fork_fns = self._form_file_names(app_name)
|
||||
trace_fn = fork_fns.trace
|
||||
# Ensure all arguments for this app in string format
|
||||
args = [str(i) for i in args if i is not None]
|
||||
if trace_fn:
|
||||
# Not needed, but useful to know where the files are located at
|
||||
#
|
||||
# TODO(harlowja): use this info instead of forming the filenames
|
||||
# repeatly
|
||||
trace_info = {}
|
||||
trace_info.update(fork_fns.as_dict())
|
||||
# Useful to know what args were sent along
|
||||
trace_info[ARGS] = json.dumps(args)
|
||||
run_trace = tr.TraceWriter(trace_fn)
|
||||
for (k, v) in trace_info.items():
|
||||
if v is not None:
|
||||
run_trace.trace(k, v)
|
||||
LOG.debug("Forking %r by running command %r with args (%s)" % (app_name, app_pth, " ".join(args)))
|
||||
sh.fork(app_pth, app_wkdir, fork_fns.pid, fork_fns.stdout, fork_fns.stderr, *args)
|
||||
return trace_fn
|
||||
|
||||
def _post_start(self, app_name):
|
||||
fork_fns = self._form_file_names(app_name)
|
||||
utils.log_iterable(fork_fns.as_list(),
|
||||
header="Forked %s with details in the following files" % (app_name),
|
||||
logger=LOG)
|
||||
|
||||
def start(self, app_name, app_pth, app_dir, opts):
|
||||
trace_fn = self._begin_start(app_name, app_pth, app_dir, opts)
|
||||
self._post_start(app_name)
|
||||
return trace_fn
|
@ -469,6 +469,8 @@ def write_file(fn, text, flush=True, quiet=False, tracewriter=None):
|
||||
if not is_dry_run():
|
||||
mkdirslist(dirname(fn), tracewriter=tracewriter)
|
||||
with open(fn, "w") as fh:
|
||||
if isinstance(text, unicode):
|
||||
text = text.encode("utf-8")
|
||||
fh.write(text)
|
||||
if flush:
|
||||
fh.flush()
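The added encode step guards a Python 2 pitfall (this codebase is Python 2, hence the unicode check): writing a unicode string with non-ASCII characters to a file opened in text mode falls back to the ASCII codec and raises. A tiny illustration (the path is a placeholder):

    text = u"caf\xe9"
    open("/tmp/example.txt", "w").write(text)                  # raises UnicodeEncodeError
    open("/tmp/example.txt", "w").write(text.encode("utf-8"))  # what the code above does instead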
|
||||
|
@ -69,6 +69,10 @@ class TraceWriter(object):
|
||||
what['from'] = uri
|
||||
self.trace(DOWNLOADED, json.dumps(what))
|
||||
|
||||
def pip_installed(self, pip_info):
|
||||
self._start()
|
||||
self.trace(PIP_INSTALL, json.dumps(pip_info))
|
||||
|
||||
def dirs_made(self, *dirs):
|
||||
self._start()
|
||||
for d in dirs:
|
||||
@ -78,6 +82,10 @@ class TraceWriter(object):
|
||||
self._start()
|
||||
self.trace(FILE_TOUCHED, fn)
|
||||
|
||||
def package_installed(self, pkg_name):
|
||||
self._start()
|
||||
self.trace(PKG_INSTALL, pkg_name)
|
||||
|
||||
def app_started(self, name, info_fn, how):
|
||||
self._start()
|
||||
data = dict()
|
||||
@ -190,3 +198,12 @@ class TraceReader(object):
|
||||
if type(pip_info_full) is dict:
|
||||
pips_installed.append(pip_info_full)
|
||||
return pips_installed
|
||||
|
||||
def packages_installed(self):
|
||||
lines = self.read()
|
||||
pkgs_installed = list()
|
||||
pkg_list = list()
|
||||
for (cmd, action) in lines:
|
||||
if cmd == PKG_INSTALL and len(action):
|
||||
pkg_list.append(action)
|
||||
return pkg_list
|
||||
|
@ -573,3 +573,11 @@ def canon_virt_driver(virt_driver):
|
||||
if not (virt_driver in VIRT_DRIVER_MAP):
|
||||
return 'libvirt'
|
||||
return virt_driver
|
||||
|
||||
|
||||
def strip_prefix_suffix(line, prefix=None, suffix=None):
|
||||
if prefix and line.startswith(prefix):
|
||||
line = line[len(prefix):]
|
||||
if suffix and line.endswith(suffix):
|
||||
line = line[:-len(suffix)]
|
||||
return line
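A few quick examples of how this helper behaves, matching its uses elsewhere in this change:

    strip_prefix_suffix("openstack-cinder-api.init", "openstack-", ".init")  # -> 'cinder-api'
    strip_prefix_suffix("python-keystoneclient", "python-", "client")        # -> 'keystone'
    strip_prefix_suffix("keystone", "python-", "client")                     # -> 'keystone' (nothing matches)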
|
||||
|
@ -1,8 +1,5 @@
|
||||
# Settings for component general
|
||||
---
|
||||
# Python component run type to use (defaults to forking)
|
||||
run_type: "anvil.runners.fork:ForkRunner"
|
||||
|
||||
ip: "$(auto:ip)"
|
||||
|
||||
# How many seconds to wait until a service comes online before using it.
|
||||
@ -39,5 +36,5 @@ wanted_passwords:
|
||||
admin_password: 'keystone admin user'
|
||||
service_password: 'service authentication password'
|
||||
sql: "database user"
|
||||
|
||||
|
||||
...
|
||||
|
@ -3,18 +3,4 @@
|
||||
# Where we download this from...
|
||||
get_from: "git://github.com/openstack/horizon.git?branch=master"
|
||||
|
||||
# This is the group of the user (adjust as needed)
|
||||
apache_group: "$(auto:group)"
|
||||
|
||||
# What user will apache be serving from.
|
||||
#
|
||||
# Root will typically not work (for apache on most distros)
|
||||
# sudo adduser <username> then sudo adduser <username> admin will be what you want to set this up
|
||||
#
|
||||
# It will default to the running user if not provided...
|
||||
apache_user: "$(auto:user)"
|
||||
|
||||
# Port horizon should run on
|
||||
port: 80
|
||||
|
||||
...
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Settings for component no-vnc
|
||||
# Settings for component novnc
|
||||
---
|
||||
|
||||
# Where we download this from...
|
@ -14,4 +14,11 @@ core_plugin: linuxbridge
|
||||
network_vlan_ranges: physnet1:100:299
|
||||
physical_interface_mappings: physnet1:100:299
|
||||
|
||||
patches:
|
||||
# After check-out/download time patches
|
||||
download:
|
||||
# Require kombu>=1.0.4. Original requirement kombu==1.0.4
|
||||
# breaks yum
|
||||
- "conf/patches/quantum/kombu-requirement.patch"
|
||||
|
||||
...
|
||||
|
@ -18,15 +18,17 @@ dependency_handler:
|
||||
pyparsing: pyparsing
|
||||
pysendfile: pysendfile
|
||||
pytz: pytz
|
||||
sqlalchemy-migrate: python-migrate
|
||||
arch_dependent:
|
||||
- selenium
|
||||
commands:
|
||||
service:
|
||||
restart: service NAME restart
|
||||
start: service NAME start
|
||||
status: service NAME status
|
||||
stop: service NAME stop
|
||||
apache:
|
||||
name: httpd
|
||||
restart: service httpd restart
|
||||
start: service httpd start
|
||||
status: service httpd status
|
||||
stop: service httpd stop
|
||||
daemon: httpd
|
||||
libvirt:
|
||||
restart: service libvirtd restart
|
||||
status: service libvirtd status
|
||||
@ -43,9 +45,7 @@ commands:
|
||||
restart: service mysqld restart
|
||||
set_pwd: mysql --user=$USER --password=$OLD_PASSWORD -e
|
||||
"USE mysql; UPDATE user SET password=PASSWORD('$NEW_PASSWORD') WHERE User='$USER'; FLUSH PRIVILEGES;"
|
||||
start: service mysqld start
|
||||
status: service mysqld status
|
||||
stop: service mysqld stop
|
||||
daemon: mysqld
|
||||
# Pip command varies depending on the distro
|
||||
pip: pip-python
|
||||
# Where component symlinks will go, the component name will become a directory
|
||||
@ -62,12 +62,14 @@ components:
|
||||
cinder:
|
||||
action_classes:
|
||||
install: anvil.components.cinder:CinderInstaller
|
||||
running: anvil.components.cinder:CinderRuntime
|
||||
running: anvil.components.base_runtime:OpenStackRuntime
|
||||
test: anvil.components.base_testing:PythonTestingComponent
|
||||
coverage: anvil.components.base_testing:PythonTestingComponent
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
pips:
|
||||
- name: hp3parclient
|
||||
daemon_to_package:
|
||||
all: openstack-cinder
|
||||
cinder-client:
|
||||
action_classes:
|
||||
install: anvil.components.base_install:PythonInstallComponent
|
||||
@ -107,15 +109,16 @@ components:
|
||||
- name: iputils
|
||||
removable: false
|
||||
# Needed to build the newer lxml version used by nova
|
||||
# Build time dependencies
|
||||
- name: libxml2-devel
|
||||
removable: false
|
||||
- name: libxslt-devel
|
||||
removable: false
|
||||
- name: lsof
|
||||
- name: mysql-devel
|
||||
removable: false
|
||||
- name: mlocate
|
||||
- name: postgresql-devel
|
||||
removable: false
|
||||
- name: openssh-server
|
||||
- name: openldap-devel
|
||||
removable: false
|
||||
- name: psmisc
|
||||
removable: false
|
||||
@ -136,6 +139,8 @@ components:
|
||||
removable: false
|
||||
- name: python-setuptools
|
||||
removable: false
|
||||
- name: sqlite-devel
|
||||
removable: false
|
||||
glance:
|
||||
action_classes:
|
||||
install: anvil.components.glance:GlanceInstaller
|
||||
@ -143,14 +148,16 @@ components:
|
||||
coverage: anvil.components.glance:GlanceTester
|
||||
test: anvil.components.glance:GlanceTester
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
packages:
|
||||
- name: MySQL-python
|
||||
pips:
|
||||
# warlock requires jsonschema>=0.7,<2
|
||||
# pip downloads jsonschema-2.0 and
|
||||
# then ignores warlock's requirement
|
||||
- name: jsonschema
|
||||
version: ">=0.7,<2"
|
||||
daemon_to_package:
|
||||
api: openstack-glance
|
||||
registry: openstack-glance
|
||||
scrubber: openstack-glance
|
||||
glance-client:
|
||||
action_classes:
|
||||
install: anvil.components.base_install:PythonInstallComponent
|
||||
@ -160,15 +167,13 @@ components:
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
horizon:
|
||||
action_classes:
|
||||
install: anvil.distros.rhel:HorizonInstaller
|
||||
install: anvil.components.base_install:PythonInstallComponent
|
||||
running: anvil.components.horizon:HorizonRuntime
|
||||
test: anvil.components.base_testing:PythonTestingComponent
|
||||
coverage: anvil.components.base_testing:PythonTestingComponent
|
||||
uninstall: anvil.components.horizon:HorizonUninstaller
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
packages:
|
||||
- name: httpd
|
||||
- name: mod_wsgi
|
||||
- name: nodejs
|
||||
- name: openstack-dashboard
|
||||
django-openstack-auth:
|
||||
action_classes:
|
||||
install: anvil.components.base_install:PythonInstallComponent
|
||||
@ -182,8 +187,8 @@ components:
|
||||
test: anvil.components.keystone:KeystoneTester
|
||||
coverage: anvil.components.keystone:KeystoneTester
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
packages:
|
||||
- name: MySQL-python
|
||||
daemon_to_package:
|
||||
all: openstack-keystone
|
||||
keystone-client:
|
||||
action_classes:
|
||||
install: anvil.components.base_install:PythonInstallComponent
|
||||
@ -193,31 +198,11 @@ components:
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
nova:
|
||||
action_classes:
|
||||
install: anvil.distros.rhel:NovaInstaller
|
||||
install: anvil.components.nova:NovaInstaller
|
||||
running: anvil.components.nova:NovaRuntime
|
||||
test: anvil.components.base_testing:PythonTestingComponent
|
||||
coverage: anvil.components.base_testing:PythonTestingComponent
|
||||
uninstall: anvil.components.nova:NovaUninstaller
|
||||
packages:
|
||||
- name: MySQL-python
|
||||
# Helpful utilities/core
|
||||
# system requirements
|
||||
- name: dnsmasq
|
||||
removable: false
|
||||
- name: ebtables
|
||||
removable: false
|
||||
- name: iptables
|
||||
removable: false
|
||||
- name: iputils
|
||||
removable: false
|
||||
- name: kpartx
|
||||
removable: false
|
||||
- name: parted
|
||||
removable: false
|
||||
- name: sqlite
|
||||
removable: false
|
||||
- name: vconfig
|
||||
removable: false
|
||||
pips:
|
||||
# This seems to be a core dependency for a 'cas' tool
|
||||
# so don't try to remove it since it will also remove
|
||||
@ -225,43 +210,14 @@ components:
|
||||
# installed in rhel uses a old version of crypto which
|
||||
# other components actually can't use. This sucks...
|
||||
- name: paramiko
|
||||
subsystems:
|
||||
compute:
|
||||
packages:
|
||||
- name: avahi
|
||||
removable: false
|
||||
- name: fuse # Needed for mounting
|
||||
removable: false
|
||||
- name: guestfish
|
||||
removable: false
|
||||
- name: iscsi-initiator-utils
|
||||
removable: false
|
||||
- name: libguestfs
|
||||
removable: false
|
||||
- name: libguestfs-mount
|
||||
removable: false
|
||||
- name: libguestfs-tools
|
||||
removable: false
|
||||
- name: libvirt
|
||||
removable: false
|
||||
- name: libvirt-client
|
||||
removable: false
|
||||
- name: libvirt-python
|
||||
removable: false
|
||||
- name: postgresql-devel # for psycopg2
|
||||
removable: false
|
||||
- name: qemu-img
|
||||
removable: false
|
||||
- name: qemu-kvm
|
||||
removable: false
|
||||
volume:
|
||||
packages:
|
||||
- name: iscsi-initiator-utils
|
||||
removable: false
|
||||
- name: lvm2
|
||||
removable: false
|
||||
- name: scsi-target-utils
|
||||
removable: false
|
||||
daemon_to_package:
|
||||
api-metadata: openstack-nova-api
|
||||
api-ec2: openstack-nova-api
|
||||
api-os-compute: openstack-nova-api
|
||||
dhcpbridge: openstack-nova-network
|
||||
xvpvncproxy: openstack-nova-console
|
||||
spicehtml5proxy: openstack-nova-console
|
||||
consoleauth: openstack-nova-console
|
||||
nova-client:
|
||||
action_classes:
|
||||
install: anvil.components.base_install:PythonInstallComponent
|
||||
@ -269,16 +225,13 @@ components:
|
||||
test: anvil.components.base_testing:PythonTestingComponent
|
||||
coverage: anvil.components.base_testing:PythonTestingComponent
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
no-vnc:
|
||||
novnc:
|
||||
action_classes:
|
||||
install: anvil.components.base_install:PythonInstallComponent
|
||||
running: anvil.components.novnc:NoVNCRuntime
|
||||
running: anvil.components.base_runtime:EmptyRuntime
|
||||
test: anvil.components.base_testing:EmptyTestingComponent
|
||||
coverage: anvil.components.base_testing:EmptyTestingComponent
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
packages:
|
||||
- name: python-websockify
|
||||
- name: numpy
|
||||
openstack-client:
|
||||
action_classes:
|
||||
install: anvil.components.openstack_client:OpenStackClientInstaller
|
||||
@ -306,7 +259,15 @@ components:
|
||||
running: anvil.components.quantum:QuantumRuntime
|
||||
test: anvil.components.base_testing:PythonTestingComponent
|
||||
coverage: anvil.components.base_testing:PythonTestingComponent
|
||||
uninstall: anvil.components.base_install:PkgUninstallComponent
|
||||
uninstall: anvil.components.quantum:QuantumUninstaller
|
||||
daemon_to_package:
|
||||
linuxbridge-agent: openstack-quantum-linuxbridge
|
||||
openvswitch-agent: openstack-quantum-openvswitch
|
||||
ovs-cleanup: openstack-quantum-openvswitch
|
||||
dhcp-agent: openstack-quantum
|
||||
l3-agent: openstack-quantum
|
||||
rpc-zmq-receiver: openstack-quantum
|
||||
server: openstack-quantum
|
||||
quantum-client:
|
||||
action_classes:
|
||||
install: anvil.components.base_install:PythonInstallComponent
|
||||
|
13
conf/patches/quantum/kombu-requirement.patch
Normal file
@ -0,0 +1,13 @@
|
||||
diff --git a/tools/pip-requires b/tools/pip-requires
|
||||
index 7ed4a43..bd47095 100644
|
||||
--- a/tools/pip-requires
|
||||
+++ b/tools/pip-requires
|
||||
@@ -8,7 +8,7 @@ eventlet>=0.9.17
|
||||
greenlet>=0.3.1
|
||||
httplib2
|
||||
iso8601>=0.1.4
|
||||
-kombu==1.0.4
|
||||
+kombu>=1.0.4
|
||||
netaddr
|
||||
python-quantumclient>=2.2.0,<3.0.0
|
||||
pyudev
|
@ -45,22 +45,19 @@ subsystems:
|
||||
keystone:
|
||||
- all
|
||||
nova:
|
||||
- api-ec2
|
||||
- api-metadata
|
||||
- api-os-compute
|
||||
- api
|
||||
- cert
|
||||
- compute
|
||||
- consoleauth
|
||||
- dhcpbridge
|
||||
- network
|
||||
- novncproxy
|
||||
- scheduler
|
||||
- xvpvncproxy
|
||||
quantum:
|
||||
- server
|
||||
- linuxbridge-agent
|
||||
cinder:
|
||||
- all
|
||||
supports:
|
||||
- rhel
|
||||
...
|
||||
|
||||
|
@ -16,13 +16,13 @@ components:
|
||||
- swift-client # Seems only needed for horizon?
|
||||
- quantum
|
||||
- cinder
|
||||
- no-vnc
|
||||
- novnc
|
||||
- nova
|
||||
- nova-client
|
||||
- django-openstack-auth
|
||||
- horizon
|
||||
options:
|
||||
no-vnc:
|
||||
novnc:
|
||||
# This is the nova component name (we need this to hook into the nova conf...)
|
||||
nova-component: nova
|
||||
nova:
|
||||
@ -54,23 +54,20 @@ subsystems:
|
||||
keystone:
|
||||
- all
|
||||
nova:
|
||||
- api-ec2
|
||||
- api-metadata
|
||||
- api-os-compute
|
||||
- api
|
||||
- cert
|
||||
- compute
|
||||
- conductor
|
||||
- consoleauth
|
||||
- dhcpbridge
|
||||
- network
|
||||
- novncproxy
|
||||
- scheduler
|
||||
- xvpvncproxy
|
||||
quantum:
|
||||
- server
|
||||
- agent
|
||||
cinder:
|
||||
- all
|
||||
supports:
|
||||
- rhel
|
||||
...
|
||||
|
||||
|
@ -47,7 +47,6 @@ subsystems:
|
||||
- compute
|
||||
- conductor
|
||||
- consoleauth
|
||||
- dhcpbridge
|
||||
- network
|
||||
- novncproxy
|
||||
- scheduler
|
||||
@ -57,4 +56,3 @@ subsystems:
|
||||
supports:
|
||||
- rhel
|
||||
...
|
||||
|
||||
|
@ -1,36 +0,0 @@
|
||||
#*
|
||||
This is a cheetah template!
|
||||
*#
|
||||
<VirtualHost *:$HORIZON_PORT>
|
||||
WSGIScriptAlias / ${HORIZON_DIR}/openstack_dashboard/wsgi/django.wsgi
|
||||
WSGIDaemonProcess horizon user=$USER group=$GROUP processes=3 threads=10 home=$HORIZON_DIR
|
||||
WSGIApplicationGroup %{GLOBAL}
|
||||
|
||||
SetEnv APACHE_RUN_USER $USER
|
||||
SetEnv APACHE_RUN_GROUP $GROUP
|
||||
WSGIProcessGroup %{GLOBAL}
|
||||
|
||||
#if $BLACK_HOLE_DIR
|
||||
DocumentRoot $BLACK_HOLE_DIR
|
||||
#end if
|
||||
Alias /media $HORIZON_DIR/openstack_dashboard/static
|
||||
|
||||
<Directory />
|
||||
Options FollowSymLinks
|
||||
AllowOverride None
|
||||
</Directory>
|
||||
|
||||
<Directory ${HORIZON_DIR}>
|
||||
Options Indexes FollowSymLinks MultiViews
|
||||
AllowOverride None
|
||||
Order allow,deny
|
||||
allow from all
|
||||
</Directory>
|
||||
|
||||
ErrorLog ${ERROR_LOG}
|
||||
CustomLog ${ACCESS_LOG} combined
|
||||
LogLevel warn
|
||||
</VirtualHost>
|
||||
|
||||
WSGISocketPrefix /var/run/$APACHE_NAME
|
||||
|
@ -1,115 +0,0 @@
|
||||
#*
|
||||
This is a cheetah template!
|
||||
*#
|
||||
|
||||
# These settings are good for dev-like environments (or ci)
|
||||
# When moving to production it is likely some more thought should
|
||||
# be given here...
|
||||
#
|
||||
# See: https://docs.djangoproject.com/en/dev/ref/settings/
|
||||
#
|
||||
# This file overrides other defaults in openstack_dashboard/settings.py
|
||||
|
||||
import os
|
||||
|
||||
DEBUG = True
|
||||
TEMPLATE_DEBUG = DEBUG
|
||||
PROD = False
|
||||
USE_SSL = False
|
||||
|
||||
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# See: https://docs.djangoproject.com/en/dev/topics/cache/?from=olddocs
|
||||
CACHES = {
|
||||
'default': {
|
||||
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
|
||||
}
|
||||
}
|
||||
|
||||
# Use session cookies for any user-specific horizon session data
|
||||
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
|
||||
|
||||
# Send email to the console by default
|
||||
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
||||
|
||||
# See: https://code.google.com/p/django-mailer/
|
||||
#
|
||||
# django-mailer uses a different settings attribute
|
||||
MAILER_EMAIL_BACKEND = EMAIL_BACKEND
|
||||
|
||||
# Set a secure and unique SECRET_KEY (the Django default is '')
|
||||
#
|
||||
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY
|
||||
SECRET_KEY = "${SECRET_KEY}"
|
||||
|
||||
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
|
||||
# capabilities of the auth backend for Keystone.
|
||||
# If Keystone has been configured to use LDAP as the auth backend then set
|
||||
# can_edit_user to False and name to 'ldap'.
|
||||
#
|
||||
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
|
||||
OPENSTACK_KEYSTONE_BACKEND = {
|
||||
'name': 'native',
|
||||
'can_edit_user': True
|
||||
}
|
||||
|
||||
OPENSTACK_HOST = "${OPENSTACK_HOST}"
|
||||
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
|
||||
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
|
||||
|
||||
# The timezone of the server. This should correspond with the timezone
|
||||
# of your entire OpenStack installation, and hopefully be in UTC.
|
||||
TIME_ZONE = "UTC"
|
||||
|
||||
LOGGING = {
|
||||
'version': 1,
|
||||
# When set to True this will disable all logging except
|
||||
# for loggers specified in this configuration dictionary. Note that
|
||||
# if nothing is specified here and disable_existing_loggers is True,
|
||||
# django.db.backends will still log unless it is disabled explicitly.
|
||||
'disable_existing_loggers': False,
|
||||
'handlers': {
|
||||
'null': {
|
||||
'level': 'DEBUG',
|
||||
'class': 'django.utils.log.NullHandler',
|
||||
},
|
||||
'console': {
|
||||
# Set the level to "DEBUG" for verbose output logging.
|
||||
'level': 'INFO',
|
||||
'class': 'logging.StreamHandler',
|
||||
},
|
||||
},
|
||||
'loggers': {
|
||||
# Logging from django.db.backends is VERY verbose, send to null
|
||||
# by default.
|
||||
'django.db.backends': {
|
||||
'handlers': ['null'],
|
||||
'propagate': False,
|
||||
},
|
||||
'horizon': {
|
||||
'handlers': ['console'],
|
||||
'propagate': False,
|
||||
},
|
||||
'openstack_dashboard': {
|
||||
'handlers': ['console'],
|
||||
'propagate': False,
|
||||
},
|
||||
'novaclient': {
|
||||
'handlers': ['console'],
|
||||
'propagate': False,
|
||||
},
|
||||
'keystoneclient': {
|
||||
'handlers': ['console'],
|
||||
'propagate': False,
|
||||
},
|
||||
'glanceclient': {
|
||||
'handlers': ['console'],
|
||||
'propagate': False,
|
||||
},
|
||||
'nose.plugins.manager': {
|
||||
'handlers': ['console'],
|
||||
'propagate': False,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -1,10 +0,0 @@
|
||||
[anvil]
|
||||
name=anvil
|
||||
baseurl=$baseurl_bin
|
||||
gpgcheck=0
|
||||
|
||||
[anvil-src]
|
||||
name=anvil
|
||||
baseurl=$baseurl_src
|
||||
gpgcheck=0
|
||||
enabled=0
|
97
conf/templates/packaging/common.init
Normal file
@ -0,0 +1,97 @@
|
||||
#!/bin/sh
|
||||
#*
|
||||
bin - as it is in /usr/bin (nova-api, keystone-all)
|
||||
package - directory in /etc (nova, keystone)
|
||||
*#
|
||||
#
|
||||
# $bin
|
||||
#
|
||||
# chkconfig: - 98 02
|
||||
# description: $bin server
|
||||
### END INIT INFO
|
||||
|
||||
. /etc/rc.d/init.d/functions
|
||||
|
||||
prog=openstack-$bin
|
||||
exec="/usr/bin/$bin"
|
||||
pidfile="/var/run/$package/\$prog.pid"
|
||||
daemon_user=$package
|
||||
|
||||
#raw
|
||||
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
|
||||
|
||||
lockfile=/var/lock/subsys/$prog
|
||||
|
||||
start() {
|
||||
[ -x $exec ] || exit 5
|
||||
echo -n $"Starting $prog: "
|
||||
daemon --user $daemon_user --pidfile $pidfile "$exec &>/dev/null & echo \$! > $pidfile"
|
||||
retval=$?
|
||||
echo
|
||||
[ $retval -eq 0 ] && touch $lockfile
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n $"Stopping $prog: "
|
||||
killproc -p $pidfile $prog
|
||||
retval=$?
|
||||
echo
|
||||
[ $retval -eq 0 ] && rm -f $lockfile
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
status -p $pidfile $prog
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
exit $?
|
||||
#end raw
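For orientation, with the parameters _copy_startup_scripts (earlier in this change) passes for a source named openstack-cinder-api.init (bin 'cinder-api', package 'cinder'), the Cheetah variables above expand to roughly:

    prog=openstack-cinder-api
    exec="/usr/bin/cinder-api"
    pidfile="/var/run/cinder/$prog.pid"
    daemon_user=cinder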
|
10
conf/templates/packaging/common.repo
Normal file
@ -0,0 +1,10 @@
|
||||
[${repo_name}]
|
||||
name=${repo_name}
|
||||
baseurl=$baseurl_bin
|
||||
gpgcheck=0
|
||||
|
||||
[${repo_name}-source]
|
||||
name=${repo_name} - source
|
||||
baseurl=$baseurl_src
|
||||
gpgcheck=0
|
||||
enabled=0
|
3
conf/templates/packaging/sources/cinder/cinder-sudoers
Normal file
@ -0,0 +1,3 @@
|
||||
Defaults:cinder !requiretty
|
||||
|
||||
cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *
|
1
conf/templates/packaging/sources/cinder/cinder-tgt.conf
Normal file
@ -0,0 +1 @@
|
||||
include /etc/cinder/volumes/
|
9
conf/templates/packaging/sources/cinder/cinder.logrotate
Normal file
@ -0,0 +1,9 @@
|
||||
compress
|
||||
|
||||
/var/log/cinder/*.log {
|
||||
weekly
|
||||
rotate 4
|
||||
missingok
|
||||
compress
|
||||
minsize 100k
|
||||
}
|
@ -0,0 +1,7 @@
|
||||
/var/log/glance/*.log {
|
||||
weekly
|
||||
rotate 4
|
||||
missingok
|
||||
compress
|
||||
minsize 100k
|
||||
}
|
@ -0,0 +1,19 @@
|
||||
WSGIDaemonProcess dashboard
|
||||
WSGIProcessGroup dashboard
|
||||
WSGISocketPrefix run/wsgi
|
||||
|
||||
WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
|
||||
Alias /static /usr/share/openstack-dashboard/static
|
||||
|
||||
<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
|
||||
Options All
|
||||
AllowOverride All
|
||||
Require all granted
|
||||
</Directory>
|
||||
|
||||
<Directory /usr/share/openstack-dashboard/static>
|
||||
Options All
|
||||
AllowOverride All
|
||||
Require all granted
|
||||
</Directory>
|
||||
|
@ -0,0 +1,11 @@
|
||||
WSGIDaemonProcess dashboard
|
||||
WSGIProcessGroup dashboard
|
||||
WSGISocketPrefix run/wsgi
|
||||
|
||||
WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
|
||||
Alias /static /usr/share/openstack-dashboard/static
|
||||
|
||||
<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
|
||||
Order allow,deny
|
||||
Allow from all
|
||||
</Directory>
|
15
conf/templates/packaging/sources/nova/nova-ifc-template
Normal file
@ -0,0 +1,15 @@
|
||||
DEVICE="$name"
|
||||
NM_CONTROLLED="no"
|
||||
ONBOOT=yes
|
||||
TYPE=Ethernet
|
||||
BOOTPROTO=static
|
||||
IPADDR=$address
|
||||
NETMASK=$netmask
|
||||
BROADCAST=$broadcast
|
||||
GATEWAY=$gateway
|
||||
DNS1=$dns
|
||||
|
||||
#if $use_ipv6
|
||||
IPV6INIT=yes
|
||||
IPV6ADDR=$address_v6
|
||||
#end if
|
6
conf/templates/packaging/sources/nova/nova-polkit.pkla
Normal file
@ -0,0 +1,6 @@
|
||||
[Allow nova libvirt management permissions]
|
||||
Identity=unix-user:nova
|
||||
Action=org.libvirt.unix.manage
|
||||
ResultAny=yes
|
||||
ResultInactive=yes
|
||||
ResultActive=yes
|
3
conf/templates/packaging/sources/nova/nova-sudoers
Normal file
@ -0,0 +1,3 @@
|
||||
Defaults:nova !requiretty
|
||||
|
||||
nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
|
9
conf/templates/packaging/sources/nova/nova.logrotate
Normal file
@ -0,0 +1,9 @@
|
||||
compress
|
||||
|
||||
/var/log/nova/*.log {
|
||||
weekly
|
||||
rotate 4
|
||||
missingok
|
||||
compress
|
||||
minsize 100k
|
||||
}
|
35
conf/templates/packaging/sources/novnc/nova-novncproxy.1
Normal file
@ -0,0 +1,35 @@
|
||||
.TH nova-novncproxy 1 "June 8, 2012" "version 0.3" "USER COMMANDS"
|
||||
|
||||
.SH NAME
|
||||
nova-novncproxy - noVNC proxy for OpenStack Nova
|
||||
.SH SYNOPSIS
|
||||
.B nova-novncproxy [options]
|
||||
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
record : Record sessions to FILE.[session_number]
|
||||
.TP
|
||||
daemon : Become a daemon (background process)
|
||||
.TP
|
||||
ssl_only : Disallow non-encrypted connections
|
||||
.TP
|
||||
source_is_ipv6 : Source is ipv6
|
||||
.TP
|
||||
cert : SSL certificate file
|
||||
.TP
|
||||
key : SSL key file (if separate from cert)
|
||||
.TP
|
||||
web : Run webserver on same port. Serve files from DIR.
|
||||
.TP
|
||||
novncproxy_host : Host on which to listen for incoming requests.
|
||||
defaults to '0.0.0.0'
|
||||
.TP
|
||||
novncproxy_port: Port on which to listen for incoming requests
|
||||
defaults to 6080
|
||||
|
||||
|
||||
.SH AUTHOR
|
||||
Joel Martin (github@martintribe.org)
|
||||
|
||||
.SH SEE ALSO
|
||||
websockify(1)
|
22
conf/templates/packaging/sources/novnc/novnc_server.1
Normal file
@ -0,0 +1,22 @@
|
||||
.TH novnc_server 1 "June 8, 2012" "version 0.3" "USER COMMANDS"
|
||||
|
||||
.SH NAME
|
||||
novnc_server - noVNC proxy server
|
||||
.SH SYNOPSIS
|
||||
.B novnc_server [--listen PORT] [--vnc VNC_HOST:PORT] [--cert CERT]
|
||||
|
||||
Starts the WebSockets proxy and a mini-webserver and
|
||||
provides a cut-and-paste URL to go to.
|
||||
|
||||
--listen PORT Port for proxy/webserver to listen on
|
||||
Default: 6080
|
||||
--vnc VNC_HOST:PORT VNC server host:port proxy target
|
||||
Default: localhost:5900
|
||||
--cert CERT Path to combined cert/key file
|
||||
Default: self.pem
|
||||
|
||||
.SH AUTHOR
|
||||
Joel Martin (github@martintribe.org)
|
||||
|
||||
.SH SEE ALSO
|
||||
websockify(1)
|
@ -0,0 +1,102 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# openstack-nova-novncproxy OpenStack Nova Console noVNC Proxy
|
||||
#
|
||||
# chkconfig: - 98 02
|
||||
# description: OpenStack Nova Console noVNC Proxy Server
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides:
|
||||
# Required-Start: $remote_fs $network $syslog
|
||||
# Required-Stop: $remote_fs $syslog
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: OpenStack Nova Console noVNC Proxy
|
||||
# Description: OpenStack Nova Console noVNC Proxy Server
|
||||
### END INIT INFO
|
||||
|
||||
. /etc/rc.d/init.d/functions
|
||||
|
||||
suffix=novncproxy
|
||||
prog=openstack-nova-$suffix
|
||||
exec="/usr/bin/nova-$suffix"
|
||||
config="/etc/nova/nova.conf"
|
||||
pidfile="/var/run/nova/nova-$suffix.pid"
|
||||
logfile="/var/log/nova/$suffix.log"
|
||||
|
||||
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
|
||||
|
||||
lockfile=/var/lock/subsys/$prog
|
||||
|
||||
start() {
|
||||
[ -x $exec ] || exit 5
|
||||
[ -f $config ] || exit 6
|
||||
echo -n $"Starting $prog: "
|
||||
daemon --user nova --pidfile $pidfile "$exec --web /usr/share/novnc/ &>/dev/null & echo \$! > $pidfile"
|
||||
retval=$?
|
||||
echo
|
||||
[ $retval -eq 0 ] && touch $lockfile
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n $"Stopping $prog: "
|
||||
killproc -p $pidfile $prog
|
||||
retval=$?
|
||||
echo
|
||||
[ $retval -eq 0 ] && rm -f $lockfile
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
status -p $pidfile $prog
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
exit $?
|
3
conf/templates/packaging/sources/quantum/quantum-sudoers
Normal file
@ -0,0 +1,3 @@
|
||||
Defaults:quantum !requiretty
|
||||
|
||||
quantum ALL = (root) NOPASSWD: SETENV: /usr/bin/quantum-rootwrap
|
@ -0,0 +1,9 @@
|
||||
compress
|
||||
|
||||
/var/log/quantum/*.log {
|
||||
weekly
|
||||
rotate 4
|
||||
missingok
|
||||
compress
|
||||
minsize 100k
|
||||
}
|
97
conf/templates/packaging/specs/novnc.spec
Normal file
@ -0,0 +1,97 @@
|
||||
Name: novnc
|
||||
Summary: VNC client using HTML5 (Web Sockets, Canvas) with encryption support
|
||||
Epoch: $epoch
|
||||
Version: 0.4
|
||||
Release: 1%{?dist}
|
||||
|
||||
#raw
|
||||
License: GPLv3
|
||||
URL: https://github.com/kanaka/noVNC
|
||||
Source0: https://github.com/downloads/kanaka/noVNC/novnc-%{version}.tar.gz
|
||||
Source1: openstack-nova-novncproxy.init
|
||||
Source2: nova-novncproxy.1
|
||||
Source3: novnc_server.1
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: python2-devel
|
||||
|
||||
Requires: python-websockify
|
||||
|
||||
%description
|
||||
Websocket implementation of VNC client
|
||||
|
||||
|
||||
%package -n openstack-nova-novncproxy
|
||||
Summary: Proxy server for noVNC traffic over Websockets
|
||||
Requires: novnc
|
||||
Requires: openstack-nova
|
||||
Requires: python-websockify
|
||||
|
||||
Requires(post): chkconfig
|
||||
Requires(preun): chkconfig
|
||||
Requires(preun): initscripts
|
||||
|
||||
%description -n openstack-nova-novncproxy
|
||||
OpenStack Nova noVNC server that proxies VNC traffic over Websockets.
|
||||
|
||||
%prep
|
||||
%setup -q
|
||||
|
||||
# call the websockify executable
|
||||
sed -i 's/wsproxy\.py/websockify/' utils/launch.sh
|
||||
# import websockify
|
||||
sed -i 's/import wsproxy/import wsproxy as websockify/' utils/nova-novncproxy
|
||||
install %{SOURCE2} %{SOURCE3} docs/
|
||||
|
||||
%build
|
||||
|
||||
|
||||
%install
|
||||
mkdir -p %{buildroot}/%{_usr}/share/novnc/utils
|
||||
install -m 444 *html %{buildroot}/%{_usr}/share/novnc
|
||||
#provide an index file to prevent default directory browsing
|
||||
install -m 444 vnc.html %{buildroot}/%{_usr}/share/novnc/index.html
|
||||
|
||||
mkdir -p %{buildroot}/%{_usr}/share/novnc/include/
|
||||
install -m 444 include/*.* %{buildroot}/%{_usr}/share/novnc/include
|
||||
mkdir -p %{buildroot}/%{_usr}/share/novnc/images
|
||||
install -m 444 images/*.* %{buildroot}/%{_usr}/share/novnc/images
|
||||
|
||||
mkdir -p %{buildroot}/%{_bindir}
|
||||
install utils/launch.sh %{buildroot}/%{_bindir}/novnc_server
|
||||
|
||||
install utils/nova-novncproxy %{buildroot}/%{_bindir}
|
||||
|
||||
mkdir -p %{buildroot}%{_mandir}/man1/
|
||||
install -m 444 docs/novnc_server.1 %{buildroot}%{_mandir}/man1/
|
||||
|
||||
mkdir -p %{buildroot}%{_initrddir}
|
||||
install -p -D -m 755 %{SOURCE1} %{buildroot}%{_initrddir}/openstack-nova-novncproxy
|
||||
|
||||
|
||||
%preun -n openstack-nova-novncproxy
|
||||
if [ $1 -eq 0 ] ; then
|
||||
/sbin/service openstack-nova-novncproxy stop >/dev/null 2>&1
|
||||
/sbin/chkconfig --del openstack-nova-novncproxy
|
||||
fi
|
||||
|
||||
|
||||
%files
|
||||
%doc README.md LICENSE.txt
|
||||
|
||||
%dir %{_usr}/share/novnc
|
||||
%{_usr}/share/novnc/*.*
|
||||
%dir %{_usr}/share/novnc/include
|
||||
%{_usr}/share/novnc/include/*
|
||||
%dir %{_usr}/share/novnc/images
|
||||
%{_usr}/share/novnc/images/*
|
||||
%{_bindir}/novnc_server
|
||||
%{_mandir}/man1/novnc_server.1*
|
||||
|
||||
|
||||
%files -n openstack-nova-novncproxy
|
||||
%{_bindir}/nova-novncproxy
|
||||
%{_initrddir}/openstack-nova-novncproxy
|
||||
|
||||
%changelog
|
||||
#endraw
|
244
conf/templates/packaging/specs/openstack-cinder.spec
Normal file
@ -0,0 +1,244 @@
|
||||
#encoding UTF-8
|
||||
# Based on spec by:
|
||||
# * Eric Harney <eharney@redhat.com>
|
||||
# * Martin Magr <mmagr@redhat.com>
|
||||
# * Pádraig Brady <P@draigBrady.com>
|
||||
|
||||
%global python_name cinder
|
||||
%global daemon_prefix openstack-cinder
|
||||
|
||||
%if ! (0%{?fedora} > 12 || 0%{?rhel} > 6)
|
||||
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
|
||||
%endif
|
||||
|
||||
Name: openstack-cinder
|
||||
Version: $version
|
||||
Release: 1%{?dist}
|
||||
Epoch: $epoch
|
||||
Summary: OpenStack Volume service
|
||||
|
||||
Group: Applications/System
|
||||
License: ASL 2.0
|
||||
URL: http://www.openstack.org/software/openstack-storage/
|
||||
Source0: %{python_name}-%{version}.tar.gz
|
||||
Source1: cinder-sudoers
|
||||
Source2: cinder.logrotate
|
||||
Source3: cinder-tgt.conf
|
||||
|
||||
Source10: openstack-cinder-api.init
|
||||
Source11: openstack-cinder-scheduler.init
|
||||
Source12: openstack-cinder-volume.init
|
||||
Source13: openstack-cinder-all.init
|
||||
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: python-setuptools
|
||||
|
||||
Requires: python-cinder = %{epoch}:%{version}-%{release}
|
||||
|
||||
# as convenience
|
||||
Requires: python-cinderclient
|
||||
|
||||
Requires(post): chkconfig
|
||||
Requires(preun): chkconfig
|
||||
Requires(postun): chkconfig
|
||||
Requires(pre): shadow-utils
|
||||
|
||||
Requires: lvm2
|
||||
Requires: scsi-target-utils
|
||||
|
||||
%description
|
||||
OpenStack Volume (codename Cinder) provides services to manage and
|
||||
access block storage volumes for use by Virtual Machine instances.
|
||||
|
||||
|
||||
%package -n python-cinder
|
||||
Summary: OpenStack Volume Python libraries
|
||||
Group: Applications/System
|
||||
|
||||
Requires: sudo
|
||||
#for $i in $requires
|
||||
Requires: ${i}
|
||||
#end for
|
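For illustration only: the Cheetah loop above emits one Requires: line per entry in $requires, which the packaging tool fills in at build time. With a hypothetical dependency list of python-sqlalchemy and python-paste-deploy (example names, not the real generated list), the block would render as:
Requires: python-sqlalchemy
Requires: python-paste-deploy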
||||
|
||||
|
||||
%description -n python-cinder
|
||||
OpenStack Volume (codename Cinder) provides services to manage and
|
||||
access block storage volumes for use by Virtual Machine instances.
|
||||
|
||||
This package contains the cinder Python library.
|
||||
|
||||
%if 0%{?with_doc}
|
||||
%package doc
|
||||
Summary: Documentation for OpenStack Volume
|
||||
Group: Documentation
|
||||
|
||||
Requires: %{name} = %{epoch}:%{version}-%{release}
|
||||
|
||||
BuildRequires: graphviz
|
||||
BuildRequires: python-sphinx
|
||||
|
||||
# Required to build module documents
|
||||
BuildRequires: python-eventlet
|
||||
BuildRequires: python-routes
|
||||
BuildRequires: python-sqlalchemy
|
||||
BuildRequires: python-webob
|
||||
# while not strictly required, quiets the build down when building docs.
|
||||
BuildRequires: python-migrate, python-iso8601
|
||||
|
||||
%description doc
|
||||
OpenStack Volume (codename Cinder) provides services to manage and
|
||||
access block storage volumes for use by Virtual Machine instances.
|
||||
|
||||
This package contains documentation files for cinder.
|
||||
%endif
|
||||
|
||||
#raw
|
||||
%prep
|
||||
%setup -q -n cinder-%{version}
|
||||
|
||||
# Ensure we don't access the net when building docs
|
||||
sed -i "/'sphinx.ext.intersphinx',/d" doc/source/conf.py
|
||||
# Remove deprecated assert_unicode sqlalchemy attribute
|
||||
sed -i "/assert_unicode=None/d" cinder/db/sqlalchemy/migrate_repo/versions/*py
|
||||
|
||||
find . \( -name .gitignore -o -name .placeholder \) -delete
|
||||
|
||||
find cinder -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} +
|
||||
|
||||
# TODO: Have the following handle multi line entries
|
||||
sed -i '/setup_requires/d; /install_requires/d; /dependency_links/d' setup.py
|
||||
|
||||
%build
|
||||
|
||||
%{__python} setup.py build
|
||||
|
||||
%install
|
||||
%{__python} setup.py install -O1 --skip-build --root %{buildroot}
|
||||
|
||||
# docs generation requires everything to be installed first
|
||||
export PYTHONPATH="$PWD:$PYTHONPATH"
|
||||
|
||||
%if 0%{?with_doc}
|
||||
pushd doc
|
||||
|
||||
SPHINX_DEBUG=1 sphinx-build -b html source build/html
|
||||
# Fix hidden-file-or-dir warnings
|
||||
rm -fr build/html/.doctrees build/html/.buildinfo
|
||||
|
||||
# Create dir link to avoid a sphinx-build exception
|
||||
mkdir -p build/man/.doctrees/
|
||||
ln -s . build/man/.doctrees/man
|
||||
SPHINX_DEBUG=1 sphinx-build -b man -c source source/man build/man
|
||||
mkdir -p %{buildroot}%{_mandir}/man1
|
||||
install -p -D -m 644 build/man/*.1 %{buildroot}%{_mandir}/man1/
|
||||
|
||||
popd
|
||||
%endif
|
||||
|
||||
# Setup directories
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/cinder
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/cinder/tmp
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/log/cinder
|
||||
|
||||
# Install config files
|
||||
install -d -m 755 %{buildroot}%{_sysconfdir}/cinder
|
||||
install -d -m 755 %{buildroot}%{_sysconfdir}/cinder/volumes
|
||||
install -p -D -m 644 %{SOURCE3} %{buildroot}%{_sysconfdir}/tgt/conf.d/cinder.conf
|
||||
install -p -D -m 640 etc/cinder/cinder.conf.sample %{buildroot}%{_sysconfdir}/cinder/
|
||||
install -p -D -m 640 etc/cinder/rootwrap.conf %{buildroot}%{_sysconfdir}/cinder/
|
||||
install -p -D -m 640 etc/cinder/api-paste.ini %{buildroot}%{_sysconfdir}/cinder/
|
||||
install -p -D -m 640 etc/cinder/policy.json %{buildroot}%{_sysconfdir}/cinder/
|
||||
|
||||
# Install initscripts for services
|
||||
install -p -D -m 755 %{SOURCE10} %{buildroot}%{_initrddir}/%{daemon_prefix}-api
|
||||
install -p -D -m 755 %{SOURCE11} %{buildroot}%{_initrddir}/%{daemon_prefix}-scheduler
|
||||
install -p -D -m 755 %{SOURCE12} %{buildroot}%{_initrddir}/%{daemon_prefix}-volume
|
||||
install -p -D -m 755 %{SOURCE13} %{buildroot}%{_initrddir}/%{daemon_prefix}-all
|
||||
|
||||
# Install sudoers
|
||||
install -p -D -m 440 %{SOURCE1} %{buildroot}%{_sysconfdir}/sudoers.d/cinder
|
||||
|
||||
# Install logrotate
|
||||
install -p -D -m 644 %{SOURCE2} %{buildroot}%{_sysconfdir}/logrotate.d/openstack-cinder
|
||||
|
||||
# Install pid directory
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/run/cinder
|
||||
|
||||
# Install rootwrap files in /usr/share/cinder/rootwrap
|
||||
mkdir -p %{buildroot}%{_datarootdir}/cinder/rootwrap/
|
||||
install -p -D -m 644 etc/cinder/rootwrap.d/* %{buildroot}%{_datarootdir}/cinder/rootwrap/
|
||||
|
||||
# Remove unneeded in production stuff
|
||||
rm -f %{buildroot}%{_bindir}/cinder-debug
|
||||
rm -fr %{buildroot}%{python_sitelib}/cinder/tests/
|
||||
rm -fr %{buildroot}%{python_sitelib}/run_tests.*
|
||||
rm -f %{buildroot}/usr/share/doc/cinder/README*
|
||||
|
||||
|
||||
%pre
|
||||
getent group cinder >/dev/null || groupadd -r cinder
|
||||
getent passwd cinder >/dev/null || \
|
||||
useradd -r -g cinder -d %{_sharedstatedir}/cinder -s /sbin/nologin \
|
||||
-c "OpenStack Cinder Daemons" cinder
|
||||
exit 0
|
||||
|
||||
|
||||
%preun
|
||||
if [ $1 -eq 0 ] ; then
|
||||
for svc in all volume api scheduler; do
|
||||
/sbin/chkconfig --del %{daemon_prefix}-${svc} &>/dev/null
|
||||
/sbin/service %{daemon_prefix}-${svc} stop &>/dev/null
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
%postun
|
||||
if [ $1 -ge 1 ] ; then
|
||||
# Package upgrade, not uninstall
|
||||
for svc in all volume api scheduler; do
|
||||
/sbin/service %{daemon_prefix}-${svc} condrestart &>/dev/null
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
%files
|
||||
%doc LICENSE
|
||||
|
||||
%dir %{_sysconfdir}/cinder
|
||||
%config(noreplace) %attr(-, root, cinder) %{_sysconfdir}/cinder/cinder.conf.sample
|
||||
%config(noreplace) %attr(-, root, cinder) %{_sysconfdir}/cinder/api-paste.ini
|
||||
%config(noreplace) %attr(-, root, cinder) %{_sysconfdir}/cinder/rootwrap.conf
|
||||
%config(noreplace) %attr(-, root, cinder) %{_sysconfdir}/cinder/policy.json
|
||||
%config(noreplace) %{_sysconfdir}/logrotate.d/openstack-cinder
|
||||
%config(noreplace) %{_sysconfdir}/sudoers.d/cinder
|
||||
%config(noreplace) %{_sysconfdir}/tgt/conf.d/cinder.conf
|
||||
|
||||
%dir %attr(0755, cinder, root) %{_localstatedir}/log/cinder
|
||||
%dir %attr(0755, cinder, root) %{_localstatedir}/run/cinder
|
||||
%dir %attr(0755, cinder, root) %{_sysconfdir}/cinder/volumes
|
||||
|
||||
%{_bindir}/cinder-*
|
||||
%{_initrddir}/*
|
||||
%{_datarootdir}/cinder
|
||||
|
||||
%if 0%{?with_doc}
|
||||
%{_mandir}/man1/*
|
||||
%endif
|
||||
|
||||
%defattr(-, cinder, cinder, -)
|
||||
%dir %{_sharedstatedir}/cinder
|
||||
%dir %{_sharedstatedir}/cinder/tmp
|
||||
|
||||
%files -n python-cinder
|
||||
%doc LICENSE
|
||||
%{python_sitelib}/cinder
|
||||
%{python_sitelib}/cinder-%{version}*.egg-info
|
||||
|
||||
%if 0%{?with_doc}
|
||||
%files doc
|
||||
%doc doc/build/html
|
||||
%endif
|
||||
|
||||
%changelog
|
||||
#end raw
|
192
conf/templates/packaging/specs/openstack-glance.spec
Normal file
@ -0,0 +1,192 @@
|
||||
#encoding UTF-8
|
||||
# Based on spec by:
|
||||
# * Andrey Brindeyev <abrindeyev@griddynamics.com>
|
||||
# * Alessio Ababilov <aababilov@griddynamics.com>
|
||||
|
||||
%global python_name glance
|
||||
%global daemon_prefix openstack-glance
|
||||
|
||||
%if ! (0%{?fedora} > 12 || 0%{?rhel} > 6)
|
||||
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
|
||||
%endif
|
||||
|
||||
Name: openstack-glance
|
||||
Epoch: $epoch
|
||||
Version: $version
|
||||
Release: 1%{?dist}
|
||||
Summary: OpenStack Image Registry and Delivery Service
|
||||
|
||||
Group: Development/Languages
|
||||
License: ASL 2.0
|
||||
Vendor: OpenStack Foundation
|
||||
URL: http://glance.openstack.org
|
||||
Source0: %{python_name}-%{version}.tar.gz
|
||||
Source1: openstack-glance-api.init
|
||||
Source2: openstack-glance-registry.init
|
||||
Source3: openstack-glance-scrubber.init
|
||||
Source4: openstack-glance.logrotate
|
||||
|
||||
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: python-devel
|
||||
BuildRequires: python-setuptools
|
||||
|
||||
Requires(post): chkconfig
|
||||
Requires(postun): initscripts
|
||||
Requires(preun): chkconfig
|
||||
Requires(pre): shadow-utils
|
||||
Requires: python-glance = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description
|
||||
OpenStack Image Service (code-named Glance) provides discovery, registration,
|
||||
and delivery services for virtual disk images. The Image Service API server
|
||||
provides a standard REST interface for querying information about virtual disk
|
||||
images stored in a variety of back-end stores, including OpenStack Object
|
||||
Storage. Clients can register new virtual disk images with the Image Service,
|
||||
query for information on publicly available disk images, and use the Image
|
||||
Service client library for streaming virtual disk images.
|
||||
|
||||
This package contains the API and registry servers.
|
||||
|
||||
%package -n python-glance
|
||||
Summary: Glance Python libraries
|
||||
Group: Applications/System
|
||||
|
||||
#for $i in $requires
|
||||
Requires: ${i}
|
||||
#end for
|
||||
|
||||
#raw
|
||||
%description -n python-glance
|
||||
OpenStack Image Service (code-named Glance) provides discovery, registration,
|
||||
and delivery services for virtual disk images.
|
||||
|
||||
This package contains the glance Python library.
|
||||
|
||||
%if 0%{?with_doc}
|
||||
%package doc
|
||||
Summary: Documentation for OpenStack Glance
|
||||
Group: Documentation
|
||||
|
||||
BuildRequires: python-sphinx
|
||||
BuildRequires: graphviz
|
||||
|
||||
# Required to build module documents
|
||||
BuildRequires: python-boto
|
||||
BuildRequires: python-daemon
|
||||
BuildRequires: python-eventlet
|
||||
|
||||
%description doc
|
||||
OpenStack Image Service (code-named Glance) provides discovery, registration,
|
||||
and delivery services for virtual disk images.
|
||||
|
||||
This package contains documentation files for glance.
|
||||
|
||||
%endif
|
||||
|
||||
%prep
|
||||
%setup -q -n %{python_name}-%{version}
|
||||
sed -i '/pysendfile/d' tools/pip-requires
|
||||
|
||||
|
||||
%build
|
||||
%{__python} setup.py build
|
||||
|
||||
%install
|
||||
rm -rf %{buildroot}
|
||||
%{__python} setup.py install -O1 --skip-build --root %{buildroot}
|
||||
|
||||
# Delete tests
|
||||
rm -fr %{buildroot}%{python_sitelib}/tests
|
||||
|
||||
%if 0%{?with_doc}
|
||||
export PYTHONPATH="$PWD:$PYTHONPATH"
|
||||
pushd doc
|
||||
sphinx-build -b html source build/html
|
||||
popd
|
||||
|
||||
# Fix hidden-file-or-dir warnings
|
||||
rm -fr doc/build/html/.doctrees doc/build/html/.buildinfo
|
||||
%endif
|
||||
|
||||
# Setup directories
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/glance/images
|
||||
|
||||
# Config file
|
||||
install -d -m 755 %{buildroot}%{_sysconfdir}/glance
|
||||
for i in etc/*; do
|
||||
install -p -D -m 644 $i %{buildroot}%{_sysconfdir}/glance/
|
||||
done
|
||||
|
||||
# Initscripts
|
||||
install -p -D -m 755 %{SOURCE1} %{buildroot}%{_initrddir}/%{daemon_prefix}-api
|
||||
install -p -D -m 755 %{SOURCE2} %{buildroot}%{_initrddir}/%{daemon_prefix}-registry
|
||||
install -p -D -m 755 %{SOURCE3} %{buildroot}%{_initrddir}/%{daemon_prefix}-scrubber
|
||||
|
||||
# Logrotate config
|
||||
install -p -D -m 644 %{SOURCE4} %{buildroot}%{_sysconfdir}/logrotate.d/openstack-glance
|
||||
|
||||
# Install pid directory
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/run/glance
|
||||
|
||||
# Install log directory
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/log/glance
|
||||
|
||||
|
||||
%clean
|
||||
rm -rf %{buildroot}
|
||||
|
||||
|
||||
%pre
|
||||
getent group glance >/dev/null || groupadd -r glance
|
||||
getent passwd glance >/dev/null || \
|
||||
useradd -r -g glance -d %{_sharedstatedir}/glance -s /sbin/nologin \
|
||||
-c "OpenStack Glance Daemons" glance
|
||||
exit 0
|
||||
|
||||
|
||||
%preun
|
||||
if [ $1 = 0 ] ; then
|
||||
for svc in api registry scrubber; do
|
||||
/sbin/service %{daemon_prefix}-${svc} stop &>/dev/null
|
||||
/sbin/chkconfig --del %{daemon_prefix}-${svc}
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
%postun
|
||||
if [ $1 -ge 1 ] ; then
|
||||
# Package upgrade, not uninstall
|
||||
for svc in api registry scrubber; do
|
||||
/sbin/service %{daemon_prefix}-${svc} condrestart &>/dev/null
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
%files
|
||||
%defattr(-,root,root,-)
|
||||
%doc README* LICENSE* HACKING* ChangeLog
|
||||
%{_bindir}/*
|
||||
%{_initrddir}/*
|
||||
%dir %{_sysconfdir}/glance
|
||||
%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/*
|
||||
%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/logrotate.d/openstack-glance
|
||||
%dir %attr(0755, glance, nobody) %{_sharedstatedir}/glance
|
||||
%dir %attr(0755, glance, nobody) %{_localstatedir}/log/glance
|
||||
%dir %attr(0755, glance, nobody) %{_localstatedir}/run/glance
|
||||
|
||||
|
||||
%files -n python-glance
|
||||
%{python_sitelib}/*
|
||||
|
||||
%if 0%{?with_doc}
|
||||
%files doc
|
||||
%defattr(-,root,root,-)
|
||||
%doc doc/build/html
|
||||
%endif
|
||||
|
||||
%changelog
|
||||
#end raw
|
162
conf/templates/packaging/specs/openstack-keystone.spec
Normal file
@ -0,0 +1,162 @@
|
||||
#encoding UTF-8
|
||||
# Based on spec by:
|
||||
# * Andrey Brindeyev <abrindeyev@griddynamics.com>
|
||||
# * Alessio Ababilov <aababilov@griddynamics.com>
|
||||
|
||||
%global python_name keystone
|
||||
%global daemon_prefix openstack-keystone
|
||||
|
||||
%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5)
|
||||
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
|
||||
%endif
|
||||
|
||||
Name: openstack-keystone
|
||||
Epoch: 1
|
||||
Version: $version
|
||||
Release: 1%{?dist}
|
||||
Url: http://www.openstack.org
|
||||
Summary: OpenStack Identity Service
|
||||
License: Apache 2.0
|
||||
Vendor: OpenStack Foundation
|
||||
Group: Applications/System
|
||||
|
||||
Source0: %{python_name}-%{version}.tar.gz
|
||||
Source1: openstack-keystone-all.init
|
||||
|
||||
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: python-devel
|
||||
BuildRequires: python-setuptools
|
||||
|
||||
Requires(post): chkconfig
|
||||
Requires(postun): initscripts
|
||||
Requires(preun): chkconfig
|
||||
Requires(pre): shadow-utils
|
||||
Requires: python-keystone = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description
|
||||
Keystone is a Python implementation of the OpenStack
|
||||
(http://www.openstack.org) identity service API.
|
||||
|
||||
This package contains the Keystone daemon.
|
||||
|
||||
|
||||
%if 0%{?with_doc}
|
||||
|
||||
%package doc
|
||||
Summary: Documentation for %{name}
|
||||
Group: Documentation
|
||||
Requires: %{name} = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description doc
|
||||
Keystone is a Python implementation of the OpenStack
|
||||
(http://www.openstack.org) identity service API.
|
||||
|
||||
This package contains documentation for Keystone.
|
||||
|
||||
%endif
|
||||
|
||||
|
||||
%package -n python-keystone
|
||||
Summary: Keystone Python libraries
|
||||
Group: Development/Languages/Python
|
||||
|
||||
#for $i in $requires
|
||||
Requires: ${i}
|
||||
#end for
|
||||
|
||||
%description -n python-keystone
|
||||
Keystone is a Python implementation of the OpenStack
|
||||
(http://www.openstack.org) identity service API.
|
||||
|
||||
This package contains the Keystone Python library.
|
||||
|
||||
#raw
|
||||
%prep
|
||||
%setup -q -n %{python_name}-%{version}
|
||||
|
||||
|
||||
%build
|
||||
python setup.py build
|
||||
|
||||
|
||||
%install
|
||||
%__rm -rf %{buildroot}
|
||||
|
||||
%if 0%{?with_doc}
|
||||
export PYTHONPATH="$PWD:$PYTHONPATH"
|
||||
|
||||
pushd doc
|
||||
sphinx-build -b html source build/html
|
||||
popd
|
||||
|
||||
# Fix hidden-file-or-dir warnings
|
||||
rm -fr doc/build/html/.doctrees doc/build/html/.buildinfo
|
||||
%endif
|
||||
|
||||
python setup.py install --prefix=%{_prefix} --root=%{buildroot}
|
||||
|
||||
install -d -m 755 %{buildroot}%{_sysconfdir}/keystone
|
||||
install -m 644 etc/* %{buildroot}%{_sysconfdir}/keystone
|
||||
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/keystone
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/log/keystone
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/run/keystone
|
||||
|
||||
install -p -D -m 755 %{SOURCE1} %{buildroot}%{_initrddir}/%{daemon_prefix}-all
|
||||
|
||||
%__rm -rf %{buildroot}%{python_sitelib}/{doc,tools}
|
||||
|
||||
|
||||
%clean
|
||||
%__rm -rf %{buildroot}
|
||||
|
||||
|
||||
%pre
|
||||
getent group keystone >/dev/null || groupadd -r keystone
|
||||
getent passwd keystone >/dev/null || \
|
||||
useradd -r -g keystone -d %{_sharedstatedir}/keystone -s /sbin/nologin \
|
||||
-c "OpenStack Keystone Daemons" keystone
|
||||
exit 0
|
||||
|
||||
|
||||
%preun
|
||||
if [ $1 = 0 ] ; then
|
||||
/sbin/service %{daemon_prefix}-all stop &>/dev/null
|
||||
/sbin/chkconfig --del %{daemon_prefix}-all
|
||||
exit 0
|
||||
fi
|
||||
|
||||
%postun
|
||||
if [ $1 -ge 1 ] ; then
|
||||
# Package upgrade, not uninstall
|
||||
/sbin/service %{daemon_prefix}-all condrestart &>/dev/null
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
%files
|
||||
%defattr(-,root,root,-)
|
||||
%doc README.rst HACKING.rst LICENSE
|
||||
%{_usr}/bin/*
|
||||
%config(noreplace) %{_sysconfdir}/keystone
|
||||
%dir %attr(0755, keystone, nobody) %{_sharedstatedir}/keystone
|
||||
%dir %attr(0755, keystone, nobody) %{_localstatedir}/log/keystone
|
||||
%dir %attr(0755, keystone, nobody) %{_localstatedir}/run/keystone
|
||||
%{_initrddir}/*
|
||||
|
||||
%if 0%{?with_doc}
|
||||
%files doc
|
||||
%defattr(-,root,root,-)
|
||||
%doc doc
|
||||
%endif
|
||||
|
||||
%files -n python-keystone
|
||||
%defattr(-,root,root,-)
|
||||
%doc LICENSE
|
||||
%{python_sitelib}/*
|
||||
|
||||
|
||||
%changelog
|
||||
#endraw
|
600
conf/templates/packaging/specs/openstack-nova.spec
Normal file
@ -0,0 +1,600 @@
|
||||
#encoding UTF-8
|
||||
# Based on spec by:
|
||||
# * Silas Sewell <silas@sewell.ch>
|
||||
# * Andrey Brindeyev <abrindeyev@griddynamics.com>
|
||||
# * Alessio Ababilov <aababilov@griddynamics.com>
|
||||
# * Ivan A. Melnikov <imelnikov@griddynamics.com>
|
||||
|
||||
%global python_name nova
|
||||
%global daemon_prefix openstack-nova
|
||||
|
||||
%if ! (0%{?fedora} > 12 || 0%{?rhel} > 6)
|
||||
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
|
||||
%endif
|
||||
|
||||
Name: openstack-nova
|
||||
Summary: OpenStack Compute (nova)
|
||||
Version: $version
|
||||
Release: 1%{?dist}
|
||||
Epoch: $epoch
|
||||
|
||||
Group: Development/Languages
|
||||
License: ASL 2.0
|
||||
Vendor: OpenStack Foundation
|
||||
URL: http://openstack.org/projects/compute/
|
||||
Source0: %{python_name}-%{version}.tar.gz
|
||||
|
||||
Source10: openstack-nova-api.init
|
||||
Source11: openstack-nova-cert.init
|
||||
Source12: openstack-nova-compute.init
|
||||
Source13: openstack-nova-network.init
|
||||
Source14: openstack-nova-objectstore.init
|
||||
Source15: openstack-nova-scheduler.init
|
||||
Source18: openstack-nova-xvpvncproxy.init
|
||||
Source19: openstack-nova-console.init
|
||||
Source20: openstack-nova-consoleauth.init
|
||||
Source25: openstack-nova-metadata-api.init
|
||||
Source26: openstack-nova-conductor.init
|
||||
Source27: openstack-nova-cells.init
|
||||
Source28: openstack-nova-spicehtml5proxy.init
|
||||
|
||||
Source50: nova-ifc-template
|
||||
Source51: nova.logrotate
|
||||
Source52: nova-polkit.pkla
|
||||
Source53: nova-sudoers
|
||||
|
||||
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: python-devel
|
||||
BuildRequires: python-setuptools
|
||||
|
||||
Requires: %{name}-compute = %{epoch}:%{version}-%{release}
|
||||
Requires: %{name}-cert = %{epoch}:%{version}-%{release}
|
||||
Requires: %{name}-scheduler = %{epoch}:%{version}-%{release}
|
||||
Requires: %{name}-api = %{epoch}:%{version}-%{release}
|
||||
Requires: %{name}-network = %{epoch}:%{version}-%{release}
|
||||
Requires: %{name}-objectstore = %{epoch}:%{version}-%{release}
|
||||
Requires: %{name}-console = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description
|
||||
Nova is a cloud computing fabric controller (the main part of an IaaS system)
|
||||
built to match the popular AWS EC2 and S3 APIs. It is written in Python, using
|
||||
the Tornado and Twisted frameworks, and relies on the standard AMQP messaging
|
||||
protocol, and the Redis KVS.
|
||||
|
||||
Nova is intended to be easy to extend, and adapt. For example, it currently
|
||||
uses an LDAP server for users and groups, but also includes a fake LDAP server,
|
||||
that stores data in Redis. It has extensive test coverage, and uses the Sphinx
|
||||
toolkit (the same as Python itself) for code and user documentation.
|
||||
|
||||
%package common
|
||||
Summary: Components common to all OpenStack services
|
||||
Group: Applications/System
|
||||
|
||||
Requires: python-nova = %{epoch}:%{version}-%{release}
|
||||
|
||||
Requires(post): chkconfig
|
||||
Requires(postun): initscripts
|
||||
Requires(preun): chkconfig
|
||||
Requires(pre): shadow-utils
|
||||
|
||||
|
||||
%description common
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains scripts, config and dependencies shared
|
||||
between all the OpenStack nova services.
|
||||
|
||||
%package compute
|
||||
Summary: OpenStack Nova Virtual Machine control service
|
||||
Group: Applications/System
|
||||
|
||||
Requires: %{name}-common = %{epoch}:%{version}-%{release}
|
||||
Requires: curl
|
||||
Requires: iscsi-initiator-utils
|
||||
Requires: iptables iptables-ipv6
|
||||
Requires: vconfig
|
||||
# tunctl is needed where `ip tuntap` is not available
|
||||
Requires: tunctl
|
||||
Requires: libguestfs-mount >= 1.7.17
|
||||
# The fuse dependency should be added to libguestfs-mount
|
||||
Requires: fuse
|
||||
Requires: libvirt >= 0.8.7
|
||||
Requires: libvirt-python
|
||||
Requires(pre): qemu-kvm
|
||||
|
||||
%description compute
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the Nova service for controlling Virtual Machines.
|
||||
|
||||
|
||||
%package network
|
||||
Summary: OpenStack Nova Network control service
|
||||
Group: Applications/System
|
||||
|
||||
Requires: %{name}-common = %{epoch}:%{version}-%{release}
|
||||
Requires: vconfig
|
||||
Requires: radvd
|
||||
Requires: bridge-utils
|
||||
Requires: dnsmasq-utils
|
||||
Requires: dnsmasq
|
||||
# tunctl is needed where `ip tuntap` is not available
|
||||
Requires: tunctl
|
||||
|
||||
%description network
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the Nova service for controlling networking.
|
||||
|
||||
|
||||
%package scheduler
|
||||
Summary: OpenStack Nova VM distribution service
|
||||
Group: Applications/System
|
||||
|
||||
Requires: %{name}-common = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description scheduler
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the service for scheduling where
|
||||
to run Virtual Machines in the cloud.
|
||||
|
||||
|
||||
%package cert
|
||||
Summary: OpenStack Nova certificate management service
|
||||
Group: Applications/System
|
||||
|
||||
Requires: %{name}-common = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description cert
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the Nova service for managing certificates.
|
||||
|
||||
|
||||
%package api
|
||||
Summary: OpenStack Nova API services
|
||||
Group: Applications/System
|
||||
|
||||
Requires: %{name}-common = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description api
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the Nova services providing programmatic access.
|
||||
|
||||
|
||||
%package conductor
|
||||
Summary: OpenStack Nova Conductor services
|
||||
Group: Applications/System
|
||||
|
||||
Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description conductor
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the Nova services providing database access for
|
||||
the compute service
|
||||
|
||||
|
||||
%package objectstore
|
||||
Summary: OpenStack Nova simple object store service
|
||||
Group: Applications/System
|
||||
|
||||
Requires: %{name}-common = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description objectstore
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the Nova service providing a simple object store.
|
||||
|
||||
|
||||
%package console
|
||||
Summary: OpenStack Nova console access services
|
||||
Group: Applications/System
|
||||
|
||||
Requires: %{name}-common = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description console
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the Nova services providing
|
||||
console access services to Virtual Machines.
|
||||
|
||||
|
||||
%package cells
|
||||
Summary: OpenStack Nova Cells services
|
||||
Group: Applications/System
|
||||
|
||||
Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
|
||||
|
||||
%description cells
|
||||
OpenStack Compute (codename Nova) is open source software designed to
|
||||
provision and manage large networks of virtual machines, creating a
|
||||
redundant and scalable cloud computing platform. It gives you the
|
||||
software, control panels, and APIs required to orchestrate a cloud,
|
||||
including running instances, managing networks, and controlling access
|
||||
through users and projects. OpenStack Compute strives to be both
|
||||
hardware and hypervisor agnostic, currently supporting a variety of
|
||||
standard hardware configurations and seven major hypervisors.
|
||||
|
||||
This package contains the Nova Cells service providing additional
|
||||
scaling and (geographic) distribution for compute services.
|
||||
|
||||
|
||||
%package -n python-nova
|
||||
Summary: Nova Python libraries
|
||||
Group: Applications/System
|
||||
|
||||
Requires: openssl
|
||||
Requires: sudo
|
||||
#for $i in $requires
|
||||
Requires: ${i}
|
||||
#end for
|
||||
|
||||
%description -n python-nova
|
||||
Nova is a cloud computing fabric controller (the main part of an IaaS system)
|
||||
built to match the popular AWS EC2 and S3 APIs. It is written in Python, using
|
||||
the Tornado and Twisted frameworks, and relies on the standard AMQP messaging
|
||||
protocol, and the Redis KVS.
|
||||
|
||||
This package contains the %{name} Python library.
|
||||
|
||||
%if 0%{?with_doc}
|
||||
|
||||
%package doc
|
||||
Summary: Documentation for %{name}
|
||||
Group: Documentation
|
||||
|
||||
BuildRequires: python-sphinx
|
||||
|
||||
%description doc
|
||||
Nova is a cloud computing fabric controller (the main part of an IaaS system)
|
||||
built to match the popular AWS EC2 and S3 APIs. It is written in Python, using
|
||||
the Tornado and Twisted frameworks, and relies on the standard AMQP messaging
|
||||
protocol, and the Redis KVS.
|
||||
|
||||
This package contains documentation files for %{name}.
|
||||
%endif
|
||||
|
||||
#raw
|
||||
%prep
|
||||
%setup0 -q -n %{python_name}-%{version}
|
||||
|
||||
%build
|
||||
%{__python} setup.py build
|
||||
|
||||
%install
|
||||
rm -rf %{buildroot}
|
||||
%{__python} setup.py install -O1 --skip-build --root %{buildroot}
|
||||
|
||||
%if 0%{?with_doc}
|
||||
export PYTHONPATH="$PWD:$PYTHONPATH"
|
||||
pushd doc
|
||||
sphinx-build -b html source build/html
|
||||
popd
|
||||
|
||||
# Fix hidden-file-or-dir warnings
|
||||
rm -fr doc/build/html/.doctrees doc/build/html/.buildinfo
|
||||
%endif
|
||||
|
||||
# Setup directories
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/buckets
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/images
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/instances
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/keys
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/networks
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/tmp
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/log/nova
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/lock/nova
|
||||
|
||||
# Setup ghost CA cert
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/CA
|
||||
install -p -m 755 nova/CA/*.sh %{buildroot}%{_sharedstatedir}/nova/CA
|
||||
install -p -m 644 nova/CA/openssl.cnf.tmpl %{buildroot}%{_sharedstatedir}/nova/CA
|
||||
install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/CA/{certs,crl,newcerts,projects,reqs}
|
||||
touch %{buildroot}%{_sharedstatedir}/nova/CA/{cacert.pem,crl.pem,index.txt,openssl.cnf,serial}
|
||||
install -d -m 750 %{buildroot}%{_sharedstatedir}/nova/CA/private
|
||||
touch %{buildroot}%{_sharedstatedir}/nova/CA/private/cakey.pem
|
||||
|
||||
# Install config files
|
||||
install -d -m 755 %{buildroot}%{_sysconfdir}/nova
|
||||
install -p -D -m 640 etc/nova/api-paste.ini %{buildroot}%{_sysconfdir}/nova/
|
||||
install -p -D -m 640 etc/nova/policy.json %{buildroot}%{_sysconfdir}/nova/
|
||||
install -p -D -m 640 etc/nova/rootwrap.conf %{buildroot}%{_sysconfdir}/nova/
|
||||
install -p -D -m 640 etc/nova/nova.conf.sample %{buildroot}%{_sysconfdir}/nova/
|
||||
install -p -D -m 640 etc/nova/logging_sample.conf %{buildroot}%{_sysconfdir}/nova/
|
||||
|
||||
# Install initscripts for Nova services
|
||||
install -p -D -m 755 %{SOURCE10} %{buildroot}%{_initrddir}/%{daemon_prefix}-api
|
||||
install -p -D -m 755 %{SOURCE11} %{buildroot}%{_initrddir}/%{daemon_prefix}-cert
|
||||
install -p -D -m 755 %{SOURCE12} %{buildroot}%{_initrddir}/%{daemon_prefix}-compute
|
||||
install -p -D -m 755 %{SOURCE13} %{buildroot}%{_initrddir}/%{daemon_prefix}-network
|
||||
install -p -D -m 755 %{SOURCE14} %{buildroot}%{_initrddir}/%{daemon_prefix}-objectstore
|
||||
install -p -D -m 755 %{SOURCE15} %{buildroot}%{_initrddir}/%{daemon_prefix}-scheduler
|
||||
install -p -D -m 755 %{SOURCE18} %{buildroot}%{_initrddir}/%{daemon_prefix}-xvpvncproxy
|
||||
install -p -D -m 755 %{SOURCE19} %{buildroot}%{_initrddir}/%{daemon_prefix}-console
|
||||
install -p -D -m 755 %{SOURCE20} %{buildroot}%{_initrddir}/%{daemon_prefix}-consoleauth
|
||||
install -p -D -m 755 %{SOURCE25} %{buildroot}%{_initrddir}/%{daemon_prefix}-metadata-api
|
||||
install -p -D -m 755 %{SOURCE26} %{buildroot}%{_initrddir}/%{daemon_prefix}-conductor
|
||||
install -p -D -m 755 %{SOURCE27} %{buildroot}%{_initrddir}/%{daemon_prefix}-cells
|
||||
install -p -D -m 755 %{SOURCE28} %{buildroot}%{_initrddir}/%{daemon_prefix}-spicehtml5proxy
|
||||
|
||||
# Install sudoers
|
||||
install -p -D -m 440 %{SOURCE53} %{buildroot}%{_sysconfdir}/sudoers.d/nova
|
||||
|
||||
# Install logrotate
|
||||
install -p -D -m 644 %{SOURCE51} %{buildroot}%{_sysconfdir}/logrotate.d/%{name}
|
||||
|
||||
# Install pid directory
|
||||
install -d -m 755 %{buildroot}%{_localstatedir}/run/nova
|
||||
|
||||
# Install template files
|
||||
install -p -D -m 644 nova/cloudpipe/client.ovpn.template %{buildroot}%{_datarootdir}/nova/client.ovpn.template
|
||||
install -p -D -m 644 nova/virt/interfaces.template %{buildroot}%{_datarootdir}/nova/interfaces.template
|
||||
|
||||
# Install rootwrap files in /usr/share/nova/rootwrap
|
||||
mkdir -p %{buildroot}%{_datarootdir}/nova/rootwrap/
|
||||
install -p -D -m 644 etc/nova/rootwrap.d/* %{buildroot}%{_datarootdir}/nova/rootwrap/
|
||||
|
||||
# Network configuration templates for injection engine
|
||||
install -d -m 755 %{buildroot}%{_datarootdir}/nova/interfaces
|
||||
install -p -D -m 644 nova/virt/interfaces.template %{buildroot}%{_datarootdir}/nova/interfaces/interfaces.ubuntu.template
|
||||
install -p -D -m 644 %{SOURCE50} %{buildroot}%{_datarootdir}/nova/interfaces.template
|
||||
|
||||
# Clean CA directory
|
||||
find %{buildroot}%{_sharedstatedir}/nova/CA -name .gitignore -delete
|
||||
find %{buildroot}%{_sharedstatedir}/nova/CA -name .placeholder -delete
|
||||
|
||||
install -d -m 755 %{buildroot}%{_sysconfdir}/polkit-1/localauthority/50-local.d
|
||||
install -p -D -m 644 %{SOURCE52} %{buildroot}%{_sysconfdir}/polkit-1/localauthority/50-local.d/50-nova.pkla
|
||||
|
||||
# Remove unneeded in production stuff
|
||||
rm -f %{buildroot}%{_bindir}/nova-debug
|
||||
rm -fr %{buildroot}%{python_sitelib}/nova/tests/
|
||||
rm -fr %{buildroot}%{python_sitelib}/run_tests.*
|
||||
rm -f %{buildroot}%{_bindir}/nova-combined
|
||||
rm -f %{buildroot}/usr/share/doc/nova/README*
|
||||
|
||||
# We currently use the equivalent file from the novnc package
|
||||
rm -f %{buildroot}%{_bindir}/nova-novncproxy
|
||||
|
||||
%clean
|
||||
rm -rf %{buildroot}
|
||||
|
||||
|
||||
%post
|
||||
if %{_sbindir}/selinuxenabled; then
|
||||
echo -e "\033[47m\033[1;31m***************************************************\033[0m"
|
||||
echo -e "\033[47m\033[1;31m*\033[0m \033[40m\033[1;31m \033[47m\033[1;31m*\033[0m"
|
||||
echo -e "\033[47m\033[1;31m*\033[0m \033[40m\033[1;31m >> \033[5mYou have SELinux enabled on your host !\033[25m << \033[47m\033[1;31m*\033[0m"
|
||||
echo -e "\033[47m\033[1;31m*\033[0m \033[40m\033[1;31m \033[47m\033[1;31m*\033[0m"
|
||||
echo -e "\033[47m\033[1;31m*\033[0m \033[40m\033[1;31mPlease disable it by setting \`SELINUX=disabled' \033[47m\033[1;31m*\033[0m"
|
||||
echo -e "\033[47m\033[1;31m*\033[0m \033[40m\033[1;31min /etc/sysconfig/selinux and don't forget \033[47m\033[1;31m*\033[0m"
|
||||
echo -e "\033[47m\033[1;31m*\033[0m \033[40m\033[1;31mto reboot your host to apply that change! \033[47m\033[1;31m*\033[0m"
|
||||
echo -e "\033[47m\033[1;31m*\033[0m \033[40m\033[1;31m \033[47m\033[1;31m*\033[0m"
|
||||
echo -e "\033[47m\033[1;31m***************************************************\033[0m"
|
||||
fi
|
||||
|
||||
|
||||
%pre common
|
||||
getent group nova >/dev/null || groupadd -r nova
|
||||
getent passwd nova >/dev/null || \
|
||||
useradd -r -g nova -d %{_sharedstatedir}/nova -s /sbin/nologin \
|
||||
-c "OpenStack Nova Daemons" nova
|
||||
exit 0
|
||||
|
||||
%pre compute
|
||||
usermod -a -G qemu nova
|
||||
# Add nova to the fuse group (if present) to support guestmount
|
||||
if getent group fuse >/dev/null; then
|
||||
usermod -a -G fuse nova
|
||||
fi
|
||||
exit 0
|
||||
|
||||
# Do not autostart daemons in %post since they are not configured yet
|
||||
#end raw
|
||||
|
||||
#set $daemon_map = {"api": ["api", "metadata-api"], "cells": [], "cert": [], "compute": [], "console": ["console", "consoleauth", "xvpvncproxy"], "network": [], "objectstore": [], "scheduler": []}
|
||||
#for $key, $value in $daemon_map.iteritems()
|
||||
#set $daemon_list = " ".join($value) if $value else $key
|
||||
%preun $key
|
||||
if [ \$1 -eq 0 ] ; then
|
||||
for svc in $daemon_list; do
|
||||
/sbin/service %{daemon_prefix}-\${svc} stop &>/dev/null
|
||||
/sbin/chkconfig --del %{daemon_prefix}-\${svc}
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
%postun $key
|
||||
if [ \$1 -ge 1 ] ; then
|
||||
# Package upgrade, not uninstall
|
||||
for svc in $daemon_list; do
|
||||
/sbin/service %{daemon_prefix}-\${svc} condrestart &>/dev/null
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
#end for
|
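As a sketch of what the Cheetah loop above produces (an illustrative expansion, not text that appears in the template itself): for the "api" entry of $daemon_map, $daemon_list becomes "api metadata-api", so the rendered spec gains roughly this scriptlet:
%preun api
if [ $1 -eq 0 ] ; then
for svc in api metadata-api; do
/sbin/service %{daemon_prefix}-${svc} stop &>/dev/null
/sbin/chkconfig --del %{daemon_prefix}-${svc}
done
exit 0
fi
The matching %postun api expands the same way, running condrestart for each service on package upgrade. Entries with an empty list (cells, cert, compute, ...) fall back to the package key itself as the single service name.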
||||
#raw
|
||||
|
||||
%files
|
||||
%doc LICENSE
|
||||
%{_bindir}/nova-all
|
||||
|
||||
%files common
|
||||
%doc LICENSE
|
||||
%dir %{_sysconfdir}/nova
|
||||
%attr(-, root, nova) %{_sysconfdir}/nova/nova.conf.sample
|
||||
%attr(-, root, nova) %{_sysconfdir}/nova/logging_sample.conf
|
||||
%config(noreplace) %attr(-, root, nova) %{_sysconfdir}/nova/rootwrap.conf
|
||||
%config(noreplace) %attr(-, root, nova) %{_sysconfdir}/nova/api-paste.ini
|
||||
%config(noreplace) %attr(-, root, nova) %{_sysconfdir}/nova/policy.json
|
||||
%config(noreplace) %{_sysconfdir}/logrotate.d/openstack-nova
|
||||
%config(noreplace) %{_sysconfdir}/sudoers.d/nova
|
||||
%config(noreplace) %{_sysconfdir}/polkit-1/localauthority/50-local.d/50-nova.pkla
|
||||
|
||||
%dir %attr(0755, nova, root) %{_localstatedir}/log/nova
|
||||
%dir %attr(0755, nova, root) %{_localstatedir}/lock/nova
|
||||
%dir %attr(0755, nova, root) %{_localstatedir}/run/nova
|
||||
|
||||
%{_bindir}/nova-clear-rabbit-queues
|
||||
# TODO. zmq-receiver may need its own service?
|
||||
%{_bindir}/nova-rpc-zmq-receiver
|
||||
%{_bindir}/nova-manage
|
||||
%{_bindir}/nova-rootwrap
|
||||
|
||||
%{_datarootdir}/nova
|
||||
#%{_mandir}/man1/nova*.1.gz
|
||||
|
||||
%defattr(-, nova, nova, -)
|
||||
%dir %{_sharedstatedir}/nova
|
||||
%dir %{_sharedstatedir}/nova/buckets
|
||||
%dir %{_sharedstatedir}/nova/images
|
||||
%dir %{_sharedstatedir}/nova/instances
|
||||
%dir %{_sharedstatedir}/nova/keys
|
||||
%dir %{_sharedstatedir}/nova/networks
|
||||
%dir %{_sharedstatedir}/nova/tmp
|
||||
|
||||
%files compute
|
||||
%{_bindir}/nova-compute
|
||||
%{_bindir}/nova-baremetal-deploy-helper
|
||||
%{_bindir}/nova-baremetal-manage
|
||||
%{_initrddir}/%{daemon_prefix}-compute
|
||||
%{_datarootdir}/nova/rootwrap/compute.filters
|
||||
|
||||
%files network
|
||||
%{_bindir}/nova-network
|
||||
%{_bindir}/nova-dhcpbridge
|
||||
%{_initrddir}/%{daemon_prefix}-network
|
||||
%{_datarootdir}/nova/rootwrap/network.filters
|
||||
|
||||
%files scheduler
|
||||
%{_bindir}/nova-scheduler
|
||||
%{_initrddir}/%{daemon_prefix}-scheduler
|
||||
|
||||
%files cert
|
||||
%{_bindir}/nova-cert
|
||||
%{_initrddir}/%{daemon_prefix}-cert
|
||||
%defattr(-, nova, nova, -)
|
||||
%dir %{_sharedstatedir}/nova/CA/
|
||||
%dir %{_sharedstatedir}/nova/CA/certs
|
||||
%dir %{_sharedstatedir}/nova/CA/crl
|
||||
%dir %{_sharedstatedir}/nova/CA/newcerts
|
||||
%dir %{_sharedstatedir}/nova/CA/projects
|
||||
%dir %{_sharedstatedir}/nova/CA/reqs
|
||||
%{_sharedstatedir}/nova/CA/*.sh
|
||||
%{_sharedstatedir}/nova/CA/openssl.cnf.tmpl
|
||||
%ghost %config(missingok,noreplace) %verify(not md5 size mtime) %{_sharedstatedir}/nova/CA/cacert.pem
|
||||
%ghost %config(missingok,noreplace) %verify(not md5 size mtime) %{_sharedstatedir}/nova/CA/crl.pem
|
||||
%ghost %config(missingok,noreplace) %verify(not md5 size mtime) %{_sharedstatedir}/nova/CA/index.txt
|
||||
%ghost %config(missingok,noreplace) %verify(not md5 size mtime) %{_sharedstatedir}/nova/CA/openssl.cnf
|
||||
%ghost %config(missingok,noreplace) %verify(not md5 size mtime) %{_sharedstatedir}/nova/CA/serial
|
||||
%dir %attr(0750, -, -) %{_sharedstatedir}/nova/CA/private
|
||||
%ghost %config(missingok,noreplace) %verify(not md5 size mtime) %{_sharedstatedir}/nova/CA/private/cakey.pem
|
||||
|
||||
%files api
|
||||
%{_bindir}/nova-api*
|
||||
%{_initrddir}/openstack-nova-*api
|
||||
%{_datarootdir}/nova/rootwrap/api-metadata.filters
|
||||
|
||||
%files conductor
|
||||
%{_bindir}/nova-conductor
|
||||
%{_initrddir}/openstack-nova-conductor
|
||||
|
||||
%files objectstore
|
||||
%{_bindir}/nova-objectstore
|
||||
%{_initrddir}/%{daemon_prefix}-objectstore
|
||||
|
||||
%files console
|
||||
%{_bindir}/nova-console*
|
||||
%{_bindir}/nova-xvpvncproxy
|
||||
%{_bindir}/nova-spicehtml5proxy
|
||||
%{_initrddir}/openstack-nova-console*
|
||||
%{_initrddir}/openstack-nova-xvpvncproxy
|
||||
%{_initrddir}/openstack-nova-spicehtml5proxy
|
||||
|
||||
%files cells
|
||||
%{_bindir}/nova-cells
|
||||
%{_initrddir}/openstack-nova-cells
|
||||
|
||||
%files -n python-nova
|
||||
%defattr(-,root,root,-)
|
||||
%doc LICENSE
|
||||
%{python_sitelib}/nova
|
||||
%{python_sitelib}/nova-%{version}-*.egg-info
|
||||
|
||||
%if 0%{?with_doc}
|
||||
%files doc
|
||||
%doc LICENSE doc/build/html
|
||||
%endif
|
||||
|
||||
%changelog
|
||||
#end raw
|
588
conf/templates/packaging/specs/openstack-quantum.spec
Normal file
@ -0,0 +1,588 @@
|
||||
#encoding UTF-8
|
||||
# Based on spec by:
|
||||
# * Terry Wilson <twilson@redhat.com>
|
||||
# * Alan Pevec <apevec@redhat.com>
|
||||
# * Martin Magr <mmagr@redhat.com>
|
||||
# * Gary Kotton <gkotton@redhat.com>
|
||||
# * Robert Kukura <rkukura@redhat.com>
|
||||
# * Pádraig Brady <P@draigBrady.com>
|
||||
|
||||
|
||||
%global python_name quantum
|
||||
%global daemon_prefix openstack-quantum
|
||||
|
||||
%if ! (0%{?fedora} > 12 || 0%{?rhel} > 6)
|
||||
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
|
||||
%endif
|
||||
|
||||
Name: openstack-quantum
|
||||
Version: $version
|
||||
Release: 1%{?dist}
|
||||
Epoch: $epoch
|
||||
Summary: Virtual network service for OpenStack (quantum)
|
||||
|
||||
Group: Applications/System
|
||||
License: ASL 2.0
|
||||
URL: http://launchpad.net/quantum/
|
||||
|
||||
Source0: %{python_name}-%{version}.tar.gz
|
||||
Source1: quantum.logrotate
|
||||
Source2: quantum-sudoers
|
||||
|
||||
Source10: quantum-server.init
|
||||
Source11: quantum-linuxbridge-agent.init
|
||||
Source12: quantum-openvswitch-agent.init
|
||||
Source13: quantum-ryu-agent.init
|
||||
Source14: quantum-nec-agent.init
|
||||
Source15: quantum-dhcp-agent.init
|
||||
Source16: quantum-l3-agent.init
|
||||
Source17: quantum-ovs-cleanup.init
|
||||
Source18: quantum-hyperv-agent.init
|
||||
Source19: quantum-rpc-zmq-receiver.init
|
||||
|
||||
BuildArch: noarch
|
||||
|
||||
BuildRequires: python-devel
|
||||
BuildRequires: python-setuptools
|
||||
# Build require these parallel versions
|
||||
# as setup.py build imports quantum.openstack.common.setup
|
||||
# which will then check for these
|
||||
# BuildRequires: python-sqlalchemy
|
||||
# BuildRequires: python-webob
|
||||
# BuildRequires: python-paste-deploy
|
||||
# BuildRequires: python-routes
|
||||
BuildRequires: dos2unix
|
||||
|
||||
Requires: python-quantum = %{epoch}:%{version}-%{release}
|
||||
Requires: python-keystone
|
||||
|
||||
Requires(post): chkconfig
|
||||
Requires(postun): initscripts
|
||||
Requires(preun): chkconfig
|
||||
Requires(preun): initscripts
|
||||
Requires(pre): shadow-utils
|
||||
|
||||
|
||||
%description
|
||||
Quantum is a virtual network service for OpenStack. Just like
|
||||
OpenStack Nova provides an API to dynamically request and configure
|
||||
virtual servers, Quantum provides an API to dynamically request and
|
||||
configure virtual networks. These networks connect "interfaces" from
|
||||
other OpenStack services (e.g., virtual NICs from Nova VMs). The
|
||||
Quantum API supports extensions to provide advanced network
|
||||
capabilities (e.g., QoS, ACLs, network monitoring, etc.)
|
||||
|
||||
|
||||
%package -n python-quantum
|
||||
Summary: Quantum Python libraries
|
||||
Group: Applications/System
|
||||
|
||||
Requires: sudo
|
||||
#for $i in $requires
|
||||
Requires: ${i}
|
||||
#end for
|
||||
|
||||
|
||||
%description -n python-quantum
|
||||
Quantum provides an API to dynamically request and configure virtual
|
||||
networks.
|
||||
|
||||
This package contains the quantum Python library.
|
||||
|
||||
|
||||
%package -n openstack-quantum-bigswitch
|
||||
Summary: Quantum Big Switch plugin
|
||||
Group: Applications/System
|
||||
|
||||
Requires: openstack-quantum = %{epoch}:%{version}-%{release}
|
||||
|
||||
|
||||
%description -n openstack-quantum-bigswitch
|
||||
Quantum provides an API to dynamically request and configure virtual
|
||||
networks.
|
||||
|
||||
This package contains the quantum plugin that implements virtual
|
||||
networks using the FloodLight Openflow Controller or the Big Switch
|
||||
Networks Controller.
|
||||
|
||||
|
||||
%package -n openstack-quantum-brocade
|
||||
Summary: Quantum Brocade plugin
|
||||
Group: Applications/System
|
||||
|
||||
Requires: openstack-quantum = %{epoch}:%{version}-%{release}
|
||||
|
||||
|
||||
%description -n openstack-quantum-brocade
|
||||
Quantum provides an API to dynamically request and configure virtual
|
||||
networks.
|
||||
|
||||
This package contains the quantum plugin that implements virtual
|
||||
networks using Brocade VCS switches running NOS.
|
||||
|
||||
|
||||
%package -n openstack-quantum-cisco
|
||||
Summary: Quantum Cisco plugin
|
||||
Group: Applications/System
|
||||
|
||||
Requires: openstack-quantum = %{epoch}:%{version}-%{release}
Requires: python-configobj


%description -n openstack-quantum-cisco
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using Cisco UCS and Nexus.


%package -n openstack-quantum-hyperv
Summary: Quantum Hyper-V plugin
Group: Applications/System

Requires: openstack-quantum = %{epoch}:%{version}-%{release}


%description -n openstack-quantum-hyperv
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using Microsoft Hyper-V.


%package -n openstack-quantum-linuxbridge
Summary: Quantum linuxbridge plugin
Group: Applications/System

Requires: bridge-utils
Requires: openstack-quantum = %{epoch}:%{version}-%{release}
Requires: python-pyudev


%description -n openstack-quantum-linuxbridge
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks as VLANs using Linux bridging.


%package -n openstack-quantum-midonet
Summary: Quantum MidoNet plugin
Group: Applications/System

Requires: openstack-quantum = %{epoch}:%{version}-%{release}


%description -n openstack-quantum-midonet
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using MidoNet from Midokura.


%package -n openstack-quantum-nicira
Summary: Quantum Nicira plugin
Group: Applications/System

Requires: openstack-quantum = %{epoch}:%{version}-%{release}


%description -n openstack-quantum-nicira
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using Nicira NVP.


%package -n openstack-quantum-openvswitch
Summary: Quantum openvswitch plugin
Group: Applications/System

Requires: openstack-quantum = %{epoch}:%{version}-%{release}
Requires: openvswitch


%description -n openstack-quantum-openvswitch
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using Open vSwitch.


%package -n openstack-quantum-plumgrid
Summary: Quantum PLUMgrid plugin
Group: Applications/System

Requires: openstack-quantum = %{epoch}:%{version}-%{release}


%description -n openstack-quantum-plumgrid
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using the PLUMgrid platform.


%package -n openstack-quantum-ryu
Summary: Quantum Ryu plugin
Group: Applications/System

Requires: openstack-quantum = %{epoch}:%{version}-%{release}


%description -n openstack-quantum-ryu
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using the Ryu Network Operating System.


%package -n openstack-quantum-nec
Summary: Quantum NEC plugin
Group: Applications/System

Requires: openstack-quantum = %{epoch}:%{version}-%{release}


%description -n openstack-quantum-nec
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using the NEC OpenFlow controller.


%package -n openstack-quantum-metaplugin
Summary: Quantum meta plugin
Group: Applications/System

Requires: openstack-quantum = %{epoch}:%{version}-%{release}


%description -n openstack-quantum-metaplugin
Quantum provides an API to dynamically request and configure virtual
networks.

This package contains the quantum plugin that implements virtual
networks using multiple other quantum plugins.

#raw
%prep
%setup -q -n quantum-%{version}

find quantum -name \*.py -exec sed -i '/\/usr\/bin\/env python/d' {} \;

chmod 644 quantum/plugins/cisco/README

# Adjust configuration file content
sed -i 's/debug = True/debug = False/' etc/quantum.conf
sed -i 's/\# auth_strategy = keystone/auth_strategy = keystone/' etc/quantum.conf
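# Note (assumed outcome): after the two sed edits above the packaged
# etc/quantum.conf should carry "debug = False" and an uncommented
# "auth_strategy = keystone" line.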
# Remove unneeded dependency
sed -i '/setuptools_git/d' setup.py

# let RPM handle deps
sed -i '/setup_requires/d; /install_requires/d; /dependency_links/d' setup.py


%build
%{__python} setup.py build


%install
rm -rf %{buildroot}
%{__python} setup.py install -O1 --skip-build --root %{buildroot}

# Remove unused files
rm -rf %{buildroot}%{python_sitelib}/bin
rm -rf %{buildroot}%{python_sitelib}/doc
rm -rf %{buildroot}%{python_sitelib}/tools
rm -rf %{buildroot}%{python_sitelib}/quantum/tests
rm -rf %{buildroot}%{python_sitelib}/quantum/plugins/*/tests
rm -f %{buildroot}%{python_sitelib}/quantum/plugins/*/run_tests.*
rm %{buildroot}/usr/etc/init.d/quantum-server

# Install execs
install -p -D -m 755 bin/quantum-* %{buildroot}%{_bindir}/

# Move rootwrap files to proper location
install -d -m 755 %{buildroot}%{_datarootdir}/quantum/rootwrap
mv %{buildroot}/usr/etc/quantum/rootwrap.d/*.filters %{buildroot}%{_datarootdir}/quantum/rootwrap

# Move config files to proper location
install -d -m 755 %{buildroot}%{_sysconfdir}/quantum
mv %{buildroot}/usr/etc/quantum/* %{buildroot}%{_sysconfdir}/quantum
chmod 640 %{buildroot}%{_sysconfdir}/quantum/plugins/*/*.ini

# Configure agents to use quantum-rootwrap
for f in %{buildroot}%{_sysconfdir}/quantum/plugins/*/*.ini %{buildroot}%{_sysconfdir}/quantum/*_agent.ini; do
    sed -i 's/^root_helper.*/root_helper = sudo quantum-rootwrap \/etc\/quantum\/rootwrap.conf/g' $f
done
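# Note (assumed outcome): every matched ini file should now contain a line like
#   root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
# mirroring the sed replacement in the loop above.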
# Configure quantum-dhcp-agent state_path
sed -i 's/state_path = \/opt\/stack\/data/state_path = \/var\/lib\/quantum/' %{buildroot}%{_sysconfdir}/quantum/dhcp_agent.ini

# Install logrotate
install -p -D -m 644 %{SOURCE1} %{buildroot}%{_sysconfdir}/logrotate.d/openstack-quantum

# Install sudoers
install -p -D -m 440 %{SOURCE2} %{buildroot}%{_sysconfdir}/sudoers.d/quantum

# Install sysv init scripts
install -p -D -m 755 %{SOURCE10} %{buildroot}%{_initrddir}/%{daemon_prefix}-server
install -p -D -m 755 %{SOURCE11} %{buildroot}%{_initrddir}/%{daemon_prefix}-linuxbridge-agent
install -p -D -m 755 %{SOURCE12} %{buildroot}%{_initrddir}/%{daemon_prefix}-openvswitch-agent
install -p -D -m 755 %{SOURCE13} %{buildroot}%{_initrddir}/%{daemon_prefix}-ryu-agent
install -p -D -m 755 %{SOURCE14} %{buildroot}%{_initrddir}/%{daemon_prefix}-nec-agent
install -p -D -m 755 %{SOURCE15} %{buildroot}%{_initrddir}/%{daemon_prefix}-dhcp-agent
install -p -D -m 755 %{SOURCE16} %{buildroot}%{_initrddir}/%{daemon_prefix}-l3-agent
install -p -D -m 755 %{SOURCE17} %{buildroot}%{_initrddir}/%{daemon_prefix}-ovs-cleanup
install -p -D -m 755 %{SOURCE18} %{buildroot}%{_initrddir}/%{daemon_prefix}-hyperv-agent
install -p -D -m 755 %{SOURCE19} %{buildroot}%{_initrddir}/%{daemon_prefix}-rpc-zmq-receiver

# Setup directories
install -d -m 755 %{buildroot}%{_datadir}/quantum
install -d -m 755 %{buildroot}%{_sharedstatedir}/quantum
install -d -m 755 %{buildroot}%{_localstatedir}/log/quantum
install -d -m 755 %{buildroot}%{_localstatedir}/run/quantum

# Install version info file
cat > %{buildroot}%{_sysconfdir}/quantum/release <<EOF
[Quantum]
vendor = OpenStack LLC
product = OpenStack Quantum
package = %{release}
EOF

%clean
rm -rf %{buildroot}


%pre
getent group quantum >/dev/null || groupadd -r quantum
getent passwd quantum >/dev/null || \
    useradd -r -g quantum -d %{_sharedstatedir}/quantum -s /sbin/nologin \
    -c "OpenStack Quantum Daemons" quantum
exit 0
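# To sanity-check the scriptlet above on an installed system (illustrative):
#   getent passwd quantum   # home /var/lib/quantum, shell /sbin/nologin
#   getent group quantum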
# Do not autostart daemons in %post since they are not configured yet
#end raw

#set $daemon_map = {"": ["server", "dhcp-agent", "l3-agent"], "linuxbridge": ["linuxbridge-agent"], "openvswitch": ["openvswitch-agent", "ovs-cleanup"], "ryu": ["ryu-agent"], "nec": ["nec-agent"]}
#for $key, $value in $daemon_map.iteritems()
#set $daemon_list = " ".join($value) if $value else $key
%preun $key
if [ \$1 -eq 0 ] ; then
    for svc in $daemon_list; do
        /sbin/service %{daemon_prefix}-\${svc} stop &>/dev/null
        /sbin/chkconfig --del %{daemon_prefix}-\${svc}
    done
    exit 0
fi

%postun $key
if [ \$1 -ge 1 ] ; then
    # Package upgrade, not uninstall
    for svc in $daemon_list; do
        /sbin/service %{daemon_prefix}-\${svc} condrestart &>/dev/null
    done
    exit 0
fi

#end for
#raw
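# For reference, the Cheetah #for block above is expected to render one
# %%preun/%%postun pair per daemon group; for the linuxbridge sub-package the
# result should look roughly like (illustrative):
#   %%preun linuxbridge
#   if [ $1 -eq 0 ] ; then
#       /sbin/service %%{daemon_prefix}-linuxbridge-agent stop &>/dev/null
#       /sbin/chkconfig --del %%{daemon_prefix}-linuxbridge-agent
#       exit 0
#   fi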

%files
%doc LICENSE
%doc README
%{_bindir}/quantum-db-manage
%{_bindir}/quantum-debug
%{_bindir}/quantum-dhcp-agent
%{_bindir}/quantum-dhcp-agent-dnsmasq-lease-update
%{_bindir}/quantum-l3-agent
%{_bindir}/quantum-lbaas-agent
%{_bindir}/quantum-metadata-agent
%{_bindir}/quantum-netns-cleanup
%{_bindir}/quantum-ns-metadata-proxy
%{_bindir}/quantum-rootwrap
%{_bindir}/quantum-rpc-zmq-receiver
%{_bindir}/quantum-server
%{_bindir}/quantum-usage-audit
%{_initrddir}/%{daemon_prefix}-server
%{_initrddir}/%{daemon_prefix}-dhcp-agent
%{_initrddir}/%{daemon_prefix}-l3-agent
%{_initrddir}/%{daemon_prefix}-rpc-zmq-receiver
%dir %{_sysconfdir}/quantum
%{_sysconfdir}/quantum/release
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/api-paste.ini
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/dhcp_agent.ini
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/l3_agent.ini
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/metadata_agent.ini
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/lbaas_agent.ini
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/policy.json
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/quantum.conf
%config(noreplace) %{_sysconfdir}/quantum/rootwrap.conf
%dir %{_sysconfdir}/quantum/plugins
%config(noreplace) %{_sysconfdir}/logrotate.d/*
%config(noreplace) %{_sysconfdir}/sudoers.d/quantum
%dir %attr(0755, quantum, quantum) %{_sharedstatedir}/quantum
%dir %attr(0755, quantum, quantum) %{_localstatedir}/log/quantum
%dir %attr(0755, quantum, quantum) %{_localstatedir}/run/quantum
%dir %{_datarootdir}/quantum
%dir %{_datarootdir}/quantum/rootwrap
%{_datarootdir}/quantum/rootwrap/dhcp.filters
%{_datarootdir}/quantum/rootwrap/iptables-firewall.filters
%{_datarootdir}/quantum/rootwrap/l3.filters
%{_datarootdir}/quantum/rootwrap/lbaas-haproxy.filters


%files -n python-quantum
%doc LICENSE
%doc README
%{python_sitelib}/quantum
%exclude %{python_sitelib}/quantum/plugins/cisco/extensions/_credential_view.py*
%exclude %{python_sitelib}/quantum/plugins/cisco/extensions/credential.py*
%exclude %{python_sitelib}/quantum/plugins/cisco/extensions/qos.py*
%exclude %{python_sitelib}/quantum/plugins/cisco/extensions/_qos_view.py*
%exclude %{python_sitelib}/quantum/plugins/bigswitch
%exclude %{python_sitelib}/quantum/plugins/brocade
%exclude %{python_sitelib}/quantum/plugins/cisco
%exclude %{python_sitelib}/quantum/plugins/hyperv
%exclude %{python_sitelib}/quantum/plugins/linuxbridge
%exclude %{python_sitelib}/quantum/plugins/metaplugin
%exclude %{python_sitelib}/quantum/plugins/midonet
%exclude %{python_sitelib}/quantum/plugins/nec
%exclude %{python_sitelib}/quantum/plugins/nicira
%exclude %{python_sitelib}/quantum/plugins/openvswitch
%exclude %{python_sitelib}/quantum/plugins/plumgrid
%exclude %{python_sitelib}/quantum/plugins/ryu
%{python_sitelib}/quantum-%%{version}-*.egg-info


%files -n openstack-quantum-bigswitch
%doc LICENSE
%doc quantum/plugins/bigswitch/README
%{python_sitelib}/quantum/plugins/bigswitch
%dir %{_sysconfdir}/quantum/plugins/bigswitch
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/bigswitch/*.ini


%files -n openstack-quantum-brocade
%doc LICENSE
%doc quantum/plugins/brocade/README.md
%{python_sitelib}/quantum/plugins/brocade
%dir %{_sysconfdir}/quantum/plugins/brocade
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/brocade/*.ini


%files -n openstack-quantum-cisco
%doc LICENSE
%doc quantum/plugins/cisco/README
%{python_sitelib}/quantum/plugins/cisco/extensions/_credential_view.py*
%{python_sitelib}/quantum/plugins/cisco/extensions/credential.py*
%{python_sitelib}/quantum/plugins/cisco/extensions/qos.py*
%{python_sitelib}/quantum/plugins/cisco/extensions/_qos_view.py*
%{python_sitelib}/quantum/plugins/cisco
%dir %{_sysconfdir}/quantum/plugins/cisco
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/cisco/*.ini


%files -n openstack-quantum-hyperv
%doc LICENSE
#%%doc quantum/plugins/hyperv/README
%{_bindir}/quantum-hyperv-agent
%{_initrddir}/%{daemon_prefix}-hyperv-agent
%{python_sitelib}/quantum/plugins/hyperv
%dir %{_sysconfdir}/quantum/plugins/hyperv
%exclude %{python_sitelib}/quantum/plugins/hyperv/agent
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/hyperv/*.ini


%files -n openstack-quantum-linuxbridge
%doc LICENSE
%doc quantum/plugins/linuxbridge/README
%{_bindir}/quantum-linuxbridge-agent
%{_initrddir}/%{daemon_prefix}-linuxbridge-agent
%{python_sitelib}/quantum/plugins/linuxbridge
%{_datarootdir}/quantum/rootwrap/linuxbridge-plugin.filters
%dir %{_sysconfdir}/quantum/plugins/linuxbridge
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/linuxbridge/*.ini


%files -n openstack-quantum-midonet
%doc LICENSE
#%%doc quantum/plugins/midonet/README
%{python_sitelib}/quantum/plugins/midonet
%dir %{_sysconfdir}/quantum/plugins/midonet
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/midonet/*.ini


%files -n openstack-quantum-nicira
%doc LICENSE
%doc quantum/plugins/nicira/nicira_nvp_plugin/README
%{_bindir}/quantum-check-nvp-config
%{python_sitelib}/quantum/plugins/nicira
%dir %{_sysconfdir}/quantum/plugins/nicira
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/nicira/*.ini


%files -n openstack-quantum-openvswitch
%doc LICENSE
%doc quantum/plugins/openvswitch/README
%{_bindir}/quantum-openvswitch-agent
%{_bindir}/quantum-ovs-cleanup
%{_initrddir}/%{daemon_prefix}-openvswitch-agent
%{_initrddir}/%{daemon_prefix}-ovs-cleanup
%{python_sitelib}/quantum/plugins/openvswitch
%{_datarootdir}/quantum/rootwrap/openvswitch-plugin.filters
%dir %{_sysconfdir}/quantum/plugins/openvswitch
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/openvswitch/*.ini


%files -n openstack-quantum-plumgrid
%doc LICENSE
%doc quantum/plugins/plumgrid/README
%{python_sitelib}/quantum/plugins/plumgrid
%dir %{_sysconfdir}/quantum/plugins/plumgrid
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/plumgrid/*.ini


%files -n openstack-quantum-ryu
%doc LICENSE
%doc quantum/plugins/ryu/README
%{_bindir}/quantum-ryu-agent
%{_initrddir}/%{daemon_prefix}-ryu-agent
%{python_sitelib}/quantum/plugins/ryu
%{_datarootdir}/quantum/rootwrap/ryu-plugin.filters
%dir %{_sysconfdir}/quantum/plugins/ryu
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/ryu/*.ini


%files -n openstack-quantum-nec
%doc LICENSE
%doc quantum/plugins/nec/README
%{_bindir}/quantum-nec-agent
%{_initrddir}/%{daemon_prefix}-nec-agent
%{python_sitelib}/quantum/plugins/nec
%{_datarootdir}/quantum/rootwrap/nec-plugin.filters
%dir %{_sysconfdir}/quantum/plugins/nec
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/nec/*.ini


%files -n openstack-quantum-metaplugin
%doc LICENSE
%doc quantum/plugins/metaplugin/README
%{python_sitelib}/quantum/plugins/metaplugin
%dir %{_sysconfdir}/quantum/plugins/metaplugin
%config(noreplace) %attr(0640, root, quantum) %{_sysconfdir}/quantum/plugins/metaplugin/*.ini

%changelog
#end raw
99
conf/templates/packaging/specs/python-commonclient.spec
Normal file
@ -0,0 +1,99 @@
#encoding UTF-8
# Based on spec by:
# * Alessio Ababilov <aababilov@griddynamics.com>
#*
    version - version for RPM
    epoch - epoch for RPM
    clientname - keystone, nova, etc. (lowercase)
    apiname - Identity, Compute, etc. (first uppercase)
    requires - list of requirements for python-* package
*#
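# Example (illustrative): rendering this template with clientname=keystone and
# apiname=Identity yields the python-keystoneclient package, which ships the
# keystone command-line tool.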
%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5)
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%endif

Name: python-${clientname}client
Summary: OpenStack ${clientname.title()} Client
Version: $version
Release: 1%{?dist}
Epoch: $epoch

Group: Development/Languages
License: Apache 2.0
Vendor: OpenStack Foundation
URL: http://www.openstack.org
Source0: %{name}-%{version}.tar.gz

BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}

BuildArch: noarch
BuildRequires: python-setuptools

%if 0%{?enable_doc}
BuildRequires: python-sphinx
BuildRequires: make
%endif

#for $i in $requires
Requires: ${i}
#end for

%description
This is a client for the OpenStack $apiname API. There is a Python API (the
${clientname}client module), and a command-line script (${clientname}).

#raw
%if 0%{?enable_doc}
%package doc
Summary: Documentation for %{name}
Group: Documentation
Requires: %{name} = %{epoch}:%{version}-%{release}


%description doc
Documentation for %{name}.
%endif


%prep
%setup -q
if [ ! -f HACKING* ]; then
    touch HACKING
fi

%build
%{__python} setup.py build


%install
rm -rf %{buildroot}

%{__python} setup.py install -O1 --skip-build --root %{buildroot}
# keystoneclient writes a strange catalog
rm -rf %{buildroot}/%{_usr}/*client

%if 0%{?enable_doc}
make -C docs html PYTHONPATH=%{buildroot}%{python_sitelib}
%endif


%clean
rm -rf %{buildroot}


%files
%defattr(-,root,root,-)
%doc README* LICENSE* HACKING*
%{python_sitelib}/*
%{_bindir}/*


%if 0%{?enable_doc}
%files doc
%defattr(-,root,root,-)
%doc docs/_build/html
%endif


%changelog
#end raw
204
conf/templates/packaging/specs/python-django-horizon.spec
Normal file
@ -0,0 +1,204 @@
#encoding UTF-8
# Based on spec by:
# * Matthias Runge <mrunge@redhat.com>
# * Pádraig Brady <P@draigBrady.com>
# * Alan Pevec <apevec@redhat.com>
# * Cole Robinson <crobinso@redhat.com>

Name: python-django-horizon
Version: ${version}
Epoch: ${epoch}
Release: 1%{?dist}
Summary: Django application for talking to OpenStack

Group: Development/Libraries
# Code in horizon/horizon/utils taken from django which is BSD
License: ASL 2.0 and BSD
URL: http://horizon.openstack.org/
BuildArch: noarch
Source0: horizon-%{version}.tar.gz
Source1: openstack-dashboard.conf
Source2: openstack-dashboard-httpd-2.4.conf

# additional provides to be consistent with other django packages
Provides: django-horizon = %{epoch}:%{version}-%{release}

BuildRequires: python-devel
BuildRequires: python-setuptools

#for $i in $requires
Requires: ${i}
#end for

%description
Horizon is a Django application for providing OpenStack UI components.
It allows performing site administrator operations (viewing account
resource usage, configuring users, accounts, quotas, flavors, etc.) and
end user operations (start/stop/delete instances, create/restore
snapshots, view instance VNC console, etc.).


%package -n openstack-dashboard
Summary: OpenStack web user interface reference implementation
Group: Applications/System

Requires: httpd
Requires: mod_wsgi
Requires: %{name} = %{epoch}:%{version}-%{release}

BuildRequires: python-devel

%description -n openstack-dashboard
OpenStack Dashboard is a web user interface for OpenStack. The package
provides a reference implementation using the Django Horizon project,
mostly consisting of JavaScript and CSS to tie it all together as a
standalone site.

%if 0%{?with_doc}
%package doc
Summary: Documentation for Django Horizon
Group: Documentation

Requires: %{name} = %{epoch}:%{version}-%{release}

BuildRequires: python-sphinx >= 1.1.3
# Doc building basically means we have to mirror Requires:
BuildRequires: python-dateutil
BuildRequires: python-glanceclient
BuildRequires: python-keystoneclient
BuildRequires: python-novaclient
BuildRequires: python-quantumclient
BuildRequires: python-cinderclient
BuildRequires: python-swiftclient

%description doc
Documentation for the Django Horizon application for talking with OpenStack.
%endif

#raw
%prep
%setup -q -n horizon-%{version}

# Don't access the net while building docs
sed -i '/sphinx.ext.intersphinx/d' doc/source/conf.py

sed -i -e 's@^BIN_DIR.*$@BIN_DIR = "/usr/bin"@' \
    -e 's@^less_binary.*$@less_binary = "/usr/lib/node_modules/less/bin/lessc"@' \
    -e 's@^LOGIN_URL.*$@LOGIN_URL = "/dashboard/auth/login/"@' \
    -e 's@^LOGOUT_URL.*$@LOGOUT_URL = "/dashboard/auth/logout/"@' \
    -e 's@^LOGIN_REDIRECT_URL.*$@LOGIN_REDIRECT_URL = "/dashboard"@' \
    -e 's@^DEBUG.*$@DEBUG = False@' \
    openstack_dashboard/settings.py

# remove unnecessary .po files
find . -name "django*.po" -exec rm -f '{}' \;

%build
%{__python} setup.py build

%install
rm -rf %{buildroot}

%{__python} setup.py install -O1 --skip-build --root %{buildroot}

# drop httpd-conf snippet
%if 0%{?rhel} || 0%{?fedora} < 18
install -m 0644 -D -p %{SOURCE1} %{buildroot}%{_sysconfdir}/httpd/conf.d/openstack-dashboard.conf
%else
# httpd-2.4 changed the syntax
install -m 0644 -D -p %{SOURCE2} %{buildroot}%{_sysconfdir}/httpd/conf.d/openstack-dashboard.conf
%endif

export PYTHONPATH="$PWD:$PYTHONPATH"
%if 0%{?with_doc}
%if 0%{?rhel} == 6
sphinx-1.0-build -b html doc/source html
%else
sphinx-build -b html doc/source html
%endif
%endif

# Fix hidden-file-or-dir warnings
rm -fr html/.doctrees html/.buildinfo

install -d -m 755 %{buildroot}%{_datadir}/openstack-dashboard
install -d -m 755 %{buildroot}%{_sharedstatedir}/openstack-dashboard
install -d -m 755 %{buildroot}%{_sysconfdir}/openstack-dashboard

# Copy everything to /usr/share
mv %{buildroot}%{python_sitelib}/openstack_dashboard \
    %{buildroot}%{_datadir}/openstack-dashboard
mv manage.py %{buildroot}%{_datadir}/openstack-dashboard
rm -rf %{buildroot}%{python_sitelib}/openstack_dashboard


# Move config to /etc, symlink it back to /usr/share
mv %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.py.example %{buildroot}%{_sysconfdir}/openstack-dashboard/local_settings
ln -s %{_sysconfdir}/openstack-dashboard/local_settings %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.py
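# Assumed resulting layout: /etc/openstack-dashboard/local_settings is the
# editable configuration, and the Django tree under
# /usr/share/openstack-dashboard reaches it through the local_settings.py symlink.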

%if 0%{?rhel} > 6 || 0%{?fedora} >= 16
%find_lang django
%find_lang djangojs
%else
# Handling locale files
# This is adapted from the %%find_lang macro, which cannot be directly
# used since Django locale files are not located in %%{_datadir}
#
# The rest of the packaging guidelines still apply -- do not list
# locale files by hand!
(cd $RPM_BUILD_ROOT && find . -name 'django*.mo') | %{__sed} -e 's|^.||' |
    %{__sed} -e \
    's:\(.*/locale/\)\([^/_]\+\)\(.*\.mo$\):%lang(\2) \1\2\3:' \
    >> django.lang
%endif
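# django.lang is then expected to hold %%lang-tagged entries, e.g. (illustrative):
#   %%lang(de) /usr/share/openstack-dashboard/openstack_dashboard/locale/de/LC_MESSAGES/django.mo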

grep "\/usr\/share\/openstack-dashboard" django.lang > dashboard.lang
grep "\/site-packages\/horizon" django.lang > horizon.lang

%if 0%{?rhel} > 6 || 0%{?fedora} >= 16
cat djangojs.lang >> horizon.lang
%endif

# copy static files to %{_datadir}/openstack-dashboard/static
mkdir -p %{buildroot}%{_datadir}/openstack-dashboard/static
cp -a openstack_dashboard/static/* %{buildroot}%{_datadir}/openstack-dashboard/static
cp -a horizon/static/* %{buildroot}%{_datadir}/openstack-dashboard/static

# compress css, js etc.
cd %{buildroot}%{_datadir}/openstack-dashboard
# TODO(aababilov): compress them
#%{__python} manage.py collectstatic --noinput --pythonpath=../../lib/python2.7/site-packages/
#%{__python} manage.py compress --pythonpath=../../lib/python2.7/site-packages/

node_less_dir=%{buildroot}/usr/lib/node_modules/less
mkdir -p "$node_less_dir"
mv %{buildroot}%{python_sitelib}/bin/less "$node_less_dir/bin"
mv %{buildroot}%{python_sitelib}/bin/lib "$node_less_dir/lib"
rm -rf %{buildroot}%{python_sitelib}/bin/
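# lessc should now live at /usr/lib/node_modules/less/bin/lessc, matching the
# less_binary path patched into openstack_dashboard/settings.py earlier.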

%clean
rm -rf %{buildroot}


%files
%doc LICENSE README.rst
%{python_sitelib}/*
/usr/lib/node_modules/less


%files -n openstack-dashboard
%{_datadir}/openstack-dashboard/

%{_sharedstatedir}/openstack-dashboard
%dir %attr(0750, root, apache) %{_sysconfdir}/openstack-dashboard
%config(noreplace) %{_sysconfdir}/httpd/conf.d/openstack-dashboard.conf
%config(noreplace) %attr(0640, root, apache) %{_sysconfdir}/openstack-dashboard/local_settings


%if 0%{?with_doc}
%files doc
%doc html
%endif

%changelog
#end raw
@ -8,7 +8,6 @@ conflicts python-nose1.1
conflicts python-routes1.12
conflicts python-sphinx10
conflicts python-webob1.0
conflicts Django14

## Package Requirements (Order matters!)
require PyYAML
@ -34,12 +33,13 @@ require python-pip
require python-setuptools

# Build dependencies
require sqlite-devel
require libxml2-devel
require libxslt-devel
require mysql-devel
require postgresql-devel
require openldap-devel
require libxml2-devel
require libxslt-devel
require sqlite-devel
require dos2unix

# These packages can be built from archives
require python-cheetah Cheetah