Merge pull request #111 from harlowja/master

Getting rhel6 back in shape.
Joshua Harlow 2012-03-16 19:29:24 -07:00
commit b5c455b48a
24 changed files with 1603 additions and 1473 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,13 +1,7 @@
---
description: Devstack.sh matching component installation.
supports:
- rhel-6
- ubuntu-oneiric
- fedora-16
options:
glance:
- load-images
components:
# Order matters here!
- general
- db
- rabbit-mq
- keystone-client
@@ -18,16 +12,24 @@ components:
- quantum-client
- nova-client
- horizon
description: Devstack.sh matching component installation.
options: null
subsystems:
glance:
- api
- reg
nova:
- api
- cauth
- cert
- cpu
- net
- sched
- vol
- xvnc
glance:
- api
- reg
nova:
- api
- cauth
- cert
- cpu
- net
- sched
- vol
- xvnc
supports:
- rhel-6
- ubuntu-oneiric
- fedora-16
...
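
The persona file above keeps the same data after the change (components, subsystems, distro support) but reorders the top-level keys and replaces the glance load-images option with options: null. A minimal sketch of reading such a persona file with PyYAML follows; the file path is a hypothetical stand-in, and the real loading lives in devstack/persona.py (diffed further below).

    import yaml

    # Hypothetical path; the actual persona file name is not shown in this diff.
    with open("conf/personas/devstack.sh.yaml", "r") as fh:
        persona = yaml.safe_load(fh)

    print(persona["supports"])              # ['rhel-6', 'ubuntu-oneiric', 'fedora-16']
    print(persona["subsystems"]["glance"])  # ['api', 'reg']
    print(persona["options"])               # None after this change (was {'glance': ['load-images']})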


@@ -1,4 +1,4 @@
# From devstack commit f056b7d2d098361908df490e6683d26065a7cd3a with modifications to parametrize
# From devstack commit 77b0e1d8ff9617dc71cf92a7a9d7fb850e2e5998 with modifications to parametrize
# certain variables (ports mainly).
[DEFAULT]
@@ -75,10 +75,10 @@ paste.app_factory = keystone.service:public_app_factory
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension public_service
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service
[pipeline:admin_api]
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension crud_extension admin_service
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension crud_extension admin_service
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory


@@ -1,26 +1,26 @@
#!/bin/bash
# From devstack.sh commit edf59ca44331106ba895eee78ae1d8602764eb4c
# From devstack.sh commit 77b0e1d8ff9617dc71cf92a7a9d7fb850e2e5998
#
# Initial data for Keystone using python-keystoneclient
#
# Tenant User Roles
# -------------------------------------------------------
# ------------------------------------------------------------------
# admin admin admin
# service glance admin
# service nova admin
# service nova admin, [ResellerAdmin (swift only)]
# service quantum admin # if enabled
# service swift admin # if enabled
# demo admin admin
# demo demo Member,sysadmin,netadmin
# demo demo Member, anotherrole
# invisible_to_admin demo Member
#
# Variables set before calling this script:
# SERVICE_TOKEN - aka admin_token in keystone.conf
# SERVICE_ENDPOINT - local Keystone admin endpoint
# SERVICE_TENANT_NAME - name of tenant containing service accounts
# ENABLED_SERVICES - stack.sh's list of services to start
# ENABLED_SERVICES - stack's list of services to start
set -e
@@ -64,15 +64,15 @@ DEMO_USER=$(get_id keystone user-create --name=demo \
ADMIN_ROLE=$(get_id keystone role-create --name=admin)
KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)
SYSADMIN_ROLE=$(get_id keystone role-create --name=sysadmin)
NETADMIN_ROLE=$(get_id keystone role-create --name=netadmin)
# ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
# TODO(sleepsonthefloor): show how this can be used for rbac in the future!
ANOTHER_ROLE=$(get_id keystone role-create --name=anotherrole)
# Add Roles to Users in Tenants
keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $ADMIN_TENANT
keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $DEMO_TENANT
keystone user-role-add --user $DEMO_USER --role $SYSADMIN_ROLE --tenant_id $DEMO_TENANT
keystone user-role-add --user $DEMO_USER --role $NETADMIN_ROLE --tenant_id $DEMO_TENANT
keystone user-role-add --user $DEMO_USER --role $ANOTHER_ROLE --tenant_id $DEMO_TENANT
# TODO(termie): these two might be dubious
keystone user-role-add --user $ADMIN_USER --role $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT
@@ -110,9 +110,18 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
keystone user-role-add --tenant_id $SERVICE_TENANT \
--user $SWIFT_USER \
--role $ADMIN_ROLE
# Nova needs ResellerAdmin role to download images when accessing
# swift through the s3 api. The admin role in swift allows a user
# to act as an admin for their tenant, but ResellerAdmin is needed
# for a user to act as any tenant. The name of this role is also
# configurable in swift-proxy.conf
RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
keystone user-role-add --tenant_id $SERVICE_TENANT \
--user $NOVA_USER \
--role $RESELLER_ROLE
fi
if [[ "$ENABLED_SERVICES" =~ "quantum-server" ]]; then
if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
QUANTUM_USER=$(get_id keystone user-create --name=quantum \
--pass="$SERVICE_PASSWORD" \
--tenant_id $SERVICE_TENANT \


@@ -377,8 +377,9 @@ class PkgUninstallComponent(ComponentBase):
def _uninstall_pkgs(self):
pkgsfull = self.tracereader.packages_installed()
if pkgsfull:
LOG.info("Potentially removing %s packages",
len(pkgsfull))
pkg_names = set([p['name'] for p in pkgsfull])
LOG.info("Potentially removing %s packages (%s)",
len(pkg_names), ", ".join(pkg_names))
which_removed = self.packager.remove_batch(pkgsfull)
LOG.info("Actually removed %s packages (%s)",
len(which_removed), ", ".join(which_removed))


@@ -187,6 +187,7 @@ class GlanceRuntime(comp.PythonRuntime):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.options = kargs.get('options', set())
def known_subsystems(self):
return SUB_TO_APP.keys()
@@ -205,8 +206,11 @@ class GlanceRuntime(comp.PythonRuntime):
def post_start(self):
comp.PythonRuntime.post_start(self)
# Install any images that need activating...
# TODO: make this less cheesy - need to wait till glance goes online
LOG.info("Waiting %s seconds so that glance can start up before image install." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)
creator.ImageCreationService(self.cfg, self.pw_gen).install()
if 'no-load-images' in self.options:
pass
else:
# Install any images that need activating...
# TODO: make this less cheesy - need to wait till glance goes online
LOG.info("Waiting %s seconds so that glance can start up before image install." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)
creator.ImageCreationService(self.cfg, self.pw_gen).install()
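
The new options set on GlanceRuntime is populated from the persona's component options (wired up in the actions.py hunk further below) and gates the image-load step in post_start(). A small sketch of that guard's behaviour, using only the option name from the hunk; the helper function is illustrative and not part of the codebase.

    # Illustrative helper mirroring the post_start() guard above.
    def should_load_images(options):
        return 'no-load-images' not in options

    print(should_load_images(set()))               # True: wait WAIT_ONLINE_TO, then install images
    print(should_load_images({'no-load-images'}))  # False: skip the glance image install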


@@ -1,3 +1,20 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 New Dream Network, LLC (DreamHost) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devstack import component

devstack/decorators.py Normal file

@@ -0,0 +1,35 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import pprint
from devstack import log as logging
# Very useful example ones...
# See: http://wiki.python.org/moin/PythonDecoratorLibrary
LOG = logging.getLogger("devstack.decorators")
def log_debug(f):
@functools.wraps(f)
def wrapper(*args, **kargs):
LOG.debug('%s(%s, %s) ->', f.func_name, str(args), str(kargs))
rv = f(*args, **kargs)
LOG.debug("<- %s" % (pprint.pformat(rv, indent=2)))
return rv
return wrapper
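
The new devstack/decorators.py carries the log_debug decorator that previously lived in devstack/log.py (removed further below); distro.py, persona.py and pack.py now import it from here. A standalone sketch of the same decorator follows, with print substituted for the devstack logger and f.__name__ used in place of the Python 2-only f.func_name so the snippet runs on its own.

    import functools
    import pprint

    def log_debug(f):
        @functools.wraps(f)
        def wrapper(*args, **kargs):
            # Trace the call, run it, then trace the return value.
            print('%s(%s, %s) ->' % (f.__name__, args, kargs))
            rv = f(*args, **kargs)
            print('<- %s' % (pprint.pformat(rv, indent=2)))
            return rv
        return wrapper

    @log_debug
    def add(a, b):
        return a + b

    add(1, 2)  # prints the call and its return value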


@@ -21,6 +21,7 @@ import re
import yaml
from devstack import decorators
from devstack import importer
from devstack import log as logging
from devstack import settings
@@ -75,7 +76,7 @@ class Distro(object):
'No platform configuration data for %s (%s)' %
(plt, distname))
@logging.log_debug
@decorators.log_debug
def __init__(self, name, distro_pattern, packager_name, commands, components):
self.name = name
self._distro_pattern = re.compile(distro_pattern, re.IGNORECASE)


@@ -46,33 +46,33 @@ class DBInstaller(db.DBInstaller):
sh.write_file('/etc/mysql/my.cnf', fc)
class OneiricAptPackager(apt.AptPackager):
class AptPackager(apt.AptPackager):
def _pkg_remove_special(self, name, pkginfo):
def _remove_special(self, name, info):
if name == 'rabbitmq-server':
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
LOG.info("Handling special remove of %s." % (name))
pkg_full = self._format_pkg(name, pkginfo.get("version"))
cmd = apt.APT_GET + apt.APT_REMOVE + [pkg_full]
pkg_full = self._format_pkg_name(name, info.get("version"))
cmd = apt.APT_REMOVE + [pkg_full]
self._execute_apt(cmd)
#probably useful to do this
time.sleep(1)
#purge
cmd = apt.APT_GET + apt.APT_PURGE + [pkg_full]
cmd = apt.APT_PURGE + [pkg_full]
self._execute_apt(cmd)
return True
return False
def _pkg_install_special(self, name, pkginfo):
def _install_special(self, name, info):
if name == 'rabbitmq-server':
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
LOG.info("Handling special install of %s." % (name))
#this seems to be a temporary fix for that bug
with tempfile.TemporaryFile() as f:
pkg_full = self._format_pkg(name, pkginfo.get("version"))
cmd = apt.APT_GET + apt.APT_INSTALL + [pkg_full]
pkg_full = self._format_pkg_name(name, info.get("version"))
cmd = apt.APT_INSTALL + [pkg_full]
self._execute_apt(cmd, stdout_fh=f, stderr_fh=f)
return True
return False


@@ -25,11 +25,25 @@ from devstack import utils
from devstack.components import db
from devstack.components import horizon
from devstack.packaging import yum
LOG = logging.getLogger(__name__)
SOCKET_CONF = "/etc/httpd/conf.d/wsgi-socket-prefix.conf"
HTTPD_CONF = '/etc/httpd/conf/httpd.conf'
# Need to relink for rhel (not a bug!)
RHEL_RELINKS = {
'python-webob1.0': (
'/usr/lib/python2.6/site-packages/WebOb-1.0.8-py2.6.egg/webob/',
'/usr/lib/python2.6/site-packages/webob'
),
'python-nose1.1': (
'/usr/lib/python2.6/site-packages/nose-1.1.2-py2.6.egg/nose/',
'/usr/lib/python2.6/site-packages/nose',
)
}
class DBInstaller(db.DBInstaller):
@@ -47,12 +61,12 @@ class DBInstaller(db.DBInstaller):
sh.write_file('/etc/my.cnf', fc)
class Rhel6HorizonInstaller(horizon.HorizonInstaller):
class HorizonInstaller(horizon.HorizonInstaller):
def _config_fixups(self):
(user, group) = self._get_apache_user_group()
# This is recorded so it gets cleaned up during uninstall
self.tracewriter.file_touched(SOCKET_CONF)
# Not recorded since we aren't really creating this
LOG.info("Fixing up %s and %s files" % (SOCKET_CONF, HTTPD_CONF))
with sh.Rooted(True):
# Fix the socket prefix to someplace we can use
@@ -67,3 +81,30 @@ class Rhel6HorizonInstaller(horizon.HorizonInstaller):
line = "Group %s" % (group)
new_lines.append(line)
sh.write_file(HTTPD_CONF, utils.joinlinesep(*new_lines))
class YumPackager(yum.YumPackager):
def _remove_special(self, name, info):
if name in RHEL_RELINKS:
# Note: we don't return true here so that
# the normal package cleanup happens...
(_, tgt) = RHEL_RELINKS.get(name)
if sh.islink(tgt):
sh.unlink(tgt)
return False
def _install_special(self, name, info):
if name in RHEL_RELINKS:
full_pkg_name = self._format_pkg_name(name, info.get("version"))
install_cmd = yum.YUM_INSTALL + [full_pkg_name]
self._execute_yum(install_cmd)
(src, tgt) = RHEL_RELINKS.get(name)
if not sh.islink(tgt):
# This is actually a feature, EPEL must not conflict with RHEL, so X pkg installs newer version in parallel.
#
# This of course doesn't work when running from git like devstack does....
sh.symlink(src, tgt)
return True
else:
return False
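
The RHEL_RELINKS handling moved out of yum.py (see its diff below) into this rhel-specific YumPackager: install the EPEL package that carries the newer egg, then symlink its egg directory onto the plain import path if no link exists yet. A standalone sketch of that relink step for one entry, with os used in place of devstack's sh helpers; the paths are the ones listed in RHEL_RELINKS above and the snippet would need root to actually create the link.

    import os

    src = '/usr/lib/python2.6/site-packages/WebOb-1.0.8-py2.6.egg/webob/'
    tgt = '/usr/lib/python2.6/site-packages/webob'

    if not os.path.islink(tgt):
        # EPEL installs the newer WebOb alongside RHEL's copy; the link makes it
        # importable as plain 'webob' (mirrors sh.symlink(src, tgt) above).
        os.symlink(src, tgt)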


@@ -17,9 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import pprint
from logging.handlers import SysLogHandler
from logging.handlers import WatchedFileHandler
@@ -76,14 +74,3 @@ class AuditAdapter(logging.LoggerAdapter):
def getLogger(name='devstack'):
return AuditAdapter(logging.getLogger(name))
def log_debug(f):
@functools.wraps(f)
def wrapper(*args, **kargs):
logger = getLogger()
logger.debug('%s(%s, %s) ->', f.func_name, str(args), str(kargs))
rv = f(*args, **kargs)
logger.debug("<- %s" % (pprint.pformat(rv, indent=2)))
return rv
return wrapper


@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from devstack import decorators
from devstack import log as logging
from devstack import utils
@@ -22,7 +23,7 @@ LOG = logging.getLogger("devstack.packager")
class Packager(object):
@logging.log_debug
@decorators.log_debug
def __init__(self, distro, keep_packages):
self.distro = distro
self.keep_packages = keep_packages


@@ -44,15 +44,15 @@ class AptPackager(pack.Packager):
pack.Packager.__init__(self, distro, keep_packages)
self.auto_remove = True
def _format_pkg(self, name, version):
def _format_pkg_name(self, name, version):
if version:
pkg_full_name = VERSION_TEMPL % (name, version)
return VERSION_TEMPL % (name, version)
else:
pkg_full_name = name
return pkg_full_name
return name
def _execute_apt(self, cmd, **kargs):
return sh.execute(*cmd, run_as_root=True,
full_cmd = APT_GET + cmd
return sh.execute(*full_cmd, run_as_root=True,
check_exit_code=True,
env_overrides=ENV_ADDITIONS,
**kargs)
@@ -65,32 +65,33 @@ class AptPackager(pack.Packager):
removable = info.get('removable', True)
if not removable:
continue
if self._pkg_remove_special(name, info):
if self._remove_special(name, info):
which_removed.append(name)
continue
pkg_full = self._format_pkg(name, info.get("version"))
pkg_full = self._format_pkg_name(name, info.get("version"))
if pkg_full:
cmds.append(pkg_full)
which_removed.append(name)
if cmds:
cmd = APT_GET + APT_DO_REMOVE + cmds
cmd = APT_DO_REMOVE + cmds
self._execute_apt(cmd)
if which_removed and self.auto_remove:
cmd = APT_GET + APT_AUTOREMOVE
cmd = APT_AUTOREMOVE
self._execute_apt(cmd)
return which_removed
def install(self, pkg):
name = pkg['name']
if self._pkg_install_special(name, pkg):
if self._install_special(name, pkg):
return
else:
pkg_full = self._format_pkg(name, pkg.get("version"))
cmd = APT_GET + APT_INSTALL + [pkg_full]
self._execute_apt(cmd)
pkg_full = self._format_pkg_name(name, pkg.get("version"))
if pkg_full:
cmd = APT_INSTALL + [pkg_full]
self._execute_apt(cmd)
def _pkg_remove_special(self, name, info):
def _remove_special(self, name, info):
return False
def _pkg_install_special(self, name, info):
def _install_special(self, name, info):
return False
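
Two changes run through this apt.py diff: the special-case hooks lose their pkg_ prefix (_remove_special / _install_special), and _execute_apt now prepends APT_GET itself, so callers pass only the subcommand pieces (APT_INSTALL, APT_REMOVE, and so on). A toy sketch of that calling pattern; the constants' contents are assumptions, and the command is printed rather than handed to sh.execute.

    # Stand-in values; the real APT_GET/APT_INSTALL/APT_REMOVE lists are defined
    # elsewhere in devstack/packaging/apt.py and are not shown in this diff.
    APT_GET = ['apt-get', '-y']
    APT_INSTALL = ['install']
    APT_REMOVE = ['remove']

    def _execute_apt(cmd):
        # Callers no longer prepend APT_GET themselves.
        full_cmd = APT_GET + cmd
        print(' '.join(full_cmd))  # the real method runs full_cmd via sh.execute(...)

    _execute_apt(APT_INSTALL + ['rabbitmq-server'])
    _execute_apt(APT_REMOVE + ['rabbitmq-server'])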


@@ -30,74 +30,36 @@ YUM_REMOVE = ['erase', '-y', "-t"]
# Yum separates its pkg names and versions with a dash
VERSION_TEMPL = "%s-%s"
# Need to relink for rhel (not a bug!)
# TODO: maybe this should be a subclass that handles these differences
RHEL_RELINKS = {
'python-webob1.0': {
"src": '/usr/lib/python2.6/site-packages/WebOb-1.0.8-py2.6.egg/webob/',
'tgt': '/usr/lib/python2.6/site-packages/webob',
},
'python-nose1.1': {
"src": '/usr/lib/python2.6/site-packages/nose-1.1.2-py2.6.egg/nose/',
'tgt': '/usr/lib/python2.6/site-packages/nose',
},
}
class YumPackager(pack.Packager):
def __init__(self, distro, keep_packages):
pack.Packager.__init__(self, distro, keep_packages)
def _format_pkg_name(self, name, version):
if version is not None and len(version):
if version:
return VERSION_TEMPL % (name, version)
else:
return name
def _execute_yum(self, cmd, **kargs):
return sh.execute(*cmd, run_as_root=True,
full_cmd = YUM_CMD + cmd
return sh.execute(*full_cmd, run_as_root=True,
check_exit_code=True,
**kargs)
def _remove_special(self, pkgname, pkginfo):
# TODO: maybe this should be a subclass that handles these differences
# if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
# #we don't return true here so that
# #the normal package cleanup happens
# sh.unlink(RHEL_RELINKS.get(pkgname).get("tgt"))
def _remove_special(self, name, info):
return False
# TODO: maybe this should be a subclass that handles these differences
def _install_rhel_relinks(self, pkgname, pkginfo):
full_pkg_name = self._format_pkg_name(pkgname, pkginfo.get("version"))
install_cmd = YUM_CMD + YUM_INSTALL + [full_pkg_name]
self._execute_yum(install_cmd)
tgt = RHEL_RELINKS.get(pkgname).get("tgt")
src = RHEL_RELINKS.get(pkgname).get("src")
if not sh.islink(tgt):
# This is actually a feature, EPEL must not conflict with RHEL, so X pkg installs newer version in parallel.
#
# This of course doesn't work when running from git like devstack does....
sh.symlink(src, tgt)
return True
# TODO: maybe this should be a subclass that handles these differences
def _install_special(self, pkgname, pkginfo):
# FIXME:
# if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
# return self._install_rhel_relinks(pkgname, pkginfo)
def _install_special(self, name, info):
return False
def install_batch(self, pkgs):
pkg_full_names = []
for info in pkgs:
name = info['name']
if self._install_special(name, info):
continue
full_pkg_name = self._format_pkg_name(name, info.get("version"))
pkg_full_names.append(full_pkg_name)
if pkg_full_names:
cmd = YUM_CMD + YUM_INSTALL + pkg_full_names
def install(self, pkg):
name = pkg['name']
if self._install_special(name, pkg):
return
else:
full_pkg_name = self._format_pkg_name(name, pkg.get("version"))
cmd = YUM_INSTALL + [full_pkg_name]
self._execute_yum(cmd)
def _remove_batch(self, pkgs):
@@ -105,17 +67,16 @@ class YumPackager(pack.Packager):
which_removed = []
for info in pkgs:
name = info['name']
info = pkgs.get(name) or {}
removable = info.get('removable', True)
if not removable:
continue
if self._remove_special(name, info):
which_removed.append(name)
continue
full_pkg_name = self._format_pkg_name(name, info.get("version"))
pkg_full_names.append(full_pkg_name)
which_removed.append(name)
else:
full_pkg_name = self._format_pkg_name(name, info.get("version"))
pkg_full_names.append(full_pkg_name)
which_removed.append(name)
if pkg_full_names:
cmd = YUM_CMD + YUM_REMOVE + pkg_full_names
cmd = YUM_REMOVE + pkg_full_names
self._execute_yum(cmd)
return which_removed
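
yum.py drops install_batch() in favor of a per-package install() matching apt's interface, and _execute_yum likewise prepends YUM_CMD so callers build only the subcommand. A sketch of the resulting install call for one package, using the VERSION_TEMPL from the hunk; the YUM_CMD/YUM_INSTALL values are stand-ins and the command is printed instead of executed.

    YUM_CMD = ['yum']               # stand-in; the real list lives near the top of yum.py
    YUM_INSTALL = ['install', '-y'] # stand-in as well
    VERSION_TEMPL = "%s-%s"         # yum separates pkg names and versions with a dash

    def _format_pkg_name(name, version):
        return VERSION_TEMPL % (name, version) if version else name

    def install(pkg):
        full_pkg_name = _format_pkg_name(pkg['name'], pkg.get('version'))
        print(' '.join(YUM_CMD + YUM_INSTALL + [full_pkg_name]))

    install({'name': 'python-webob1.0', 'version': '1.0.8'})  # yum install -y python-webob1.0-1.0.8
    install({'name': 'python-nose1.1'})                       # yum install -y python-nose1.1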


@@ -16,6 +16,7 @@
import yaml
from devstack import decorators
from devstack import exceptions as excp
from devstack import log as logging
from devstack import shell as sh
@@ -46,7 +47,7 @@ class Persona(object):
cls, cls_kvs, err)
return instance
@logging.log_debug
@decorators.log_debug
def __init__(self, description,
supports,
components,
@@ -78,9 +79,9 @@ class Persona(object):
# Some sanity checks against the given distro
d_name = distro.name
if d_name not in self.distro_support:
msg = "Distro %s not supported" % (d_name)
msg = "Distro %r not supported" % (d_name)
raise excp.ConfigException(msg)
for c in self.wanted_components:
if not distro.known_component(c):
raise RuntimeError("Distro %s does not support component %s" %
raise RuntimeError("Distro %r does not support component %r" %
(d_name, c))


@@ -155,6 +155,7 @@ class ActionRunner(object):
def _construct_instances(self, persona, action, root_dir):
components = persona.wanted_components
desired_subsystems = persona.wanted_subsystems or dict()
component_opts = persona.component_options or dict()
instances = dict()
for c in components:
(cls, my_info) = self.distro.extract_component(c, action)
@@ -167,6 +168,7 @@ class ActionRunner(object):
cls_kvs['name'] = c
cls_kvs['keep_old'] = self.keep_old
cls_kvs['desired_subsystems'] = set(desired_subsystems.get(c, list()))
cls_kvs['options'] = set(component_opts.get(c, list()))
# The above is not overrideable...
for (k, v) in my_info.items():
if k not in cls_kvs:

stack

@@ -136,6 +136,7 @@ def run(args):
if not persona_fn or not sh.isfile(persona_fn):
print(utils.color_text("No valid persona file name specified!", "red"))
return False
persona_fn = sh.abspth(persona_fn)
# Welcome!
(repeat_string, line_max_len) = utils.welcome(_WELCOME_MAP.get(action))
@@ -166,9 +167,9 @@ def run(args):
pkg_manager,
**args)
LOG.info("Starting action [%s] on %s for distro: %s" % (action, date.rcf8222date(), dist.name))
LOG.info("Using persona: %s" % (persona_inst))
LOG.info("In root directory: %s" % (root_dir))
LOG.info("Starting action %r on %s for distro: %r" % (action, date.rcf8222date(), dist.name))
LOG.info("Using persona: %r" % (persona_fn))
LOG.info("In root directory: %r" % (root_dir))
start_time = time.time()
runner.run(persona_inst, root_dir)


@@ -1,28 +0,0 @@
import json
import os
import sys
def clean_file(name):
with open(name, "r") as f:
contents = f.read()
lines = contents.splitlines()
cleaned_up = list()
for line in lines:
if line.lstrip().startswith('#'):
continue
else:
cleaned_up.append(line)
cleaned_lines = os.linesep.join(cleaned_up)
data = json.loads(cleaned_lines)
output = json.dumps(data, indent=4, sort_keys=True)
print(output)
if __name__ == "__main__":
ME = os.path.basename(sys.argv[0])
if len(sys.argv) == 1:
print("%s filename filename filename..." % (ME))
sys.exit(0)
clean_file(sys.argv[1])
sys.exit(0)


@@ -1,31 +0,0 @@
#!/usr/bin/env python
import glob
import json
import os
import sys
def load_json(fn):
with open(fn, 'r') as f:
lines = f.readlines()
data = os.linesep.join(
l
for l in lines
if not l.lstrip().startswith('#')
)
return json.loads(data)
inputdir = sys.argv[1]
distro = sys.argv[2]
for input_file in glob.glob('%s/*.json' % inputdir):
data = load_json(input_file)
print
print ' - name: %s' % os.path.splitext(os.path.basename(input_file))[0]
print ' packages:'
for pkg, info in sorted(data.get(distro, {}).items()):
print ' - name: %s' % pkg
for n, v in sorted(info.items()):
print ' %s: %s' % (n, v)


@@ -1,126 +0,0 @@
"""
Searches the given path for JSON files, and validates their contents.
"""
import errno
import json
import logging
import optparse
import os
import re
# Configure logging
logging.basicConfig(format='%(levelname)s: %(message)s')
ROOT_LOGGER = logging.getLogger("")
ROOT_LOGGER.setLevel(logging.WARNING)
LOGGER = logging.getLogger(__name__)
# Configure commandlineability
parser = optparse.OptionParser()
parser.add_option('-p', type="string", default=os.getcwd(),
help='the path to search for JSON files', dest='path')
parser.add_option('-r', type="string", default='.json$',
help='the regular expression to match filenames against ' \
'(not absolute paths)', dest='regexp')
(args, _) = parser.parse_args()
def main():
files = find_matching_files(args.path, args.regexp)
results = True
print("Validating %s json files (found using regex [%s] in path [%s])" % (len(files), args.regexp, args.path))
for path in files:
pres = validate_json(path)
if not pres:
print("Failed at validating [%s]" % (path))
results = False
else:
print("Validated [%s]" % (path))
# Invert our test results to produce a status code
if results:
exit(0)
else:
exit(1)
def validate_json(path):
"""Open a file and validate it's contents as JSON"""
try:
LOGGER.info("Validating %s" % (path))
contents = read_file(path)
if contents is False:
logging.warning('Insufficient permissions to open: %s' % path)
return False
except:
LOGGER.warning('Unable to open: %s' % path)
return False
#knock off comments
ncontents = list()
for line in contents.splitlines():
tmp_line = line.strip()
if tmp_line.startswith("#"):
continue
else:
ncontents.append(line)
contents = os.linesep.join(ncontents)
try:
jdict = json.loads(contents)
if not type(jdict) is dict:
LOGGER.error('Root element in %s is not a dictionary!' % path)
return False
except Exception:
LOGGER.exception('Unable to parse: %s' % path)
return False
return True
def find_matching_files(path, pattern):
"""Search the given path for files matching the given pattern"""
regex = re.compile(pattern)
json_files = []
for root, dirs, files in os.walk(path):
for name in files:
if regex.search(name):
full_name = os.path.join(root, name)
json_files.append(full_name)
return json_files
def read_file(path):
"""Attempt to read a file safely
Returns the contents of the file as a string on success, False otherwise"""
try:
fp = open(path)
except IOError as e:
if e.errno == errno.EACCES:
# permission error
return False
raise
else:
with fp:
return fp.read()
def replace_file(path, new_contents):
"""Overwrite the file at the given path with the new contents
Returns True on success, False otherwise."""
try:
f = open(path, 'w')
f.write(new_contents)
f.close()
except:
LOGGER.error('Unable to write: %s' % f)
return False
return True
if __name__ == "__main__":
main()


@@ -7,6 +7,6 @@ import sys
import yaml
with open(sys.argv[1], 'r') as f:
yaml.load(f)
if __name__ == "__main__":
with open(sys.argv[1], 'r') as f:
yaml.load(f)

tools/yamlpretty.py Executable file

@@ -0,0 +1,25 @@
#!/usr/bin/env python
import os
import sys
import yaml
# See: http://pyyaml.org/wiki/PyYAMLDocumentation
if __name__ == "__main__":
args = list(sys.argv)
args = args[1:]
for fn in args:
data = None
with open(fn, 'r') as fh:
data = yaml.load(fh.read())
fh.close()
formatted = yaml.dump(data,
line_break="\n",
indent=4,
explicit_start=True,
explicit_end=True,
default_flow_style=False,
)
print formatted
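
The new tools/yamlpretty.py simply re-dumps each YAML file passed on the command line with explicit document markers and block style. The same yaml.dump options applied to a small inline dict show roughly what the tool prints per file; the output in the trailing comments is approximate.

    import yaml

    data = {'components': ['general', 'db'], 'supports': ['rhel-6']}
    print(yaml.dump(data,
                    line_break="\n",
                    indent=4,
                    explicit_start=True,
                    explicit_end=True,
                    default_flow_style=False))
    # ---
    # components:
    # - general
    # - db
    # supports:
    # - rhel-6
    # ...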