Import add'l files from Nova

Import lots of files from Nova at rev
f0da3464ab17552f24977d22365fa9573702f5a8
Devananda van der Veen 2013-05-07 16:36:49 -07:00
parent 089e2ff25b
commit 05e9ce4c48
20 changed files with 4083 additions and 17 deletions

27
etc/ironic/rootwrap.conf Normal file

@@ -0,0 +1,27 @@
# Configuration for ironic-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/ironic/rootwrap.d,/usr/share/ironic/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR


@@ -0,0 +1,10 @@
# ironic-rootwrap command filters for ironic-deploy-helper
# This file should be owned by (and only-writeable by) the root user
[Filters]
# ironic-deploy-helper
iscsiadm: CommandFilter, /sbin/iscsiadm, root
sfdisk: CommandFilter, /sbin/sfdisk, root
dd: CommandFilter, /bin/dd, root
mkswap: CommandFilter, /sbin/mkswap, root
blkid: CommandFilter, /sbin/blkid, root


@@ -0,0 +1,9 @@
# ironic-rootwrap command filters for manager nodes
# This file should be owned by (and only-writeable by) the root user
[Filters]
# ironic/manager/ipmi.py: 'ipmitool', ..
ipmitool: CommandFilter, /usr/bin/ipmitool, root
# ironic/manager/ipmi.py: 'kill', '-TERM', str(console_pid)
kill_shellinaboxd: KillFilter, root, /usr/local/bin/shellinaboxd, -15, -TERM

37
ironic/config.py Normal file

@@ -0,0 +1,37 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from ironic.openstack.common.db.sqlalchemy import session as db_session
from ironic.openstack.common import rpc
from ironic import paths
from ironic import version
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
def parse_args(argv, default_config_files=None):
db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='ironic.sqlite')
rpc.set_defaults(control_exchange='ironic')
cfg.CONF(argv[1:],
project='ironic',
version=version.version_string(),
default_config_files=default_config_files)
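
The module above only registers defaults and hands argv to oslo.config. As a hedged sketch (not part of this commit), a service entry point would typically wire it up roughly as follows; the main() wrapper and the logging setup call are illustrative assumptions:

import sys

from ironic import config
from ironic.openstack.common import log as logging


def main():
    # Register the SQL/RPC defaults above and parse ironic.conf plus CLI overrides.
    config.parse_args(sys.argv)
    # Illustrative: configure logging once the options are loaded.
    logging.setup('ironic')


if __name__ == '__main__':
    main()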

227
ironic/context.py Normal file

@@ -0,0 +1,227 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
import copy
import uuid
from ironic import exception
from ironic.openstack.common import local
from ironic.openstack.common import log as logging
from ironic.openstack.common import timeutils
from ironic import policy
LOG = logging.getLogger(__name__)
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False, **kwargs):
"""
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
*only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
str(kwargs))
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, basestring):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in ('volume',)]
else:
# if list is empty or none
self.service_catalog = []
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def update_store(self):
local.store.context = self
def to_dict(self):
return {'user_id': self.user_id,
'project_id': self.project_id,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': timeutils.strtime(self.timestamp),
'request_id': self.request_id,
'auth_token': self.auth_token,
'quota_class': self.quota_class,
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
'instance_lock_checked': self.instance_lock_checked,
'tenant': self.tenant,
'user': self.user}
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
# NOTE(sirp): the openstack/common version of RequestContext uses
# tenant/user whereas the Nova version uses project_id/user_id. We need
# this shim in order to use context-aware code from openstack/common, like
# logging, until we make the switch to using openstack/common's version of
# RequestContext.
@property
def tenant(self):
return self.project_id
@property
def user(self):
return self.user_id
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_admin_context(ctxt):
"""Raise exception.AdminRequired() if context is an admin context."""
if not ctxt.is_admin:
raise exception.AdminRequired()
def require_context(ctxt):
"""Raise exception.NotAuthorized() if context is not a user or an
admin context.
"""
if not ctxt.is_admin and not is_user_context(ctxt):
raise exception.NotAuthorized()
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.NotAuthorized()
elif context.quota_class != class_name:
raise exception.NotAuthorized()
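
A hedged usage sketch for the class above: build a context, elevate it, and serialize it the way RPC callers typically do. is_admin is passed explicitly so the example does not depend on an initialized policy file; the fake IDs are placeholders:

from ironic import context

ctxt = context.RequestContext(user_id='fake-user',
                              project_id='fake-project',
                              is_admin=False)
admin_ctxt = ctxt.elevated()   # copy with is_admin=True and the 'admin' role added
payload = ctxt.to_dict()       # serializable form, e.g. for an RPC envelope
# RequestContext.from_dict(payload) rebuilds the context on the receiving side.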

1217
ironic/exception.py Normal file

File diff suppressed because it is too large

62
ironic/netconf.py Normal file

@@ -0,0 +1,62 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo.config import cfg
CONF = cfg.CONF
def _get_my_ip():
"""
    Returns the actual IP address of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
netconf_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='ip address of this host'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
cfg.BoolOpt('use_ipv6',
default=False,
help='use ipv6'),
]
CONF.register_opts(netconf_opts)
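
Consumers pick these options up simply by importing this module for its registration side effect. A minimal hedged sketch, assuming the configuration has already been parsed (for example via ironic.config.parse_args):

from oslo.config import cfg

from ironic import netconf  # noqa  (registers my_ip, host and use_ipv6)

CONF = cfg.CONF


def bind_address():
    # Falls back to the auto-detected address unless my_ip is set in ironic.conf.
    return CONF.my_ip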

68
ironic/paths.py Normal file

@@ -0,0 +1,68 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.config import cfg
path_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory where the nova python module is installed'),
cfg.StrOpt('bindir',
default='$pybasedir/bin',
help='Directory where nova binaries are installed'),
cfg.StrOpt('state_path',
default='$pybasedir',
help="Top-level directory for maintaining nova's state"),
]
CONF = cfg.CONF
CONF.register_opts(path_opts)
def basedir_def(*args):
"""Return an uninterpolated path relative to $pybasedir."""
return os.path.join('$pybasedir', *args)
def bindir_def(*args):
"""Return an uninterpolated path relative to $bindir."""
return os.path.join('$bindir', *args)
def state_path_def(*args):
"""Return an uninterpolated path relative to $state_path."""
return os.path.join('$state_path', *args)
def basedir_rel(*args):
"""Return a path relative to $pybasedir."""
return os.path.join(CONF.pybasedir, *args)
def bindir_rel(*args):
"""Return a path relative to $bindir."""
return os.path.join(CONF.bindir, *args)
def state_path_rel(*args):
"""Return a path relative to $state_path."""
return os.path.join(CONF.state_path, *args)
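
The *_def helpers keep the substitution variable so the result can serve as another option's default (oslo.config interpolates it later), while the *_rel helpers resolve against the current CONF values immediately. A short hedged sketch:

from ironic import paths

# Uninterpolated -- suitable as an option default, expanded later by oslo.config.
default_db = 'sqlite:///' + paths.state_path_def('ironic.sqlite')
# default_db == 'sqlite:///$state_path/ironic.sqlite'

# Resolved now against the configured state_path.
db_path = paths.state_path_rel('ironic.sqlite')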

132
ironic/policy.py Normal file

@@ -0,0 +1,132 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Nova."""
import os.path
from oslo.config import cfg
from ironic import exception
from ironic.openstack.common import policy
from ironic import utils
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file representing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule checked when requested rule is not found')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
_POLICY_PATH = None
_POLICY_CACHE = {}
def reset():
global _POLICY_PATH
global _POLICY_CACHE
_POLICY_PATH = None
_POLICY_CACHE = {}
policy.reset()
def init():
global _POLICY_PATH
global _POLICY_CACHE
if not _POLICY_PATH:
_POLICY_PATH = CONF.policy_file
if not os.path.exists(_POLICY_PATH):
_POLICY_PATH = CONF.find_file(_POLICY_PATH)
if not _POLICY_PATH:
raise exception.ConfigNotFound(path=CONF.policy_file)
utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
reload_func=_set_rules)
def _set_rules(data):
default_rule = CONF.policy_default_rule
policy.set_rules(policy.Rules.load_json(data, default_rule))
def enforce(context, action, target, do_raise=True):
"""Verifies that the action is valid on the target in this context.
:param context: nova context
:param action: string representing the action to be checked
this should be colon separated for clarity.
i.e. ``compute:create_instance``,
``compute:attach_volume``,
``volume:attach_volume``
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param do_raise: if True (the default), raises PolicyNotAuthorized;
if False, returns False
:raises nova.exception.PolicyNotAuthorized: if verification fails
and do_raise is True.
:return: returns a non-False value (not necessarily "True") if
authorized, and the exact value False if not authorized and
do_raise is False.
"""
init()
credentials = context.to_dict()
# Add the exception arguments if asked to do a raise
extra = {}
if do_raise:
extra.update(exc=exception.PolicyNotAuthorized, action=action)
return policy.check(action, target, credentials, **extra)
def check_is_admin(context):
"""Whether or not roles contains 'admin' role according to policy setting.
"""
init()
    # the target is the user itself
credentials = context.to_dict()
target = credentials
return policy.check('context_is_admin', target, credentials)
@policy.register('is_admin')
class IsAdminCheck(policy.Check):
"""An explicit check for is_admin."""
def __init__(self, kind, match):
"""Initialize the check."""
self.expected = (match.lower() == 'true')
super(IsAdminCheck, self).__init__(kind, str(self.expected))
def __call__(self, target, creds):
"""Determine whether is_admin matches the requested value."""
return creds['is_admin'] == self.expected
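
A hedged sketch of the intended call pattern for enforce(). The 'baremetal:node:create' rule name and the target dict are illustrative, and a policy.json must be discoverable via CONF.policy_file for init() to succeed:

from ironic import context
from ironic import policy

ctxt = context.get_admin_context()
target = {'project_id': ctxt.project_id}
# Raises PolicyNotAuthorized on failure, since do_raise defaults to True.
policy.enforce(ctxt, 'baremetal:node:create', target)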

55
ironic/safe_utils.py Normal file

@@ -0,0 +1,55 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions that won't produce circular imports."""
import inspect
def getcallargs(function, *args, **kwargs):
"""This is a simplified inspect.getcallargs (2.7+).
It should be replaced when python >= 2.7 is standard.
"""
keyed_args = {}
argnames, varargs, keywords, defaults = inspect.getargspec(function)
keyed_args.update(kwargs)
#NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
# argnames but not in args or kwargs. Uses 'in' rather than '==' because
# some tests use 'self2'.
if 'self' in argnames[0] or 'cls' == argnames[0]:
# The function may not actually be a method or have im_self.
# Typically seen when it's stubbed with mox.
if inspect.ismethod(function) and hasattr(function, 'im_self'):
keyed_args[argnames[0]] = function.im_self
else:
keyed_args[argnames[0]] = None
remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
keyed_args.update(dict(zip(remaining_argnames, args)))
if defaults:
num_defaults = len(defaults)
for argname, value in zip(argnames[-num_defaults:], defaults):
if argname not in keyed_args:
keyed_args[argname] = value
return keyed_args
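
A brief hedged example of what getcallargs() returns for an ordinary function; deploy() here is made up purely for illustration:

from ironic import safe_utils


def deploy(node_id, image='default', reboot=True):
    pass

call_args = safe_utils.getcallargs(deploy, 'node-1', reboot=False)
# call_args == {'node_id': 'node-1', 'image': 'default', 'reboot': False}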

690
ironic/service.py Normal file

@@ -0,0 +1,690 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import inspect
import os
import random
import signal
import sys
import time
import eventlet
import greenlet
from oslo.config import cfg
from nova import conductor
from nova import context
from nova import exception
from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import rpc
from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='seconds between nodes reporting state to datastore'),
cfg.BoolOpt('periodic_enable',
default=True,
help='enable periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.ListOpt('enabled_apis',
default=['ec2', 'osapi_compute', 'metadata'],
help='a list of APIs to enable by default'),
cfg.ListOpt('enabled_ssl_apis',
default=[],
help='a list of APIs with enabled SSL'),
cfg.StrOpt('ec2_listen',
default="0.0.0.0",
help='IP address for EC2 API to listen'),
cfg.IntOpt('ec2_listen_port',
default=8773,
help='port for ec2 api to listen'),
cfg.IntOpt('ec2_workers',
default=None,
help='Number of workers for EC2 API service'),
cfg.StrOpt('osapi_compute_listen',
default="0.0.0.0",
help='IP address for OpenStack API to listen'),
cfg.IntOpt('osapi_compute_listen_port',
default=8774,
               help='listen port for osapi compute'),
cfg.IntOpt('osapi_compute_workers',
default=None,
help='Number of workers for OpenStack API service'),
cfg.StrOpt('metadata_manager',
default='nova.api.manager.MetadataManager',
help='OpenStack metadata service manager'),
cfg.StrOpt('metadata_listen',
default="0.0.0.0",
help='IP address for metadata api to listen'),
cfg.IntOpt('metadata_listen_port',
default=8775,
help='port for metadata api to listen'),
cfg.IntOpt('metadata_workers',
default=None,
help='Number of workers for metadata service'),
cfg.StrOpt('compute_manager',
default='nova.compute.manager.ComputeManager',
help='full class name for the Manager for compute'),
cfg.StrOpt('console_manager',
default='nova.console.manager.ConsoleProxyManager',
help='full class name for the Manager for console proxy'),
cfg.StrOpt('cert_manager',
default='nova.cert.manager.CertManager',
help='full class name for the Manager for cert'),
cfg.StrOpt('network_manager',
default='nova.network.manager.VlanManager',
help='full class name for the Manager for network'),
cfg.StrOpt('scheduler_manager',
default='nova.scheduler.manager.SchedulerManager',
help='full class name for the Manager for scheduler'),
cfg.IntOpt('service_down_time',
default=60,
help='maximum time since last check-in for up service'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.import_opt('host', 'nova.netconf')
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self._services = []
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
@staticmethod
def run_server(server):
"""Start and wait for a server to finish.
:param service: Server to run and wait for.
:returns: None
"""
server.start()
server.wait()
def launch_server(self, server):
"""Load and start the given server.
:param server: The server you would like to start.
:returns: None
"""
if self.backdoor_port is not None:
server.backdoor_port = self.backdoor_port
gt = eventlet.spawn(self.run_server, server)
self._services.append(gt)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
for service in self._services:
service.kill()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
for service in self._services:
try:
service.wait()
except greenlet.GreenletExit:
pass
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
raise SignalExit(signo)
def wait(self):
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
LOG.debug(_('Full set of CONF:'))
for flag in CONF:
flag_get = CONF.get(flag, None)
# hide flag contents from log if contains a password
# should use secret flag when switch over to openstack-common
if ("_password" in flag or "_key" in flag or
(flag == "sql_connection" and "mysql:" in flag_get)):
LOG.debug(_('%(flag)s : FLAG SET ') % locals())
else:
LOG.debug('%(flag)s : %(flag_get)s' % locals())
status = None
try:
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
finally:
self.stop()
rpc.cleanup()
if status is not None:
sys.exit(status)
class ServerWrapper(object):
def __init__(self, server, workers):
self.server = server
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self):
self.children = {}
self.sigcaught = None
self.running = True
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process(self, server):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
signal.signal(signal.SIGTERM, _sigterm)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.run_server(server)
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
status = 0
try:
self._child_process(wrap.server)
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
status = 2
finally:
wrap.server.stop()
os._exit(status)
LOG.info(_('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_server(self, server, workers=1):
wrap = ServerWrapper(server, workers)
LOG.info(_('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
pid, status = os.wait()
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals())
else:
code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)d exited with status %(code)d'), locals())
if pid not in self.children:
LOG.warning(_('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
while self.running:
wrap = self._wait_child()
if not wrap:
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
if self.sigcaught:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[self.sigcaught]
LOG.info(_('Caught %s, stopping children'), signame)
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
    its state to the database services table."""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_enable=None, periodic_fuzzy_delay=None,
periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
# NOTE(russellb) We want to make sure to create the servicegroup API
# instance early, before creating other things such as the manager,
# that will also create a servicegroup API instance. Internally, the
# servicegroup only allocates a single instance of the driver API and
# we want to make sure that our value of db_allowed is there when it
# gets created. For that to happen, this has to be the first instance
# of the servicegroup API.
self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
self.periodic_enable = periodic_enable
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.periodic_interval_max = periodic_interval_max
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
self.backdoor_port = None
self.conductor_api = conductor.API(use_local=db_allowed)
self.conductor_api.wait_until_ready(context.get_admin_context())
def start(self):
verstr = version.version_string_with_package()
LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
self.service_ref = self.conductor_api.service_get_by_args(ctxt,
self.host, self.binary)
self.service_id = self.service_ref['id']
except exception.NotFound:
self.service_ref = self._create_service_ref(ctxt)
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
self.manager.pre_start_hook(rpc_connection=self.conn)
rpc_dispatcher = self.manager.create_rpc_dispatcher(self.backdoor_port)
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)
self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
self.manager.post_start_hook()
LOG.debug(_("Join ServiceGroup membership for this service %s")
% self.topic)
# Add service to the ServiceGroup membership group.
pulse = self.servicegroup_api.join(self.host, self.topic, self)
if pulse:
self.timers.append(pulse)
if self.periodic_enable:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.DynamicLoopingCall(self.periodic_tasks)
periodic.start(initial_delay=initial_delay,
periodic_interval_max=self.periodic_interval_max)
self.timers.append(periodic)
def _create_service_ref(self, context):
svc_values = {
'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0
}
service = self.conductor_api.service_create(context, svc_values)
self.service_id = service['id']
return service
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_enable=None,
periodic_fuzzy_delay=None, periodic_interval_max=None,
db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'nova-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_enable: defaults to CONF.periodic_enable
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
:param periodic_interval_max: if set, the max time to wait between runs
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary.rpartition('nova-')[2]
if not manager:
manager_cls = ('%s_manager' %
binary.rpartition('nova-')[2])
manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_enable is None:
periodic_enable = CONF.periodic_enable
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_enable=periodic_enable,
periodic_fuzzy_delay=periodic_fuzzy_delay,
periodic_interval_max=periodic_interval_max,
db_allowed=db_allowed)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
try:
self.conductor_api.service_destroy(context.get_admin_context(),
self.service_id)
except exception.NotFound:
LOG.warn(_('Service killed that has no database entry'))
def stop(self):
# Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway.
try:
self.conn.close()
except Exception:
pass
for x in self.timers:
try:
x.stop()
except Exception:
pass
self.timers = []
def wait(self):
for x in self.timers:
try:
x.wait()
except Exception:
pass
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def basic_config_check(self):
"""Perform basic config checks before starting processing."""
# Make sure the tempdir exists and is writable
try:
with utils.tempdir() as tmpdir:
pass
except Exception as e:
LOG.error(_('Temporary directory is invalid: %s'), e)
sys.exit(1)
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = getattr(CONF, '%s_workers' % name, None)
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
if workers:
_launcher = ProcessLauncher()
_launcher.launch_server(server, workers=workers)
else:
_launcher = ServiceLauncher()
_launcher.launch_server(server)
def wait():
_launcher.wait()
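
Note that this module is imported verbatim and still references the nova namespace (conductor, servicegroup, wsgi, and so on), so it cannot run inside ironic as-is. For orientation only, a hedged sketch of the entry-point pattern the serve()/wait() pair supports, mirroring Nova's binaries; the binary name, topic and manager path are illustrative placeholders:

import sys

from ironic import config
from ironic import service

config.parse_args(sys.argv)
server = service.Service.create(binary='ironic-manager',
                                topic='manager',
                                manager='ironic.manager.manager.ManagerService')
service.serve(server)
service.wait()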


@@ -0,0 +1,76 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo.config import cfg
from nova import config
from nova import ipv6
from nova import paths
from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(fixtures.Fixture):
"""Fixture to manage global conf settings."""
def __init__(self, conf):
self.conf = conf
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('api_paste_config',
paths.state_path_def('etc/nova/api-paste.ini'))
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
self.conf.set_default('fake_network', True)
self.conf.set_default('fake_rabbit', True)
self.conf.set_default('flat_network_bridge', 'br100')
self.conf.set_default('floating_ip_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('instance_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('lock_path', None)
self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2)
self.conf.set_default('rpc_backend',
'nova.openstack.common.rpc.impl_fake')
self.conf.set_default('rpc_cast_timeout', 5)
self.conf.set_default('rpc_response_timeout', 5)
self.conf.set_default('sql_connection', "sqlite://")
self.conf.set_default('sqlite_synchronous', False)
self.conf.set_default('use_ipv6', True)
self.conf.set_default('verbose', True)
self.conf.set_default('vlan_interface', 'eth0')
config.parse_args([], default_config_files=[])
self.addCleanup(self.conf.reset)
self.addCleanup(utils.cleanup_dns_managers)
self.addCleanup(ipv6.api.reset_backend)
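
A hedged sketch of how a test case would consume the fixture above; the import path is hypothetical because the file's name is not shown in this view:

from oslo.config import cfg
import testtools

# Hypothetical import path -- the target file name is not shown in this diff.
from ironic.tests import conf_fixture


class BaseTestCase(testtools.TestCase):
    def setUp(self):
        super(BaseTestCase, self).setUp()
        # Applies the defaults above and restores any overrides on cleanup.
        self.useFixture(conf_fixture.ConfFixture(cfg.CONF))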


@@ -0,0 +1,23 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
policy_data = """
{
"admin_api": "role:admin",
"context_is_admin": "role:admin or role:administrator",
}
"""


@@ -0,0 +1,44 @@
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo.config import cfg
from nova.openstack.common import policy as common_policy
import nova.policy
from nova.tests import fake_policy
CONF = cfg.CONF
class PolicyFixture(fixtures.Fixture):
def setUp(self):
super(PolicyFixture, self).setUp()
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file_name = os.path.join(self.policy_dir.path,
'policy.json')
with open(self.policy_file_name, 'w') as policy_file:
policy_file.write(fake_policy.policy_data)
CONF.set_override('policy_file', self.policy_file_name)
nova.policy.reset()
nova.policy.init()
self.addCleanup(nova.policy.reset)
def set_rules(self, rules):
common_policy.set_rules(common_policy.Rules(
dict((k, common_policy.parse_rule(v))
for k, v in rules.items())))
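
Likewise, a hedged usage sketch for the policy fixture (import path hypothetical, as above):

import testtools

# Hypothetical import path -- the target file name is not shown in this diff.
from ironic.tests import policy_fixture


class PolicyTestCase(testtools.TestCase):
    def setUp(self):
        super(PolicyTestCase, self).setUp()
        # Writes fake_policy.policy_data to a temporary policy.json and points
        # CONF.policy_file at it for the duration of the test.
        self.policy = self.useFixture(policy_fixture.PolicyFixture())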

1266
ironic/utils.py Normal file

File diff suppressed because it is too large

47
ironic/version.py Normal file

@@ -0,0 +1,47 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from ironic.vcsversion import version_info
except ImportError:
version_info = {'branch_nick': u'LOCALBRANCH',
'revision_id': 'LOCALREVISION',
'revno': 0}
IRONIC_VERSION = ['2013', '1']
YEAR, COUNT = IRONIC_VERSION
FINAL = False # This becomes true at Release Candidate time
def canonical_version_string():
return '.'.join([YEAR, COUNT])
def version_string():
if FINAL:
return canonical_version_string()
else:
return '%s-dev' % (canonical_version_string(),)
def vcs_version_string():
return "%s:%s" % (version_info['branch_nick'], version_info['revision_id'])
def version_string_with_vcs():
return "%s-%s" % (canonical_version_string(), vcs_version_string())

24
tools/flakes.py Normal file

@@ -0,0 +1,24 @@
"""
wrapper for pyflakes to ignore gettext based warning:
"undefined name '_'"
Synced in from openstack-common
"""
__all__ = ['main']
import __builtin__ as builtins
import sys
import pyflakes.api
from pyflakes import checker
def main():
checker.Checker.builtIns = (set(dir(builtins)) |
set(['_']) |
set(checker._MAGIC_GLOBALS))
sys.exit(pyflakes.api.main())
if __name__ == "__main__":
main()


@@ -18,10 +18,15 @@
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
import argparse
from __future__ import print_function
import optparse
import os
import subprocess
import sys
@@ -39,7 +44,7 @@ class InstallVenv(object):
self.project = project
def die(self, message, *args):
print >> sys.stderr, message % args
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version(self):
@@ -86,20 +91,20 @@ class InstallVenv(object):
virtual environment.
"""
if not os.path.isdir(self.venv):
print 'Creating venv...',
print('Creating venv...', end=' ')
if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv])
else:
self.run_command(['virtualenv', '-q', self.venv])
print 'done.'
print 'Installing pip in venv...',
print('done.')
print('Installing pip in venv...', end=' ')
if not self.run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']).strip():
self.die("Failed to install pip.")
print 'done.'
print('done.')
else:
print "venv already exists..."
print("venv already exists...")
pass
def pip_install(self, *args):
@@ -108,7 +113,7 @@ class InstallVenv(object):
redirect_output=False)
def install_dependencies(self):
print 'Installing dependencies with pip (this can take a while)...'
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# distribute.
@@ -131,12 +136,12 @@
def parse_args(self, argv):
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install")
return parser.parse_args(argv[1:])
parser = optparse.OptionParser()
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install")
return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
@@ -150,12 +155,12 @@ class Distro(InstallVenv):
return
if self.check_cmd('easy_install'):
print 'Installing virtualenv via easy_install...',
print('Installing virtualenv via easy_install...', end=' ')
if self.run_command(['easy_install', 'virtualenv']):
print 'Succeeded'
print('Succeeded')
return
else:
print 'Failed'
print('Failed')
self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your'

30
tools/pip-requires Normal file

@@ -0,0 +1,30 @@
SQLAlchemy>=0.7.8,<0.7.99
Cheetah>=2.4.4
amqplib>=0.6.1
anyjson>=0.2.4
argparse
boto
eventlet>=0.9.17
kombu>=1.0.4
lxml>=2.3
routes>=1.12.3
WebOb==1.2.3
greenlet>=0.3.1
PasteDeploy>=1.5.0
paste
sqlalchemy-migrate>=0.7.2
netaddr>=0.7.6
suds>=0.4
paramiko
pyasn1
Babel>=0.9.6
iso8601>=0.1.4
httplib2
setuptools_git>=0.4
python-cinderclient>=1.0.1
python-quantumclient>=2.2.0,<3.0.0
python-glanceclient>=0.5.0,<2
python-keystoneclient>=0.2.0
stevedore>=0.7
websockify<0.4
oslo.config>=1.1.0

17
tools/test-requires Normal file

@@ -0,0 +1,17 @@
# Packages needed for dev testing
distribute>=0.6.24
coverage>=3.6
discover
feedparser
fixtures>=0.3.12
mox==0.5.3
MySQL-python
psycopg2
pep8==1.3.3
pyflakes
pylint==0.25.2
python-subunit
sphinx>=1.1.2
testrepository>=0.0.13
testtools>=0.9.27