IRR - Implemented for plugins

This change removes the in-tree plugins and points deployments at the
independent plugin repo that is now online.

Change-Id: I3a24b08d5e32d6b946bed1026dd211a6f4d44d47
Implements: blueprint independent-role-repositories
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
Kevin Carter 2016-01-26 12:19:43 -06:00
parent 918be83c6e
commit 9d348826a1
16 changed files with 20 additions and 4360 deletions


@@ -1,9 +1,8 @@
# Potential method for globally resolving plugins and libs
# - name: plugins
# src: https://github.com/os-cloud/openstack-ansible-plugins
# scm: git
# path: /etc/ansible
# version: master
- name: plugins
src: https://git.openstack.org/openstack/openstack-ansible-plugins
path: /etc/ansible
scm: git
version: master
- src: evrardjp.keepalived
name: keepalived
version: '1.3'
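
For reference, a minimal sketch of what consuming an entry like the plugins one above amounts to; the plain `git clone` call below is an illustrative assumption, not the project's actual tooling:

import subprocess

# Fetch the plugins entry from the role-requirements file by hand:
# clone the named branch of `src` into `path`/`name`.
entry = {
    'name': 'plugins',
    'src': 'https://git.openstack.org/openstack/openstack-ansible-plugins',
    'path': '/etc/ansible',
    'version': 'master',
}
subprocess.check_call([
    'git', 'clone', '--branch', entry['version'],
    entry['src'], '%s/%s' % (entry['path'], entry['name']),
])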


@@ -1,16 +1,12 @@
[defaults]
# Additional plugins
lookup_plugins = plugins/lookups
filter_plugins = plugins/filters
action_plugins = plugins/actions
# Potential method for globally resolving plugins and libs
# lookup_plugins = /etc/ansible/plugins/lookups
# filter_plugins = /etc/ansible/plugins/filters
# action_plugins = /etc/ansible/plugins/actions
# library = /etc/ansible/plugins/library
gathering = smart
lookup_plugins = /etc/ansible/plugins/lookups
filter_plugins = /etc/ansible/plugins/filters
action_plugins = /etc/ansible/plugins/actions
library = /etc/ansible/plugins/library
# Fact caching
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /etc/openstack_deploy/ansible_facts
fact_caching_timeout = 86400


@@ -1,66 +0,0 @@
# this is a virtual module that is entirely implemented server side
DOCUMENTATION = """
---
module: config_template
version_added: 1.9.2
short_description: Renders template files providing a create/update override interface
description:
- The module contains the template functionality with the ability to override items
in config, in transit, through the use of a simple dictionary without having to
write out various temp files on target machines. The module renders all of the
potential jinja a user could provide in both the template file and in the override
dictionary which is ideal for deployers who may have lots of different configs
using a similar code base.
- The module is an extension of the **copy** module and all of the attributes that can be
set there are available to be set here.
options:
src:
description:
- Path of a Jinja2 formatted template on the local server. This can be a relative
or absolute path.
required: true
default: null
dest:
description:
- Location to render the template to on the remote machine.
required: true
default: null
config_overrides:
description:
- A dictionary used to update or override items within a configuration template.
The dictionary data structure may be nested. If the target config file is an ini
file the nested keys in the ``config_overrides`` will be used as section
headers.
config_type:
description:
- A string value describing the target config type.
choices:
- ini
- json
- yaml
author: Kevin Carter
"""
EXAMPLES = """
- name: run config template ini
config_template:
src: templates/test.ini.j2
dest: /tmp/test.ini
config_overrides: {}
config_type: ini
- name: run config template json
config_template:
src: templates/test.json.j2
dest: /tmp/test.json
config_overrides: {}
config_type: json
- name: run config template yaml
config_template:
src: templates/test.yaml.j2
dest: /tmp/test.yaml
config_overrides: {}
config_type: yaml
"""


@@ -1,168 +0,0 @@
#!/usr/bin/env python
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import module snippets
from ansible.module_utils.basic import *
DOCUMENTATION = """
---
module: dist_sort
version_added: "1.6.6"
short_description:
- Deterministically sort a list to distribute the elements in the list
evenly. Based on external values such as host or static modifier. Returns
a string as named key ``sorted_list``.
description:
- This module returns a list of servers uniquely sorted based on an index
from a lookup value's location within a group. The group should be an
existing ansible inventory group. The module returns the sorted
list as a delimited string.
options:
src_list:
description:
- list in the form of a string separated by a delimiter.
required: True
ref_list:
description:
- list to look ``value_to_lookup`` up against to return an index number.
This should be a pre-determined ansible group containing the
``value_to_lookup``.
required: True
value_to_lookup:
description:
- value is looked up against ref_list to get index number.
required: True
sort_modifier:
description:
- add a static int into the sort equation to weight the output.
type: int
default: 0
delimiter:
description:
- delimiter used to parse ``src_list`` with.
default: ','
author:
- Kevin Carter
- Sam Yaple
"""
EXAMPLES = """
- dist_sort:
value_to_lookup: "Hostname-in-ansible-group_name"
ref_list: "{{ groups['group_name'] }}"
src_list: "Server1,Server2,Server3"
register: test_var
# With a pre-set delimiter
- dist_sort:
value_to_lookup: "Hostname-in-ansible-group_name"
ref_list: "{{ groups['group_name'] }}"
src_list: "Server1|Server2|Server3"
delimiter: '|'
register: test_var
# With a set modifier
- dist_sort:
value_to_lookup: "Hostname-in-ansible-group_name"
ref_list: "{{ groups['group_name'] }}"
src_list: "Server1#Server2#Server3"
delimiter: '#'
sort_modifier: 5
register: test_var
"""
class DistSort(object):
def __init__(self, module):
"""Deterministically sort a list of servers.
:param module: The active ansible module.
:type module: ``class``
"""
self.module = module
self.params = self.module.params
self.return_data = self._runner()
def _runner(self):
"""Return the sorted list of servers.
Based on the modulo of index of a *value_to_lookup* from an ansible
group this function will return a comma "delimiter" separated list of
items.
:returns: ``str``
"""
index = self.params['ref_list'].index(self.params['value_to_lookup'])
index += self.params['sort_modifier']
src_list = self.params['src_list'].split(
self.params['delimiter']
)
for _ in range(index % len(src_list)):
src_list.append(src_list.pop(0))
else:
return self.params['delimiter'].join(src_list)
def main():
"""Run the main app."""
module = AnsibleModule(
argument_spec=dict(
value_to_lookup=dict(
required=True,
type='str'
),
ref_list=dict(
required=True,
type='list'
),
src_list=dict(
required=True,
type='str'
),
delimiter=dict(
required=False,
type='str',
default=','
),
sort_modifier=dict(
required=False,
type='str',
default='0'
)
),
supports_check_mode=False
)
try:
# This is done so that the failure can be parsed and does not cause
# ansible to fail if a non-int is passed.
module.params['sort_modifier'] = int(module.params['sort_modifier'])
_ds = DistSort(module=module)
if _ds.return_data == module.params['src_list']:
_changed = False
else:
_changed = True
module.exit_json(changed=_changed, **{'sorted_list': _ds.return_data})
except Exception as exp:
resp = {'stderr': str(exp)}
resp.update(module.params)
module.fail_json(msg='Failed Process', **resp)
if __name__ == '__main__':
main()
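
The rotation inside `_runner` is easy to check in isolation. A quick sketch with invented host and server names shows how each host gets a different, deterministic permutation of the same list:

# Each host's index in ref_list decides how far the server list rotates.
ref_list = ['host1', 'host2', 'host3']
src_list = 'Server1,Server2,Server3'.split(',')

for host in ref_list:
    rotated = list(src_list)
    for _ in range(ref_list.index(host) % len(rotated)):
        rotated.append(rotated.pop(0))
    print('%s -> %s' % (host, ','.join(rotated)))
# host1 -> Server1,Server2,Server3
# host2 -> Server2,Server3,Server1
# host3 -> Server3,Server1,Server2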


@@ -1,236 +0,0 @@
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re  # used by _parse_openrc below
import glanceclient.client as glclient
import keystoneclient.v3.client as ksclient
# import module snippets
from ansible.module_utils.basic import *
DOCUMENTATION = """
---
module: glance
short_description:
- Basic module for interacting with openstack glance
description:
- Basic module for interacting with openstack glance
options:
command:
description:
- Operation for the module to perform. Currently available
choices:
- image-list
- image-create
openrc_path:
description:
- Path to openrc file from which credentials and the keystone
endpoint will be extracted
image_name:
description:
- Name of the image to create
image_url:
description:
- URL from which to download the image data
image_container_format:
description:
- container format that the image uses (bare)
image_disk_format:
description:
- disk format that the image uses
image_is_public:
description:
- Should the image be visible to all tenants?
choices:
- true (public)
- false (private)
api_version:
description:
- which version of the glance api to use
choices:
- 1
- 2
default: 1
insecure:
description:
- Explicitly allow client to perform "insecure" TLS
choices:
- false
- true
default: false
author: Hugh Saunders
"""
EXAMPLES = """
# Create an image
- name: Ensure cirros image
glance:
command: 'image-create'
openrc_path: /root/openrc
image_name: cirros
image_url: 'https://example-domain.com/cirros-0.3.2-source.tar.gz'
image_container_format: bare
image_disk_format: qcow2
image_is_public: True
# Get facts about existing images
- name: Get image facts
glance:
command: 'image-list'
openrc_path: /root/openrc
"""
COMMAND_MAP = {'image-list': 'list_images',
'image-create': 'create_image'}
class ManageGlance(object):
def __init__(self, module):
self.state_change = False
self.glance = None
self.keystone = None
self.module = module
try:
self._keystone_authenticate()
self._init_glance()
except Exception as e:
self.module.fail_json(
err="Initialisation Error: %s" % e,
rc=2, msg=str(e))
def _parse_openrc(self):
"""Get credentials from an openrc file."""
openrc_path = self.module.params['openrc_path']
line_re = re.compile('^export (?P<key>OS_\w*)=(?P<value>[^\n]*)')
with open(openrc_path) as openrc:
matches = [line_re.match(l) for l in openrc]
return dict(
(g.groupdict()['key'], g.groupdict()['value'])
for g in matches if g
)
def _keystone_authenticate(self):
"""Authenticate with Keystone."""
openrc = self._parse_openrc()
insecure = self.module.params['insecure']
self.keystone = ksclient.Client(insecure=insecure,
username=openrc['OS_USERNAME'],
password=openrc['OS_PASSWORD'],
project_name=openrc['OS_PROJECT_NAME'],
auth_url=openrc['OS_AUTH_URL'])
def _init_glance(self):
"""Create glance client object using token and url from keystone."""
openrc = self._parse_openrc()
p = self.module.params
v = p['api_version']
ep = self.keystone.service_catalog.url_for(
service_type='image',
endpoint_type=openrc['OS_ENDPOINT_TYPE']
)
self.glance = glclient.Client(
endpoint='%s/v%s' % (ep, v),
token=self.keystone.get_token(self.keystone.session)
)
def route(self):
"""Run the command specified by the command parameter."""
getattr(self, COMMAND_MAP[self.module.params['command']])()
def _get_image_facts(self):
"""Helper function to format image list as a dictionary."""
p = self.module.params
v = p['api_version']
if v == '1':
return dict(
(i.name, i.to_dict()) for i in self.glance.images.list()
)
elif v == '2':
return dict(
(i.name, i) for i in self.glance.images.list()
)
def list_images(self):
"""Get information about available glance images.
Returns as a fact dictionary glance_images
"""
self.module.exit_json(
changed=self.state_change,
ansible_facts=dict(glance_images=self._get_image_facts()))
def create_image(self):
"""Create a glance image that references a remote url."""
p = self.module.params
v = p['api_version']
image_name = p['image_name']
image_opts = dict(
name=image_name,
disk_format=p['image_disk_format'],
container_format=p['image_container_format'],
copy_from=p['image_url']
)
if v == '1':
image_opts['is_public'] = p['image_is_public']
elif v == '2':
if p['image_is_public']:
vis = 'public'
else:
vis = 'private'
image_opts['visibility'] = vis
images = {i.name for i in self.glance.images.list()}
if image_name in images:
self.module.exit_json(
changed=self.state_change,
ansible_facts=dict(
glance_images=self._get_image_facts()
)
)
else:
self.glance.images.create(**image_opts)
self.state_change = True
self.module.exit_json(
changed=self.state_change,
ansible_facts=dict(
glance_images=self._get_image_facts()
)
)
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=True, choices=COMMAND_MAP.keys()),
openrc_path=dict(required=True),
image_name=dict(required=False),
image_url=dict(required=False),
image_container_format=dict(required=False),
image_disk_format=dict(required=False),
image_is_public=dict(required=False, choices=BOOLEANS),
api_version=dict(default='1', required=False, choices=['1', '2']),
insecure=dict(default=False, required=False,
choices=BOOLEANS + ['True', 'False'])
),
supports_check_mode=False
)
mg = ManageGlance(module)
mg.route()
if __name__ == '__main__':
main()
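
The `_parse_openrc` helper above is a small regex scan over `export OS_*` lines; a standalone sketch with invented credentials:

import re

# Only lines matching `export OS_*=...` contribute to the result.
line_re = re.compile('^export (?P<key>OS_\w*)=(?P<value>[^\n]*)')
sample = ['export OS_USERNAME=admin\n',
          'export OS_PASSWORD=secrete\n',
          '# not an export line\n']
matches = [line_re.match(l) for l in sample]
print(dict((g.groupdict()['key'], g.groupdict()['value'])
           for g in matches if g))
# {'OS_USERNAME': 'admin', 'OS_PASSWORD': 'secrete'}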

File diff suppressed because it is too large.


@@ -1,598 +0,0 @@
#!/usr/bin/python
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import errno  # used below when creating directories
import os
import stat
import sys
import memcache
try:
from Crypto.Cipher import AES
from Crypto import Random
ENCRYPT_IMPORT = True
except ImportError:
ENCRYPT_IMPORT = False
# import module snippets
from ansible.module_utils.basic import *
DOCUMENTATION = """
---
module: memcached
version_added: "1.6.6"
short_description:
- Add, remove, and get items from memcached
description:
- Add, remove, and get items from memcached
options:
name:
description:
- Memcached key name
required: true
content:
description:
- Add content to memcached. Only used when state is 'present'.
required: false
file_path:
description:
- This can be used with state 'present' and 'retrieve'. When set
with state 'present' the contents of a file will be used, when
set with state 'retrieve' the contents of the memcached key will
be written to a file.
required: false
state:
description:
- ['absent', 'present', 'retrieve']
required: true
server:
description:
- server IP address and port. This can be a comma separated list of
servers to connect to.
required: true
encrypt_string:
description:
- Encrypt/Decrypt a memcached object using a provided value.
required: false
dir_mode:
description:
- If a directory is created when using the ``file_path`` argument
the directory will be created with a set mode.
default: '0755'
required: false
file_mode:
description:
- If a file is created when using the ``file_path`` argument
the file will be created with a set mode.
default: '0644'
required: false
expires:
description:
- Seconds until an item is expired from memcached.
default: 300
required: false
notes:
- The "absent" state will remove an item from memcached.
- The "present" state will place an item from a string or a file into
memcached.
- The "retrieve" state will get an item from memcached and return it as a
string. If a ``file_path`` is set this module will also write the value
to a file.
- All items added into memcached are base64 encoded.
- All items retrieved will be base64 decoded when possible; otherwise the
raw string value is returned.
- Items retrieved from memcached are returned within a "value" key unless
a ``file_path`` is specified, which would then write the contents of the
memcached key to a file.
- The ``file_path`` and ``content`` fields are mutually exclusive.
- If you'd like to encrypt items in memcached, PyCrypto is required.
requirements:
- "python-memcached"
optional_requirements:
- "pycrypto"
author: Kevin Carter
"""
EXAMPLES = """
# Add an item into memcached.
- memcached:
name: "key_name"
content: "Super awesome value"
state: "present"
server: "localhost:11211"
# Read the contents of a memcached key, returned as "memcached_phrase.value".
- memcached:
name: "key_name"
state: "retrieve"
server: "localhost:11211"
register: memcached_key
# Add the contents of a file into memcached.
- memcached:
name: "key_name"
file_path: "/home/user_name/file.txt"
state: "present"
server: "localhost:11211"
# Write the contents of a memcached key to a file and is returned as
# "memcached_phrase.value".
- memcached:
name: "key_name"
file_path: "/home/user_name/file.txt"
state: "retrieve"
server: "localhost:11211"
register: memcached_key
# Delete an item from memcached.
- memcached:
name: "key_name"
state: "absent"
server: "localhost:11211"
"""
SERVER_MAX_VALUE_LENGTH = 1024 * 256
MAX_MEMCACHED_CHUNKS = 256
class AESCipher(object):
"""Encrypt an a string in using AES.
Solution derived from "http://stackoverflow.com/a/21928790"
"""
def __init__(self, key):
if ENCRYPT_IMPORT is False:
raise ImportError(
'PyCrypto failed to be imported. Encryption is not supported'
' on this system until PyCrypto is installed.'
)
self.bs = 32
if len(key) >= 32:
self.key = key[:32]
else:
self.key = self._pad(key)
def encrypt(self, raw):
"""Encrypt raw message.
:param raw: ``str``
:returns: ``str`` Base64 encoded string.
"""
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw))
def decrypt(self, enc):
"""Decrypt an encrypted message.
:param enc: ``str``
:returns: ``str``
"""
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:]))
def _pad(self, string):
"""Pad an AES encryption key.
:param string: ``str``
"""
base = (self.bs - len(string) % self.bs)
back = chr(self.bs - len(string) % self.bs)
return string + base * back
@staticmethod
def _unpad(string):
"""Un-pad an AES encryption key.
:param string: ``str``
"""
ordinal_range = ord(string[len(string) - 1:])
return string[:-ordinal_range]
class Memcached(object):
"""Manage objects within memcached."""
def __init__(self, module):
self.module = module
self.state_change = False
self.mc = None
def router(self):
"""Route all commands to their respected functions.
If an exception happens a failure will be raised.
"""
try:
action = getattr(self, self.module.params['state'])
self.mc = memcache.Client(
self.module.params['server'].split(','),
server_max_value_length=SERVER_MAX_VALUE_LENGTH,
debug=0
)
facts = action()
except Exception as exp:
self._failure(error=str(exp), rc=1, msg='general exception')
else:
self.mc.disconnect_all()
self.module.exit_json(
changed=self.state_change, **facts
)
def _failure(self, error, rc, msg):
"""Return a Failure when running an Ansible command.
:param error: ``str`` Error that occurred.
:param rc: ``int`` Return code while executing an Ansible command.
:param msg: ``str`` Message to report.
"""
self.module.fail_json(msg=msg, rc=rc, err=error)
def absent(self):
"""Remove a key from memcached.
If the value is not deleted when instructed to do so an exception will
be raised.
:return: ``dict``
"""
key_name = self.module.params['name']
get_keys = [
'%s.%s' % (key_name, i) for i in xrange(MAX_MEMCACHED_CHUNKS)
]
self.mc.delete_multi(get_keys)
value = self.mc.get_multi(get_keys)
if not value:
self.state_change = True
return {'absent': True, 'key': self.module.params['name']}
else:
self._failure(
error='Memcache key not deleted',
rc=1,
msg='Failed to remove an item from memcached. Please check your'
' memcached server for issues. If you are load balancing'
' memcached, attempt to connect to a single node.'
)
@staticmethod
def _decode_value(value):
"""Return a ``str`` from a base64 decoded value.
If the content is not a base64 ``str`` the raw value will be returned.
:param value: ``str``
:return:
"""
try:
b64_value = base64.decodestring(value)
except Exception:
return value
else:
return b64_value
def _encode_value(self, value):
"""Return a base64 encoded value.
If the value can't be base64 encoded an exception will be raised.
:param value: ``str``
:return: ``str``
"""
try:
b64_value = base64.encodestring(value)
except Exception as exp:
self._failure(
error=str(exp),
rc=1,
msg='The value provided can not be Base64 encoded.'
)
else:
return b64_value
def _file_read(self, full_path, pass_on_error=False):
"""Read the contents of a file.
This will read the contents of a file. If the ``full_path`` does not
exist an exception will be raised.
:param full_path: ``str``
:return: ``str``
"""
try:
with open(full_path, 'rb') as f:
o_value = f.read()
except IOError as exp:
if pass_on_error is False:
self._failure(
error=str(exp),
rc=1,
msg="The file you've specified does not exist. Please"
" check your full path @ [ %s ]." % full_path
)
else:
return None
else:
return o_value
def _chown(self, path, mode_type):
"""Chown a file or directory based on a given mode type.
If the file is modified the state will be changed.
:param path: ``str``
:param mode_type: ``str``
"""
mode = self.module.params.get(mode_type)
# Ensure that the mode type is a string.
mode = str(mode)
_mode = oct(stat.S_IMODE(os.stat(path).st_mode))
# Chmod only when the current mode matches neither form of the
# requested mode (with or without the leading zero).
if _mode != mode and _mode[1:] != mode:
os.chmod(path, int(mode, 8))
self.state_change = True
def _file_write(self, full_path, value):
"""Write the contents of ``value`` to the ``full_path``.
This will return True upon success and will raise an exception upon
failure.
:param full_path: ``str``
:param value: ``str``
:return: ``bool``
"""
try:
# Ensure that the directory exists
dir_path = os.path.dirname(full_path)
try:
os.makedirs(dir_path)
except OSError as exp:
if exp.errno == errno.EEXIST and os.path.isdir(dir_path):
pass
else:
self._failure(
error=str(exp),
rc=1,
msg="The directory [ %s ] does not exist and couldn't"
" be created. Please check the path and that you"
" have permission to write the file."
)
# Ensure proper directory permissions
self._chown(path=dir_path, mode_type='dir_mode')
# Write contents of a cached key to a file.
with open(full_path, 'wb') as f:
if isinstance(value, list):
f.writelines(value)
else:
f.write(value)
# Ensure proper file permissions
self._chown(path=full_path, mode_type='file_mode')
except IOError as exp:
self._failure(
error=str(exp),
rc=1,
msg="There was an issue while attempting to write to the"
" file [ %s ]. Please check your full path and"
" permissions." % full_path
)
else:
return True
def retrieve(self):
"""Return a value from memcached.
If ``file_path`` is specified the value of the memcached key will be
written to a file at the ``file_path`` location. If the value of a key
is None, an exception will be raised.
:returns: ``dict``
"""
key_name = self.module.params['name']
get_keys = [
'%s.%s' % (key_name, i) for i in xrange(MAX_MEMCACHED_CHUNKS)
]
multi_value = self.mc.get_multi(get_keys)
if multi_value:
value = ''.join([i for i in multi_value.values() if i is not None])
# Get the file path if specified.
file_path = self.module.params.get('file_path')
if file_path is not None:
full_path = os.path.abspath(os.path.expanduser(file_path))
# Decode cached value
encrypt_string = self.module.params.get('encrypt_string')
if encrypt_string:
_d_value = AESCipher(key=encrypt_string)
d_value = _d_value.decrypt(enc=value)
if not d_value:
d_value = self._decode_value(value=value)
else:
d_value = self._decode_value(value=value)
o_value = self._file_read(
full_path=full_path, pass_on_error=True
)
# compare old value to new value and write if different
if o_value != d_value:
self.state_change = True
self._file_write(full_path=full_path, value=d_value)
return {
'present': True,
'key': self.module.params['name'],
'value': value,
'file_path': full_path
}
else:
return {
'present': True,
'key': self.module.params['name'],
'value': value
}
else:
self._failure(
error='Memcache key not found',
rc=1,
msg='The key you specified was not found within memcached. '
'If you are load balancing memcached, attempt to connect'
' to a single node.'
)
def present(self):
"""Create and or update a key within Memcached.
The state processed here is present. This state will ensure that
content is written to a memcached server. When ``file_path`` is
specified the content will be read in from a file.
"""
file_path = self.module.params.get('file_path')
if file_path is not None:
full_path = os.path.abspath(os.path.expanduser(file_path))
# Read the contents of a file into memcached.
o_value = self._file_read(full_path=full_path)
else:
o_value = self.module.params['content']
# Encode cached value
encrypt_string = self.module.params.get('encrypt_string')
if encrypt_string:
_d_value = AESCipher(key=encrypt_string)
d_value = _d_value.encrypt(raw=o_value)
else:
d_value = self._encode_value(value=o_value)
compare = 1024 * 128
chunks = sys.getsizeof(d_value) / compare
if chunks == 0:
chunks = 1
elif chunks > MAX_MEMCACHED_CHUNKS:
self._failure(
error='Memcache content too large',
rc=1,
msg='The content that you are attempting to cache is larger'
' than [ %s ] megabytes.'
% ((compare * MAX_MEMCACHED_CHUNKS / 1024 / 1024))
)
step = len(d_value) / chunks
if step == 0:
step = 1
key_name = self.module.params['name']
split_d_value = {}
count = 0
for i in range(0, len(d_value), step):
split_d_value['%s.%s' % (key_name, count)] = d_value[i:i + step]
count += 1
value = self.mc.set_multi(
mapping=split_d_value,
time=self.module.params['expires'],
min_compress_len=2048
)
if not value:
self.state_change = True
return {
'present': True,
'key': self.module.params['name']
}
else:
self._failure(
error='Memcache content not created',
rc=1,
msg='The content you attempted to place within memcached'
' was not created. If you are load balancing'
' memcached, attempt to connect to a single node.'
' Returned a value of unstored keys [ %s ] - Original'
' Connection [ %s ]'
% (value, [i.__dict__ for i in self.mc.servers])
)
def main():
"""Main ansible run method."""
module = AnsibleModule(
argument_spec=dict(
name=dict(
type='str',
required=True
),
content=dict(
type='str',
required=False
),
file_path=dict(
type='str',
required=False
),
state=dict(
type='str',
required=True
),
server=dict(
type='str',
required=True
),
expires=dict(
type='int',
default=300,
required=False
),
file_mode=dict(
type='str',
default='0644',
required=False
),
dir_mode=dict(
type='str',
default='0755',
required=False
),
encrypt_string=dict(
type='str',
required=False
)
),
supports_check_mode=False,
mutually_exclusive=[
['content', 'file_path']
]
)
ms = Memcached(module=module)
ms.router()
if __name__ == '__main__':
main()
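
The chunking scheme in `present` and `retrieve` stores one logical key as `key.0 .. key.N-1` and reassembles it by joining the pieces. A standalone sketch with a toy payload and chunk count:

# Split a payload into numbered sub-keys, then reassemble it in order.
key_name = 'key_name'
d_value = 'x' * 10   # stand-in for the base64-encoded payload
chunks = 3           # stand-in for the size-derived chunk count

step = len(d_value) // chunks or 1
split_d_value = {}
count = 0
for i in range(0, len(d_value), step):
    split_d_value['%s.%s' % (key_name, count)] = d_value[i:i + step]
    count += 1
print(sorted(split_d_value))  # ['key_name.0', ..., 'key_name.3']
print(''.join(split_d_value['%s.%s' % (key_name, i)]
              for i in range(count)) == d_value)  # True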


@@ -1,79 +0,0 @@
#!/usr/bin/python
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import platform
# import module snippets
from ansible.module_utils.basic import *
DOCUMENTATION = """
---
module: name2int
version_added: "1.6.6"
short_description:
- hash a host name and return an integer
description:
- hash a host name and return an integer
options:
name:
description:
- name of the host to hash
required: true
author: Kevin Carter
"""
EXAMPLES = """
# Hash a host name and return an integer
- name2int:
name: "Some-hostname.com"
"""
class HashHostname(object):
def __init__(self, module):
"""Generate an integer from a name."""
self.module = module
def return_hashed_host(self, name):
hashed_name = hashlib.md5(name).hexdigest()
hash_int = int(hashed_name, 32)
real_int = int(hash_int % 300)
return real_int
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(
required=True
)
),
supports_check_mode=False
)
try:
sm = HashHostname(module=module)
int_value = sm.return_hashed_host(platform.node())
resp = {'int_value': int_value}
module.exit_json(changed=True, **resp)
except Exception as exp:
resp = {'stderr': str(exp)}
module.fail_json(msg='Failed Process', **resp)
if __name__ == '__main__':
main()
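
A worked example of the hashing above: the md5 hex digest is read as a base-32 integer (hex digits are a subset of base-32 digits) and reduced modulo 300:

import hashlib

name = 'Some-hostname.com'
hashed_name = hashlib.md5(name.encode('utf-8')).hexdigest()
print(int(hashed_name, 32) % 300)  # a stable value in [0, 300)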


@@ -1,422 +0,0 @@
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re  # used by _parse_openrc below
import keystoneclient.v3.client as ksclient
from neutronclient.neutron import client as nclient
# import module snippets
from ansible.module_utils.basic import *
DOCUMENTATION = """
---
module: neutron
short_description:
- Basic module for interacting with openstack neutron
description:
- Basic module for interacting with openstack neutron
options:
command:
description:
- Operation for the module to perform. Currently available
choices:
- create_network
- create_subnet
- create_router
- add_router_interface
required: True
openrc_path:
description:
- Path to openrc file from which credentials and keystone endpoint
will be extracted
net_name:
description:
- Name of network
subnet_name:
description:
- Name of subnet
router_name:
description:
- Name of router
cidr:
description:
- Specify CIDR to use when creating subnet
provider_physical_network:
description:
- Specify provider:physical_network when creating network
provider_network_type:
description:
- Specify provider:network_type when creating network
provider_segmentation_id:
description:
- Specify provider:segmentation_id when creating network
router_external:
description:
- Specify router:external when creating network
external_gateway_info:
description:
- Specify external_gateway_info when creating router
insecure:
description:
- Explicitly allow client to perform "insecure" TLS
choices:
- false
- true
default: false
author: Hugh Saunders
"""
EXAMPLES = """
- name: Create private network
neutron:
command: create_network
openrc_path: /root/openrc
net_name: private
- name: Create public network
neutron:
command: create_network
openrc_path: /root/openrc
net_name: public
provider_network_type: flat
provider_physical_network: vlan
router_external: true
- name: Create private subnet
neutron:
command: create_subnet
openrc_path: /root/openrc
net_name: private
subnet_name: private-subnet
cidr: "192.168.74.0/24"
- name: Create public subnet
neutron:
command: create_subnet
openrc_path: /root/openrc
net_name: public
subnet_name: public-subnet
cidr: "10.1.13.0/24"
- name: Create router
neutron:
command: create_router
openrc_path: /root/openrc
router_name: router
external_gateway_info: public
- name: Add private subnet to router
neutron:
command: add_router_interface
openrc_path: /root/openrc
router_name: router
subnet_name: private-subnet
"""
COMMAND_MAP = {
'create_network': {
'variables': [
'net_name',
'provider_physical_network',
'provider_network_type',
'provider_segmentation_id',
'router_external',
'tenant_id'
]
},
'create_subnet': {
'variables': [
'net_name',
'subnet_name',
'cidr',
'tenant_id'
]
},
'create_router': {
'variables': [
'router_name',
'external_gateway_info',
'tenant_id'
]
},
'add_router_interface': {
'variables': [
'router_name',
'subnet_name'
]
}
}
class ManageNeutron(object):
def __init__(self, module):
self.state_change = False
self.neutron = None
self.keystone = None
self.module = module
def command_router(self):
"""Run the command as its provided to the module."""
command_name = self.module.params['command']
if command_name not in COMMAND_MAP:
self.failure(
error='No Command Found',
rc=2,
msg='Command [ %s ] was not found.' % command_name
)
action_command = COMMAND_MAP[command_name]
if hasattr(self, '_%s' % command_name):
action = getattr(self, '_%s' % command_name)
try:
self._keystone_authenticate()
self._init_neutron()
except Exception as e:
self.module.fail_json(
err="Initialisation Error: %s" % e,
rc=2, msg=str(e))
facts = action(variables=action_command['variables'])
if facts is None:
self.module.exit_json(changed=self.state_change)
else:
self.module.exit_json(
changed=self.state_change,
ansible_facts=facts
)
else:
self.failure(
error='Command not in ManageNeutron class',
rc=2,
msg='Method [ %s ] was not found.' % command_name
)
@staticmethod
def _facts(resource_type, resource_data):
"""Return a dict for our Ansible facts."""
key = 'neutron_%s' % resource_type
facts = {key: {}}
for f in resource_data[resource_type]:
res_name = f['name']
del f['name']
facts[key][res_name] = f
return facts
def _get_vars(self, variables, required=None):
"""Return a dict of all variables as found within the module.
:param variables: ``list`` List of all variables that are available to
use within the Neutron Command.
:param required: ``list`` Name of variables that are required.
"""
return_dict = {}
for variable in variables:
return_dict[variable] = self.module.params.get(variable)
else:
if isinstance(required, list):
for var_name in required:
check = return_dict.get(var_name)
if check is None:
self.failure(
error='Missing [ %s ] from Task or found a None'
' value' % var_name,
rc=000,
msg='variables %s - available params [ %s ]'
% (variables, self.module.params)
)
return return_dict
def failure(self, error, rc, msg):
"""Return a Failure when running an Ansible command.
:param error: ``str`` Error that occurred.
:param rc: ``int`` Return code while executing an Ansible command.
:param msg: ``str`` Message to report.
"""
self.module.fail_json(msg=msg, rc=rc, err=error)
def _parse_openrc(self):
"""Get credentials from an openrc file."""
openrc_path = self.module.params['openrc_path']
line_re = re.compile('^export (?P<key>OS_\w*)=(?P<value>[^\n]*)')
with open(openrc_path) as openrc:
matches = [line_re.match(l) for l in openrc]
return dict(
(g.groupdict()['key'], g.groupdict()['value'])
for g in matches if g
)
def _keystone_authenticate(self):
"""Authenticate with Keystone."""
openrc = self._parse_openrc()
insecure = self.module.params['insecure']
self.keystone = ksclient.Client(insecure=insecure,
username=openrc['OS_USERNAME'],
password=openrc['OS_PASSWORD'],
project_name=openrc['OS_PROJECT_NAME'],
auth_url=openrc['OS_AUTH_URL'])
def _init_neutron(self):
"""Create neutron client object using token and url from keystone."""
openrc = self._parse_openrc()
self.neutron = nclient.Client(
'2.0',
endpoint_url=self.keystone.service_catalog.url_for(
service_type='network',
endpoint_type=openrc['OS_ENDPOINT_TYPE']),
token=self.keystone.get_token(self.keystone.session))
def _get_resource_by_name(self, resource_type, resource_name):
action = getattr(self.neutron, 'list_%s' % resource_type)
resource = action(name=resource_name)[resource_type]
if resource:
return resource[0]['id']
else:
return None
def _create_network(self, variables):
required_vars = ['net_name']
variables_dict = self._get_vars(variables, required=required_vars)
net_name = variables_dict.pop('net_name')
provider_physical_network = variables_dict.pop(
'provider_physical_network'
)
provider_network_type = variables_dict.pop('provider_network_type')
provider_segmentation_id = variables_dict.pop(
'provider_segmentation_id'
)
router_external = variables_dict.pop('router_external')
tenant_id = variables_dict.pop('tenant_id')
if not self._get_resource_by_name('networks', net_name):
n = {"name": net_name, "admin_state_up": True}
if provider_physical_network:
n['provider:physical_network'] = provider_physical_network
if provider_network_type:
n['provider:network_type'] = provider_network_type
if provider_segmentation_id:
n['provider:segmentation_id'] = str(provider_segmentation_id)
if router_external:
n['router:external'] = router_external
if tenant_id:
n['tenant_id'] = tenant_id
self.state_change = True
self.neutron.create_network({"network": n})
return self._facts('networks', self.neutron.list_networks())
def _create_subnet(self, variables):
required_vars = ['net_name', 'subnet_name', 'cidr']
variables_dict = self._get_vars(variables, required=required_vars)
net_name = variables_dict.pop('net_name')
subnet_name = variables_dict.pop('subnet_name')
cidr = variables_dict.pop('cidr')
network_id = self._get_resource_by_name('networks', net_name)
tenant_id = variables_dict.pop('tenant_id')
if not network_id:
self.failure(
error='Network not found',
rc=1,
msg='The specified network could not be found'
)
if not self.neutron.list_subnets(cidr=cidr,
network_id=network_id)['subnets']:
self.state_change = True
s = {"name": subnet_name, "cidr": cidr, "ip_version": 4,
"network_id": network_id}
if tenant_id:
s["tenant_id"] = tenant_id
self.neutron.create_subnet({"subnet": s})
return self._facts('subnets', self.neutron.list_subnets())
def _create_router(self, variables):
required_vars = ['router_name', 'external_gateway_info']
variables_dict = self._get_vars(variables, required=required_vars)
router_name = variables_dict.pop('router_name')
external_gateway_info = variables_dict.pop('external_gateway_info')
tenant_id = variables_dict.pop('tenant_id')
if not self._get_resource_by_name('routers', router_name):
self.state_change = True
r = {'name': router_name}
if external_gateway_info:
network_id = self._get_resource_by_name('networks',
external_gateway_info)
r['external_gateway_info'] = {'network_id': network_id}
if tenant_id:
r['tenant_id'] = tenant_id
self.neutron.create_router({'router': r})
return self._facts('routers', self.neutron.list_routers())
def _add_router_interface(self, variables):
required_vars = ['router_name', 'subnet_name']
variables_dict = self._get_vars(variables, required=required_vars)
router_name = variables_dict.pop('router_name')
subnet_name = variables_dict.pop('subnet_name')
router_id = self._get_resource_by_name('routers', router_name)
subnet_id = self._get_resource_by_name('subnets', subnet_name)
if not router_id:
self.failure(
error='Router not found',
rc=1,
msg='The specified router could not be found'
)
if not subnet_id:
self.failure(
error='Subnet not found',
rc=1,
msg='The specified subnet could not be found'
)
found = False
for port in self.neutron.list_ports(device_id=router_id)['ports']:
for fixed_ips in port['fixed_ips']:
if fixed_ips['subnet_id'] == subnet_id:
found = True
if not found:
self.state_change = True
self.neutron.add_interface_router(router_id,
{'subnet_id': subnet_id})
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=True, choices=COMMAND_MAP.keys()),
openrc_path=dict(required=True),
net_name=dict(required=False),
subnet_name=dict(required=False),
cidr=dict(required=False),
provider_physical_network=dict(required=False),
provider_network_type=dict(required=False),
provider_segmentation_id=dict(required=False),
router_external=dict(required=False),
router_name=dict(required=False),
external_gateway_info=dict(required=False),
tenant_id=dict(required=False),
insecure=dict(default=False, required=False,
choices=BOOLEANS + ['True', 'False'])
),
supports_check_mode=False
)
mn = ManageNeutron(module)
mn.command_router()
if __name__ == '__main__':
main()
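
Stripped of the OpenStack specifics, `command_router` plus `COMMAND_MAP` above is a plain string-to-method dispatch; a reduced sketch:

# The command string picks a '_<command>' method and the variable names
# that the method is allowed to read.
COMMAND_MAP = {'create_network': {'variables': ['net_name']}}

class Router(object):
    def _create_network(self, variables):
        return 'would create a network using %s' % variables

    def route(self, command_name):
        action = getattr(self, '_%s' % command_name)
        return action(variables=COMMAND_MAP[command_name]['variables'])

print(Router().route('create_network'))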


@@ -1,283 +0,0 @@
#!/usr/bin/python
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import module snippets
from ansible.module_utils.basic import *
DOCUMENTATION = """
---
module: provider_networks
version_added: "1.8.4"
short_description:
- Parse a list of networks and return data that Ansible can use
description:
- Parse a list of networks and return data that Ansible can use
options:
provider_networks:
description:
- List of networks to parse
required: true
is_metal:
description:
- Enable handling of on metal hosts
required: false
bind_prefix:
description:
- Add a prefix to all network interfaces.
required: false
author: Kevin Carter
"""
EXAMPLES = """
## This is what the provider_networks list should look like.
# provider_networks:
# - network:
# container_bridge: "br-mgmt"
# container_type: "veth"
# container_interface: "eth1"
# ip_from_q: "container"
# type: "raw"
# group_binds:
# - all_containers
# - hosts
# is_container_address: true
# is_ssh_address: true
# - network:
# container_bridge: "br-vxlan"
# container_type: "veth"
# container_interface: "eth10"
# ip_from_q: "tunnel"
# type: "vxlan"
# range: "1:1000"
# net_name: "vxlan"
# group_binds:
# - neutron_linuxbridge_agent
# - network:
# container_bridge: "br-vlan"
# container_type: "veth"
# container_interface: "eth12"
# host_bind_override: "eth12"
# type: "flat"
# net_name: "flat"
# group_binds:
# - neutron_linuxbridge_agent
# - network:
# container_bridge: "br-vlan"
# container_type: "veth"
# container_interface: "eth11"
# host_bind_override: "eth11"
# type: "vlan"
# range: "1:1, 101:101"
# net_name: "vlan"
# group_binds:
# - neutron_linuxbridge_agent
# - network:
# container_bridge: "br-storage"
# container_type: "veth"
# container_interface: "eth2"
# ip_from_q: "storage"
# type: "raw"
# group_binds:
# - glance_api
# - cinder_api
# - cinder_volume
# - nova_compute
# - swift_proxy
- name: Test provider networks
provider_networks:
provider_networks: "{{ provider_networks }}"
register: pndata1
- name: Test provider networks is metal
provider_networks:
provider_networks: "{{ provider_networks }}"
is_metal: true
register: pndata2
- name: Test provider networks with prefix
provider_networks:
provider_networks: "{{ provider_networks }}"
bind_prefix: "brx"
is_metal: true
register: pndata3
## Module output:
# {
# "network_flat_networks": "flat",
# "network_flat_networks_list": [
# "flat"
# ],
# "network_mappings": "flat:brx-eth12,vlan:brx-eth11",
# "network_mappings_list": [
# "flat:brx-eth12",
# "vlan:brx-eth11"
# ],
# "network_types": "vxlan,flat,vlan",
# "network_types_list": [
# "vxlan",
# "flat",
# "vlan"
# ],
# "network_vlan_ranges": "vlan:1:1,vlan:1024:1025",
# "network_vlan_ranges_list": [
# "vlan:1:1",
# "vlan:1024:1025"
# ],
# "network_vxlan_ranges": "1:1000",
# "network_vxlan_ranges_list": [
# "1:1000"
# ]
# }
"""
class ProviderNetworksParsing(object):
def __init__(self, module):
"""Generate an integer from a name.
:param module: Load the ansible module
:type module: ``object``
"""
self.module = module
self.network_vlan_ranges = list()
self.network_vxlan_ranges = list()
self.network_flat_networks = list()
self.network_mappings = list()
self.network_types = list()
def load_networks(self, provider_networks, is_metal=False,
bind_prefix=None):
"""Load the lists of network and network data types.
:param provider_networks: list of networks defined in user_config
:type provider_networks: ``list``
:param is_metal: Enable or disable handling of on-metal nodes
:type is_metal: ``bool``
:param bind_prefix: Pre-interface prefix forced within the network map
:type bind_prefix: ``str``
"""
for net in provider_networks:
if net['network']['type'] == "vlan":
if "vlan" not in self.network_types:
self.network_types.append('vlan')
for vlan_range in net['network']['range'].split(','):
self.network_vlan_ranges.append(
'%s:%s' % (
net['network']['net_name'], vlan_range.strip()
)
)
elif net['network']['type'] == "vxlan":
if "vxlan" not in self.network_types:
self.network_types.append('vxlan')
self.network_vxlan_ranges.append(net['network']['range'])
elif net['network']['type'] == "flat":
if "flat" not in self.network_types:
self.network_types.append('flat')
self.network_flat_networks.append(
net['network']['net_name']
)
# Create the network mappings
if net['network']['type'] not in ['raw', 'vxlan']:
if 'net_name' in net['network']:
if is_metal:
if 'host_bind_override' in net['network']:
bind_device = net['network']['host_bind_override']
else:
bind_device = net['network']['container_bridge']
else:
bind_device = net['network']['container_interface']
if bind_prefix:
bind_device = '%s-%s' % (bind_prefix, bind_device)
self.network_mappings.append(
'%s:%s' % (
net['network']['net_name'],
bind_device
)
)
def main():
# Add in python True False
BOOLEANS.extend(['False', 'True'])
BOOLEANS_TRUE.append('True')
BOOLEANS_FALSE.append('False')
module = AnsibleModule(
argument_spec=dict(
provider_networks=dict(
type='list',
required=True
),
is_metal=dict(
choices=BOOLEANS,
default='false'
),
bind_prefix=dict(
type='str',
required=False,
default=None
)
),
supports_check_mode=False
)
try:
is_metal = module.params.get('is_metal')
if is_metal in BOOLEANS_TRUE:
module.params['is_metal'] = True
else:
module.params['is_metal'] = False
pnp = ProviderNetworksParsing(module=module)
pnp.load_networks(
provider_networks=module.params.get('provider_networks'),
is_metal=module.params.get('is_metal'),
bind_prefix=module.params.get('bind_prefix')
)
# Response dictionary, this adds commas to all list items in string
# format as well as preserves the list functionality for future data
# processing.
resp = {
'network_vlan_ranges': ','.join(pnp.network_vlan_ranges),
'network_vlan_ranges_list': pnp.network_vlan_ranges,
'network_vxlan_ranges': ','.join(pnp.network_vxlan_ranges),
'network_vxlan_ranges_list': pnp.network_vxlan_ranges,
'network_flat_networks': ','.join(pnp.network_flat_networks),
'network_flat_networks_list': pnp.network_flat_networks,
'network_mappings': ','.join(pnp.network_mappings),
'network_mappings_list': pnp.network_mappings,
'network_types': ','.join(pnp.network_types),
'network_types_list': pnp.network_types
}
module.exit_json(changed=True, **resp)
except Exception as exp:
resp = {'stderr': str(exp)}
module.fail_json(msg='Failed Process', **resp)
if __name__ == '__main__':
main()
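
Because `load_networks` never touches its `module` argument, the parsing can be sanity-checked standalone (the network definition below is a sample mirroring the EXAMPLES block above):

networks = [{'network': {'container_bridge': 'br-vlan',
                         'container_type': 'veth',
                         'container_interface': 'eth11',
                         'host_bind_override': 'eth11',
                         'type': 'vlan',
                         'range': '1:1, 101:101',
                         'net_name': 'vlan',
                         'group_binds': ['neutron_linuxbridge_agent']}}]
pnp = ProviderNetworksParsing(module=None)
pnp.load_networks(provider_networks=networks, is_metal=True,
                  bind_prefix='brx')
print(pnp.network_vlan_ranges)  # ['vlan:1:1', 'vlan:101:101']
print(pnp.network_mappings)     # ['vlan:brx-eth11']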


@@ -1,240 +0,0 @@
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import io
import json
import os
import yaml
from ansible import errors
from ansible.runner.return_data import ReturnData
from ansible import utils
from ansible.utils import template
CONFIG_TYPES = {
'ini': 'return_config_overrides_ini',
'json': 'return_config_overrides_json',
'yaml': 'return_config_overrides_yaml'
}
class ActionModule(object):
TRANSFERS_FILES = True
def __init__(self, runner):
self.runner = runner
def grab_options(self, complex_args, module_args):
"""Grab passed options from Ansible complex and module args.
:param complex_args: ``dict``
:param module_args: ``dict``
:returns: ``dict``
"""
options = dict()
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
return options
@staticmethod
def return_config_overrides_ini(config_overrides, resultant):
"""Returns string value from a modified config file.
:param config_overrides: ``dict``
:param resultant: ``str`` || ``unicode``
:returns: ``str``
"""
config = ConfigParser.RawConfigParser(allow_no_value=True)
config_object = io.BytesIO(resultant.encode('utf-8'))
config.readfp(config_object)
for section, items in config_overrides.items():
# If the items value is not a dictionary it is assumed that the
# value is a default item for this config type.
if not isinstance(items, dict):
config.set('DEFAULT', str(section), str(items))
else:
# Attempt to add a section to the config file passing if
# an error is raised that is related to the section
# already existing.
try:
config.add_section(str(section))
except (ConfigParser.DuplicateSectionError, ValueError):
pass
for key, value in items.items():
config.set(str(section), str(key), str(value))
else:
config_object.close()
resultant_bytesio = io.BytesIO()
try:
config.write(resultant_bytesio)
return resultant_bytesio.getvalue()
finally:
resultant_bytesio.close()
def return_config_overrides_json(self, config_overrides, resultant):
"""Returns config json
Its important to note that file ordering will not be preserved as the
information within the json file will be sorted by keys.
:param config_overrides: ``dict``
:param resultant: ``str`` || ``unicode``
:returns: ``str``
"""
original_resultant = json.loads(resultant)
merged_resultant = self._merge_dict(
base_items=original_resultant,
new_items=config_overrides
)
return json.dumps(
merged_resultant,
indent=4,
sort_keys=True
)
def return_config_overrides_yaml(self, config_overrides, resultant):
"""Return config yaml.
:param config_overrides: ``dict``
:param resultant: ``str`` || ``unicode``
:returns: ``str``
"""
original_resultant = yaml.safe_load(resultant)
merged_resultant = self._merge_dict(
base_items=original_resultant,
new_items=config_overrides
)
return yaml.safe_dump(
merged_resultant,
default_flow_style=False,
width=1000,
)
def _merge_dict(self, base_items, new_items):
"""Recursively merge new_items into base_items.
:param base_items: ``dict``
:param new_items: ``dict``
:returns: ``dict``
"""
for key, value in new_items.iteritems():
if isinstance(value, dict):
base_items[key] = self._merge_dict(
base_items.get(key, {}),
value
)
elif isinstance(value, list):
if key in base_items and isinstance(base_items[key], list):
base_items[key].extend(value)
else:
base_items[key] = value
else:
base_items[key] = new_items[key]
return base_items
def run(self, conn, tmp, module_name, module_args, inject,
complex_args=None, **kwargs):
"""Run the method"""
if not self.runner.is_playbook:
raise errors.AnsibleError(
'FAILED: `config_templates` are only available in playbooks'
)
options = self.grab_options(complex_args, module_args)
try:
source = options['src']
dest = options['dest']
config_overrides = options.get('config_overrides', dict())
config_type = options['config_type']
assert config_type.lower() in ['ini', 'json', 'yaml']
except KeyError as exp:
result = dict(failed=True, msg=exp)
return ReturnData(conn=conn, comm_ok=False, result=result)
source_template = template.template(
self.runner.basedir,
source,
inject
)
if '_original_file' in inject:
source_file = utils.path_dwim_relative(
inject['_original_file'],
'templates',
source_template,
self.runner.basedir
)
else:
source_file = utils.path_dwim(self.runner.basedir, source_template)
# Open the template file and return the data as a string. This is
# being done here so that the file can be a vault encrypted file.
resultant = template.template_from_file(
self.runner.basedir,
source_file,
inject,
vault_password=self.runner.vault_pass
)
if config_overrides:
type_merger = getattr(self, CONFIG_TYPES.get(config_type))
resultant = type_merger(
config_overrides=config_overrides,
resultant=resultant
)
# Retemplate the resultant object as it may have new data within it
# as provided by an override variable.
template.template_from_string(
basedir=self.runner.basedir,
data=resultant,
vars=inject,
fail_on_undefined=True
)
# Access to protected method is unavoidable in Ansible 1.x.
new_module_args = dict(
src=self.runner._transfer_str(conn, tmp, 'source', resultant),
dest=dest,
original_basename=os.path.basename(source),
follow=True,
)
module_args_tmp = utils.merge_module_args(
module_args,
new_module_args
)
# Remove data types that are not available to the copy module
complex_args.pop('config_overrides')
complex_args.pop('config_type')
# Return the copy module status. Access to protected method is
# unavoidable in Ansible 1.x.
return self.runner._execute_module(
conn,
tmp,
'copy',
module_args_tmp,
inject=inject,
complex_args=complex_args
)
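
The `_merge_dict` semantics above are the heart of the override behavior: nested dicts merge recursively, lists extend, and scalars are overwritten. A standalone sketch:

def merge_dict(base_items, new_items):
    # Same merge rules as ActionModule._merge_dict above.
    for key, value in new_items.items():
        if isinstance(value, dict):
            base_items[key] = merge_dict(base_items.get(key, {}), value)
        elif isinstance(value, list):
            if key in base_items and isinstance(base_items[key], list):
                base_items[key].extend(value)
            else:
                base_items[key] = value
        else:
            base_items[key] = value
    return base_items

print(merge_dict({'a': {'b': 1}, 'c': [1], 'd': 'old'},
                 {'a': {'e': 2}, 'c': [2], 'd': 'new'}))
# -> {'a': {'b': 1, 'e': 2}, 'c': [1, 2], 'd': 'new'} (key order may vary)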


@@ -1,77 +0,0 @@
# The MIT License (MIT)
#
# Copyright (c) 2015, Red Hat, Inc. and others
# Copyright (c) 2015, Rackspace US, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ----------------------------------------------------------------------------
#
# Note that this callback plugin isn't enabled by default. If you'd like to
# enable it, add the following line to ansible.cfg in the 'playbooks'
# directory in this repository:
#
# callback_plugins = plugins/callbacks
#
# Add that line prior to running the playbooks and you will have detailed
# timing information for Ansible tasks right after each playbook finishes
# running.
#
import time
class CallbackModule(object):
"""
A plugin for timing tasks
"""
def __init__(self):
self.stats = {}
self.current = None
def playbook_on_task_start(self, name, is_conditional):
"""
Logs the start of each task
"""
if self.current is not None:
# Record the running time of the last executed task
self.stats[self.current] = time.time() - self.stats[self.current]
# Record the start time of the current task
self.current = name
self.stats[self.current] = time.time()
def playbook_on_stats(self, stats):
"""
Prints the timings
"""
# Record the timing of the very last task
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
# Sort the tasks by their running time
results = sorted(self.stats.items(), key=lambda value: value[1],
reverse=True)
# Just keep the top 10
results = results[:10]
# Print the timings
for name, elapsed in results:
print "{0:-<70}{1:->9}".format('{0} '.format(name),
' {0:.02f}s'.format(elapsed))


@@ -1,244 +0,0 @@
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2015, Kevin Carter <kevin.carter@rackspace.com>
import os
import re
import urlparse
import hashlib
from ansible import errors
"""Filter usage:
Simple filters that may be useful from within the stack
"""
def _pip_requirement_split(requirement):
version_descriptors = "(>=|<=|>|<|==|~=|!=)"
requirement = requirement.split(';')
requirement_info = re.split(r'%s\s*' % version_descriptors, requirement[0])
name = requirement_info[0]
marker = None
if len(requirement) > 1:
marker = requirement[1]
versions = None
if len(requirement_info) > 1:
versions = requirement_info[1]
return name, versions, marker
def _lower_set_lists(list_one, list_two):
_list_one = set([i.lower() for i in list_one])
_list_two = set([i.lower() for i in list_two])
return _list_one, _list_two
def bit_length_power_of_2(value):
"""Return the smallest power of 2 greater than a numeric value.
:param value: Number to find the smallest power of 2
:type value: ``int``
:returns: ``int``
"""
return 2**(int(value)-1).bit_length()
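# For example (illustrative): bit_length_power_of_2(1000) returns 1024, and
# bit_length_power_of_2(1024) returns 1024, since 1024 is already a power of 2.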
def get_netloc(url):
"""Return the netloc from a URL.
    If the input value is not a valid URL the method will raise an Ansible
filter exception.
:param url: the URL to parse
:type url: ``str``
:returns: ``str``
"""
try:
netloc = urlparse.urlparse(url).netloc
except Exception as exp:
raise errors.AnsibleFilterError(
'Failed to return the netloc of: "%s"' % str(exp)
)
else:
return netloc
def get_netloc_no_port(url):
"""Return the netloc without a port from a URL.
    If the input value is not a valid URL the method will raise an Ansible
filter exception.
:param url: the URL to parse
:type url: ``str``
:returns: ``str``
"""
return get_netloc(url=url).split(':')[0]
def get_netorigin(url):
"""Return the netloc from a URL.
If the input value is not a value URL the method will raise an Ansible
filter exception.
:param url: the URL to parse
:type url: ``str``
:returns: ``str``
"""
try:
parsed_url = urlparse.urlparse(url)
netloc = parsed_url.netloc
scheme = parsed_url.scheme
except Exception as exp:
raise errors.AnsibleFilterError(
'Failed to return the netorigin of: "%s"' % str(exp)
)
else:
return '%s://%s' % (scheme, netloc)
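# For example (illustrative URL): given 'https://example.com:8080/path',
# get_netloc returns 'example.com:8080', get_netloc_no_port returns
# 'example.com', and get_netorigin returns 'https://example.com:8080'.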
def string_2_int(string):
"""Return the an integer from a string.
The string is hashed, converted to a base36 int, and the modulo of 10240
is returned.
:param string: string to retrieve an int from
:type string: ``str``
:returns: ``int``
"""
# Try to encode utf-8 else pass
try:
string = string.encode('utf-8')
except AttributeError:
pass
hashed_name = hashlib.sha256(string).hexdigest()
return int(hashed_name, 36) % 10240
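# For example (illustrative): string_2_int('mariadb') always maps to the same
# integer in the range [0, 10239], which makes it useful for deriving stable
# per-name offsets.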
def pip_requirement_names(requirements):
"""Return a ``str`` of requirement name and list of versions.
:param requirement: Name of a requirement that may have versions within
it. This will use the constant,
VERSION_DESCRIPTORS.
:type requirement: ``str``
:return: ``str``
"""
named_requirements = list()
for requirement in requirements:
name = _pip_requirement_split(requirement)[0]
if name and not name.startswith('#'):
named_requirements.append(name.lower())
return sorted(set(named_requirements))
def pip_constraint_update(list_one, list_two):
_list_one, _list_two = _lower_set_lists(list_one, list_two)
_list_one, _list_two = list(_list_one), list(_list_two)
for item2 in _list_two:
item2_name, item2_versions, _ = _pip_requirement_split(item2)
if item2_versions:
for item1 in _list_one:
if item2_name == _pip_requirement_split(item1)[0]:
item1_index = _list_one.index(item1)
_list_one[item1_index] = item2
break
else:
_list_one.append(item2)
return sorted(_list_one)
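# For example (illustrative):
#   pip_constraint_update(['pip', 'setuptools==1.0'], ['setuptools==2.0'])
# returns ['pip', 'setuptools==2.0']; entries in the second list replace
# same-named entries in the first.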
def splitlines(string_with_lines):
"""Return a ``list`` from a string with lines."""
return string_with_lines.splitlines()
def filtered_list(list_one, list_two):
_list_one, _list_two = _lower_set_lists(list_one, list_two)
return list(_list_one-_list_two)
def git_link_parse(repo):
"""Return a dict containing the parts of a git repository.
:param repo: git repo string to parse.
:type repo: ``str``
:returns: ``dict``
"""
if 'git+' in repo:
_git_url = repo.split('git+', 1)[-1]
else:
_git_url = repo
if '@' in _git_url:
url, branch = _git_url.split('@', 1)
else:
url = _git_url
branch = 'master'
name = os.path.basename(url.rstrip('/'))
_branch = branch.split('#')
branch = _branch[0]
plugin_path = None
# Determine if the package is a plugin type
if len(_branch) > 1 and 'subdirectory=' in _branch[-1]:
plugin_path = _branch[-1].split('subdirectory=')[-1].split('&')[0]
return {
'name': name.split('.git')[0].lower(),
'version': branch,
'plugin_path': plugin_path,
'url': url,
'original': repo
}
def git_link_parse_name(repo):
"""Return the name of a git repo."""
return git_link_parse(repo)['name']
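# For example, parsing a hypothetical repo string (illustrative only):
#   git_link_parse('git+https://github.com/os-cloud/example-repo@master')
# returns {'name': 'example-repo', 'version': 'master', 'plugin_path': None,
#          'url': 'https://github.com/os-cloud/example-repo',
#          'original': 'git+https://github.com/os-cloud/example-repo@master'}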
class FilterModule(object):
"""Ansible jinja2 filters."""
@staticmethod
def filters():
return {
'bit_length_power_of_2': bit_length_power_of_2,
'netloc': get_netloc,
'netloc_no_port': get_netloc_no_port,
'netorigin': get_netorigin,
'string_2_int': string_2_int,
'pip_requirement_names': pip_requirement_names,
'pip_constraint_update': pip_constraint_update,
'splitlines': splitlines,
'filtered_list': filtered_list,
'git_link_parse': git_link_parse,
'git_link_parse_name': git_link_parse_name
}
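# Jinja2 usage in a playbook would look like, e.g. (illustrative):
#   "{{ 'https://example.com:8080/path' | netloc_no_port }}"
# which renders as 'example.com'.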

View File

@ -1,619 +0,0 @@
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
import os
import re
import traceback
from distutils.version import LooseVersion
from ansible import __version__ as __ansible_version__
import yaml
# Used to keep track of git package parts as various files are processed
GIT_PACKAGE_DEFAULT_PARTS = dict()
ROLE_PACKAGES = dict()
REQUIREMENTS_FILE_TYPES = [
'global-requirements.txt',
'test-requirements.txt',
'dev-requirements.txt',
'requirements.txt',
'global-requirement-pins.txt'
]
# List of variable names that could be used within the yaml files that
# represent lists of python packages.
BUILT_IN_PIP_PACKAGE_VARS = [
'service_pip_dependencies',
'pip_common_packages',
'pip_container_packages',
'pip_packages'
]
PACKAGE_MAPPING = {
'packages': set(),
'remote_packages': set(),
'remote_package_parts': list(),
'role_packages': dict()
}
def map_base_and_remote_packages(package, package_map):
"""Determine whether a package is a base package or a remote package
and add to the appropriate set.
:type package: ``str``
:type package_map: ``dict``
"""
if package.startswith(('http:', 'https:', 'git+')):
if '@' not in package:
package_map['packages'].add(package)
else:
git_parts = git_pip_link_parse(package)
package_name = git_parts[-1]
if not package_name:
package_name = git_pip_link_parse(package)[0]
for rpkg in list(package_map['remote_packages']):
rpkg_name = git_pip_link_parse(rpkg)[-1]
if not rpkg_name:
                    rpkg_name = git_pip_link_parse(rpkg)[0]
if rpkg_name == package_name:
package_map['remote_packages'].remove(rpkg)
package_map['remote_packages'].add(package)
break
else:
package_map['remote_packages'].add(package)
else:
package_map['packages'].add(package)
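# For example (illustrative): 'pbr>=1.8' is added to the 'packages' set, while
# 'git+https://github.com/os-cloud/example@1.0' is added to 'remote_packages',
# replacing any older entry that parses to the same package name.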
def parse_remote_package_parts(package_map):
"""Parse parts of each remote package and add them to
the remote_package_parts list.
:type package_map: ``dict``
"""
keys = [
'name',
'version',
'fragment',
'url',
'original',
'egg_name'
]
remote_pkg_parts = [
dict(
zip(
keys, git_pip_link_parse(i)
)
) for i in package_map['remote_packages']
]
package_map['remote_package_parts'].extend(remote_pkg_parts)
package_map['remote_package_parts'] = list(
dict(
(i['name'], i)
for i in package_map['remote_package_parts']
).values()
)
def map_role_packages(package_map):
"""Add and sort packages belonging to a role to the role_packages dict.
:type package_map: ``dict``
"""
for k, v in ROLE_PACKAGES.items():
role_pkgs = package_map['role_packages'][k] = list()
for pkg_list in v.values():
role_pkgs.extend(pkg_list)
else:
package_map['role_packages'][k] = sorted(set(role_pkgs))
def map_base_package_details(package_map):
"""Parse package version and marker requirements and add to the
base packages set.
:type package_map: ``dict``
"""
check_pkgs = dict()
base_packages = sorted(list(package_map['packages']))
for pkg in base_packages:
name, versions, markers = _pip_requirement_split(pkg)
if versions and markers:
versions = '%s;%s' % (versions, markers)
elif not versions and markers:
versions = ';%s' % markers
if name in check_pkgs:
if versions and not check_pkgs[name]:
check_pkgs[name] = versions
else:
check_pkgs[name] = versions
else:
return_pkgs = list()
for k, v in check_pkgs.items():
if v:
return_pkgs.append('%s%s' % (k, v))
else:
return_pkgs.append(k)
package_map['packages'] = set(return_pkgs)
def git_pip_link_parse(repo):
"""Return a tuple containing the parts of a git repository.
Example parsing a standard git repo:
>>> git_pip_link_parse('git+https://github.com/username/repo-name@tag')
('repo-name',
'tag',
None,
     'https://github.com/username/repo-name',
     'git+https://github.com/username/repo-name@tag',
'repo_name')
Example parsing a git repo that uses an installable from a subdirectory:
>>> git_pip_link_parse(
... 'git+https://github.com/username/repo@tag#egg=plugin.name'
... '&subdirectory=remote_path/plugin.name'
... )
('plugin.name',
'tag',
'remote_path/plugin.name',
'https://github.com/username/repo',
'git+https://github.com/username/repo@tag#egg=plugin.name&'
'subdirectory=remote_path/plugin.name',
'plugin.name')
:param repo: git repo string to parse.
:type repo: ``str``
:returns: ``tuple``
"""'meta'
def _meta_return(meta_data, item):
"""Return the value of an item in meta data."""
return meta_data.lstrip('#').split('%s=' % item)[-1].split('&')[0]
_git_url = repo.split('+')
if len(_git_url) >= 2:
_git_url = _git_url[1]
else:
_git_url = _git_url[0]
git_branch_sha = _git_url.split('@')
if len(git_branch_sha) > 2:
branch = git_branch_sha.pop()
url = '@'.join(git_branch_sha)
elif len(git_branch_sha) > 1:
url, branch = git_branch_sha
else:
url = git_branch_sha[0]
branch = 'master'
egg_name = name = os.path.basename(url.rstrip('/'))
egg_name = egg_name.replace('-', '_')
_branch = branch.split('#')
branch = _branch[0]
plugin_path = None
# Determine if the package is a plugin type
if len(_branch) > 1:
if 'subdirectory=' in _branch[-1]:
plugin_path = _meta_return(_branch[-1], 'subdirectory')
name = os.path.basename(plugin_path)
if 'egg=' in _branch[-1]:
egg_name = _meta_return(_branch[-1], 'egg')
egg_name = egg_name.replace('-', '_')
if 'gitname=' in _branch[-1]:
name = _meta_return(_branch[-1], 'gitname')
return name.lower(), branch, plugin_path, url, repo, egg_name
def _pip_requirement_split(requirement):
"""Split pip versions from a given requirement.
The method will return the package name, versions, and any markers.
:type requirement: ``str``
:returns: ``tuple``
"""
version_descriptors = "(>=|<=|>|<|==|~=|!=)"
requirement = requirement.split(';')
requirement_info = re.split(r'%s\s*' % version_descriptors, requirement[0])
name = requirement_info[0]
marker = None
if len(requirement) > 1:
marker = requirement[-1]
versions = None
if len(requirement_info) > 1:
versions = ''.join(requirement_info[1:])
return name, versions, marker
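# For example (illustrative):
#   _pip_requirement_split('pbr>=1.8;python_version<"3"')
# returns ('pbr', '>=1.8', 'python_version<"3"').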
class DependencyFileProcessor(object):
def __init__(self, local_path):
"""Find required files.
:type local_path: ``str``
:return:
"""
self.pip = dict()
self.pip['git_package'] = list()
self.pip['py_package'] = list()
self.pip['git_data'] = list()
self.git_pip_install = 'git+%s@%s'
self.file_names = self._get_files(path=local_path)
# Process everything simply by calling the method
self._process_files()
def _py_pkg_extend(self, packages):
for pkg in packages:
pkg_name = _pip_requirement_split(pkg)[0]
for py_pkg in self.pip['py_package']:
py_pkg_name = _pip_requirement_split(py_pkg)[0]
if pkg_name == py_pkg_name:
self.pip['py_package'].remove(py_pkg)
else:
self.pip['py_package'].extend([i.lower() for i in packages])
@staticmethod
def _filter_files(file_names, ext):
"""Filter the files and return a sorted list.
:type file_names:
:type ext: ``str`` or ``tuple``
:returns: ``list``
"""
_file_names = list()
file_name_words = ['/defaults/', '/vars/', '/user_']
file_name_words.extend(REQUIREMENTS_FILE_TYPES)
for file_name in file_names:
if file_name.endswith(ext):
if any(i in file_name for i in file_name_words):
_file_names.append(file_name)
else:
return _file_names
@staticmethod
def _get_files(path):
"""Return a list of all files in the defaults/repo_packages directory.
:type path: ``str``
:returns: ``list``
"""
paths = os.walk(os.path.abspath(path))
files = list()
for fpath, _, afiles in paths:
for afile in afiles:
files.append(os.path.join(fpath, afile))
else:
return files
def _check_plugins(self, git_repo_plugins, git_data):
"""Check if the git url is a plugin type.
:type git_repo_plugins: ``dict``
:type git_data: ``dict``
"""
for repo_plugin in git_repo_plugins:
strip_plugin_path = repo_plugin['package'].lstrip('/')
plugin = '%s/%s' % (
repo_plugin['path'].strip('/'),
strip_plugin_path
)
name = git_data['name'] = os.path.basename(strip_plugin_path)
git_data['egg_name'] = name.replace('-', '_')
package = self.git_pip_install % (
git_data['repo'], git_data['branch']
)
package += '#egg=%s' % git_data['egg_name']
package += '&subdirectory=%s' % plugin
package += '&gitname=%s' % name
if git_data['fragments']:
package += '&%s' % git_data['fragments']
self.pip['git_data'].append(git_data)
self.pip['git_package'].append(package)
if name not in GIT_PACKAGE_DEFAULT_PARTS:
GIT_PACKAGE_DEFAULT_PARTS[name] = git_data.copy()
else:
GIT_PACKAGE_DEFAULT_PARTS[name].update(git_data.copy())
@staticmethod
def _check_defaults(git_data, name, item):
"""Check if a default exists and use it if an item is undefined.
:type git_data: ``dict``
:type name: ``str``
:type item: ``str``
"""
if not git_data[item] and name in GIT_PACKAGE_DEFAULT_PARTS:
check_item = GIT_PACKAGE_DEFAULT_PARTS[name].get(item)
if check_item:
git_data[item] = check_item
def _process_git(self, loaded_yaml, git_item, yaml_file_name):
"""Process git repos.
:type loaded_yaml: ``dict``
        :type git_item: ``str``
        :type yaml_file_name: ``str``
        """
git_data = dict()
if git_item.split('_')[0] == 'git':
prefix = ''
else:
prefix = '%s_' % git_item.split('_git_repo')[0].replace('.', '_')
# Set the various variable definitions
repo_var = prefix + 'git_repo'
name_var = prefix + 'git_package_name'
branch_var = prefix + 'git_install_branch'
fragment_var = prefix + 'git_install_fragments'
plugins_var = prefix + 'repo_plugins'
# get the repo definition
git_data['repo'] = loaded_yaml.get(repo_var)
# get the repo name definition
name = git_data['name'] = loaded_yaml.get(name_var)
if not name:
name = git_data['name'] = os.path.basename(
git_data['repo'].rstrip('/')
)
git_data['egg_name'] = name.replace('-', '_')
# This conditional is set to ensure we're only processing git
# repos from the defaults file when those same repos are not
# being set in the repo_packages files.
if '/defaults/main' in yaml_file_name:
if name in GIT_PACKAGE_DEFAULT_PARTS:
return
# get the repo branch definition
git_data['branch'] = loaded_yaml.get(branch_var)
self._check_defaults(git_data, name, 'branch')
if not git_data['branch']:
git_data['branch'] = 'master'
package = self.git_pip_install % (git_data['repo'], git_data['branch'])
# get the repo fragment definitions, if any
git_data['fragments'] = loaded_yaml.get(fragment_var)
self._check_defaults(git_data, name, 'fragments')
package += '#egg=%s' % git_data['egg_name']
package += '&gitname=%s' % name
if git_data['fragments']:
package += '&%s' % git_data['fragments']
self.pip['git_package'].append(package)
self.pip['git_data'].append(git_data.copy())
# Set the default package parts to track data during the run
if name not in GIT_PACKAGE_DEFAULT_PARTS:
GIT_PACKAGE_DEFAULT_PARTS[name] = git_data.copy()
else:
            GIT_PACKAGE_DEFAULT_PARTS[name].update(git_data.copy())
# get the repo plugin definitions, if any
git_data['plugins'] = loaded_yaml.get(plugins_var)
self._check_defaults(git_data, name, 'plugins')
if git_data['plugins']:
self._check_plugins(
git_repo_plugins=git_data['plugins'],
git_data=git_data
)
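    # For example (illustrative variable names): a vars file defining
    #   example_git_repo: https://github.com/os-cloud/example
    #   example_git_install_branch: master
    # yields the pip package string
    #   'git+https://github.com/os-cloud/example@master#egg=example&gitname=example'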
def _process_files(self):
"""Process files."""
role_name = None
for file_name in self._filter_files(self.file_names, ('yaml', 'yml')):
with open(file_name, 'r') as f:
                # If there is an exception loading the file, continue,
                # and if the loaded_config is None, continue. This makes
                # sure no bad config gets passed to the rest of the process.
try:
loaded_config = yaml.safe_load(f.read())
except Exception: # Broad exception so everything is caught
continue
else:
if not loaded_config:
continue
if 'roles' in file_name:
_role_name = file_name.split('roles%s' % os.sep)[-1]
role_name = _role_name.split(os.sep)[0]
for key, values in loaded_config.items():
if key.endswith('git_repo'):
self._process_git(
loaded_yaml=loaded_config,
git_item=key,
yaml_file_name=file_name
)
if [i for i in BUILT_IN_PIP_PACKAGE_VARS if i in key]:
self._py_pkg_extend(values)
if role_name:
if role_name in ROLE_PACKAGES:
role_pkgs = ROLE_PACKAGES[role_name]
else:
role_pkgs = ROLE_PACKAGES[role_name] = dict()
pkgs = role_pkgs.get(key, list())
if 'optional' not in key:
pkgs.extend(values)
ROLE_PACKAGES[role_name][key] = pkgs
else:
for k, v in ROLE_PACKAGES.items():
for item_name in v.keys():
if key == item_name:
ROLE_PACKAGES[k][item_name].extend(values)
for file_name in self._filter_files(self.file_names, 'txt'):
if os.path.basename(file_name) in REQUIREMENTS_FILE_TYPES:
with open(file_name, 'r') as f:
packages = [
i.split()[0] for i in f.read().splitlines()
if i
if not i.startswith('#')
]
self._py_pkg_extend(packages)
def _abs_path(path):
return os.path.abspath(
os.path.expanduser(
path
)
)
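# Version-dispatch note: LookupModule.__new__ below picks an implementation at
# instantiation time, returning a v1-style plugin object for Ansible < 2.0 or
# a LookupBase subclass for Ansible >= 2.0.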
class LookupModule(object):
def __new__(class_name, *args, **kwargs):
if LooseVersion(__ansible_version__) < LooseVersion("2.0"):
from ansible import utils, errors
class LookupModuleV1(object):
def __init__(self, basedir=None, **kwargs):
"""Run the lookup module.
:type basedir:
:type kwargs:
"""
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
"""Run the main application.
:type terms: ``str``
:type inject: ``str``
:type kwargs: ``dict``
:returns: ``list``
"""
terms = utils.listify_lookup_plugin_terms(
terms,
self.basedir,
inject
)
if isinstance(terms, basestring):
terms = [terms]
return_data = PACKAGE_MAPPING
for term in terms:
return_list = list()
try:
dfp = DependencyFileProcessor(
local_path=_abs_path(str(term))
)
return_list.extend(dfp.pip['py_package'])
return_list.extend(dfp.pip['git_package'])
except Exception as exp:
raise errors.AnsibleError(
'lookup_plugin.py_pkgs(%s) returned "%s" error "%s"' % (
term,
str(exp),
traceback.format_exc()
)
)
for item in return_list:
map_base_and_remote_packages(item, return_data)
else:
parse_remote_package_parts(return_data)
else:
map_role_packages(return_data)
map_base_package_details(return_data)
# Sort everything within the returned data
for key, value in return_data.items():
if isinstance(value, (list, set)):
return_data[key] = sorted(value)
return [return_data]
return LookupModuleV1(*args, **kwargs)
else:
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModuleV2(LookupBase):
def run(self, terms, variables=None, **kwargs):
"""Run the main application.
:type terms: ``str``
:type variables: ``str``
:type kwargs: ``dict``
:returns: ``list``
"""
if isinstance(terms, basestring):
terms = [terms]
return_data = PACKAGE_MAPPING
for term in terms:
return_list = list()
try:
dfp = DependencyFileProcessor(
local_path=_abs_path(str(term))
)
return_list.extend(dfp.pip['py_package'])
return_list.extend(dfp.pip['git_package'])
except Exception as exp:
raise AnsibleError(
'lookup_plugin.py_pkgs(%s) returned "%s" error "%s"' % (
term,
str(exp),
traceback.format_exc()
)
)
for item in return_list:
map_base_and_remote_packages(item, return_data)
else:
parse_remote_package_parts(return_data)
else:
map_role_packages(return_data)
map_base_package_details(return_data)
# Sort everything within the returned data
for key, value in return_data.items():
if isinstance(value, (list, set)):
return_data[key] = sorted(value)
return [return_data]
return LookupModuleV2(*args, **kwargs)
# Used for testing and debugging. Usage: `python plugins/lookups/py_pkgs.py ../`
if __name__ == '__main__':
import sys
import json
print(json.dumps(LookupModule().run(terms=sys.argv[1:]), indent=4))

View File

@ -125,7 +125,7 @@ pushd $(dirname ${0})/../playbooks
sed -i '/\[defaults\]/a log_path = /openstack/log/ansible-logging/ansible.log' ansible.cfg
# This plugin makes the output easier to read
wget -O plugins/callbacks/human_log.py https://gist.githubusercontent.com/cliffano/9868180/raw/f360f306b3c6d689734a6aa8773a00edf16a0054/human_log.py
wget -O /etc/ansible/plugins/callbacks/human_log.py https://gist.githubusercontent.com/cliffano/9868180/raw/f360f306b3c6d689734a6aa8773a00edf16a0054/human_log.py
# Enable callback plugins
sed -i '/\[defaults\]/a callback_plugins = plugins/callbacks' ansible.cfg

View File

@ -1,5 +1,11 @@
[defaults]
action_plugins = ../playbooks/plugins/actions
callback_plugins = ../playbooks/plugins/callbacks
library = ../playbooks/library
# Additional plugins
lookup_plugins = /etc/ansible/plugins/lookups
filter_plugins = /etc/ansible/plugins/filters
action_plugins = /etc/ansible/plugins/actions
library = /etc/ansible/plugins/library
# Set color options
nocolor = 0
host_key_checking = False