Deletes validations directory
All the validations directory contents have been migrated into Ansible
roles, so this directory can now be deleted.

Change-Id: I41328b2f1e6d8e8d695c0794557567f07c509874
Implements: blueprint validation-framework
Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
commit 98314886b3
parent 73aad529e8
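Note (illustrative): after this change a validation is meant to be consumed as an Ansible role from roles/ through a thin playbook under playbooks/; the setup.cfg hunk below keeps shipping roles/* and playbooks/* while dropping validations/*. A minimal sketch of that calling convention, with a playbook and role name that are hypothetical rather than taken from this diff:

    ---
    # Hypothetical playbooks/512e.yaml; the check itself now lives in a role.
    - hosts: undercloud
      vars:
        metadata:
          name: Advanced Format 512e Support
          groups:
            - prep
            - pre-deployment
      roles:
        - advanced-format-512e-support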
@@ -29,7 +29,6 @@ scripts =
 data_files =
     share/openstack-tripleo-validations/ = hosts.sample
-    share/openstack-tripleo-validations/validations = validations/*
     share/openstack-tripleo-validations/roles = roles/*
     share/openstack-tripleo-validations/playbooks = playbooks/*
     share/openstack-tripleo-validations/callback_plugins = callback_plugins/*
 
@@ -15,9 +15,9 @@
 from mock import MagicMock
 from mock import patch
 
+from library.check_package_update import check_update
+from library.check_package_update import get_package_details
 from tripleo_validations.tests import base
-from validations.library.check_package_update import check_update
-from validations.library.check_package_update import get_package_details
 
 
 PKG_INSTALLED = """\
@@ -67,7 +67,7 @@ class TestCheckUpdate(base.TestCase):
         self.module.fail_json.assert_called_with(
             msg='Package manager "apt" is not supported.')
 
-    @patch('validations.library.check_package_update._command')
+    @patch('library.check_package_update._command')
     def test_fails_if_installed_package_not_found(self, mock_command):
         mock_command.side_effect = [
             ['', 'No package found.'],
@@ -76,7 +76,7 @@ class TestCheckUpdate(base.TestCase):
         self.module.fail_json.assert_called_with(
             msg='No package found.')
 
-    @patch('validations.library.check_package_update._command')
+    @patch('library.check_package_update._command')
     def test_returns_current_and_available_versions(self, mock_command):
         mock_command.side_effect = [
             [PKG_INSTALLED, ''],
@@ -89,7 +89,7 @@ class TestCheckUpdate(base.TestCase):
             latest_minor_version='6.2.0',
             latest_major_version='8.0.0')
 
-    @patch('validations.library.check_package_update._command')
+    @patch('library.check_package_update._command')
     def test_returns_current_version_if_no_updates(self, mock_command):
         mock_command.side_effect = [
             [PKG_INSTALLED, ''],
@@ -23,8 +23,8 @@ Tests for `ini` module.
 import os
 import tempfile
 
+import library.ini as validation
 from tripleo_validations.tests import base
-import validations.library.ini as validation
 
 
 invalid_content = '''
@@ -19,8 +19,8 @@ test_ip_range
 Tests for `ip_range` module.
 """
 
+import library.ip_range as validation
 from tripleo_validations.tests import base
-import validations.library.ip_range as validation
 
 
 class TestIPRange(base.TestCase):
@@ -14,8 +14,8 @@
 # under the License.
 
 
+import library.network_environment as validation
 from tripleo_validations.tests import base
-import validations.library.network_environment as validation
 
 
 class TestNicConfigs(base.TestCase):
@@ -14,9 +14,9 @@
 
 from tripleo_validations.tests import base
 
-from validations.library.node_disks import _get_smallest_disk
-from validations.library.node_disks import _has_root_device_hints
-from validations.library.node_disks import validate_node_disks
+from library.node_disks import _get_smallest_disk
+from library.node_disks import _has_root_device_hints
+from library.node_disks import validate_node_disks
 
 
 # node_1: 2 disks, 1 larger than 4GB (50GB)
@@ -17,8 +17,8 @@
 from mock import MagicMock
 from mock import patch
 
+import library.ovs_dpdk_pmd_cpus_check as validation
 from tripleo_validations.tests import base
-import validations.library.ovs_dpdk_pmd_cpus_check as validation
 
 
 class TestOvsDpdkPmdCpusCheck(base.TestCase):
@@ -27,9 +27,9 @@ class TestOvsDpdkPmdCpusCheck(base.TestCase):
         super(TestOvsDpdkPmdCpusCheck, self).setUp()
         self.module = MagicMock()
 
-    @patch('validations.library.ovs_dpdk_pmd_cpus_check.'
+    @patch('library.ovs_dpdk_pmd_cpus_check.'
            'get_nodes_cores_info')
-    @patch('validations.library.ovs_dpdk_pmd_cpus_check.'
+    @patch('library.ovs_dpdk_pmd_cpus_check.'
           'get_cpus_list_from_mask_value')
     def test_validate_valid_pmd_cpus(self, mock_pmd_cpus, mock_cpus):
         mock_pmd_cpus.return_value = '0,1'
@@ -46,9 +46,9 @@ class TestOvsDpdkPmdCpusCheck(base.TestCase):
         self.module.exit_json.assert_called_with(
             msg="PMD CPU's configured correctly.")
 
-    @patch('validations.library.ovs_dpdk_pmd_cpus_check.'
+    @patch('library.ovs_dpdk_pmd_cpus_check.'
           'get_nodes_cores_info')
-    @patch('validations.library.ovs_dpdk_pmd_cpus_check.'
+    @patch('library.ovs_dpdk_pmd_cpus_check.'
          'get_cpus_list_from_mask_value')
     def test_validate_invalid_pmd_cpus(self, mock_pmd_cpus, mock_cpus):
         mock_pmd_cpus.return_value = '0,2'
@@ -14,8 +14,8 @@
 # under the License.
 
 
+import library.switch_vlans as validation
 from tripleo_validations.tests import base
-import validations.library.switch_vlans as validation
 
 
 class TestSwitchVlans(base.TestCase):
@@ -182,6 +182,6 @@ class TestSwitchVlans(base.TestCase):
         netenv_path = "network_environment.yaml"
         warnings, errors = validation.validate_switch_vlans(
             netenv_path, self.network_data, self.introspect_data)
-        self.assertEqual(warnings, [])
-        self.assertEqual(errors, ['VLAN ID 107 not on attached switch',
-                                  'VLAN ID 107 not on attached switch'])
+        self.assertEqual(warnings, set([]))
+        self.assertEqual(errors, set(['VLAN ID 107 not on attached switch',
+                                      'VLAN ID 107 not on attached switch']))
@@ -1,20 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Advanced Format 512e Support
-      description: >
-        Detect whether the undercloud disks use Advanced Format. If they do,
-        the overcloud images may fail to upload to Glance.
-      groups:
-        - prep
-        - pre-deployment
-  tasks:
-  - name: List the available drives
-    register: drive_list
-    command: "ls /sys/class/block/"
-    changed_when: False
-  - name: Detect whether the drive uses Advanced Format
-    advanced_format: drive={{ item }}
-    when: item is match("^sd.$")
-    with_items: "{{ drive_list.stdout_lines }}"
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-from ansible.plugins.callback import CallbackBase
-
-
-class CallbackModule(CallbackBase):
-    CALLBACK_VERSION = 2.0
-    CALLBACK_NAME = 'fail_if_no_hosts'
-
-    def __init__(self, display=None):
-        super(CallbackModule, self).__init__(display)
-
-    def v2_playbook_on_stats(self, stats):
-        if len(stats.processed.keys()) == 0:
-            sys.exit(10)
@@ -1,198 +0,0 @@
-#!/usr/bin/env python
-
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-import pprint
-
-from ansible.plugins.callback import CallbackBase
-
-
-FAILURE_TEMPLATE = """\
-Task '{}' failed:
-Host: {}
-Message: {}
-"""
-
-
-WARNING_TEMPLATE = """\
-Task '{}' succeeded, but had some warnings:
-Host: {}
-Warnings:
-"""
-
-DEBUG_TEMPLATE = """\
-Task: Debug
-Host: {}
-{}
-"""
-
-
-def indent(text):
-    '''Indent the given text by four spaces.'''
-    return ''.join('    {}\n'.format(line) for line in text.splitlines())
-
-
-def print_failure_message(host_name, task_name, results, abridged_result):
-    '''Print a human-readable error info from Ansible result dictionary.'''
-    def is_script(results):
-        return ('rc' in results and
-                'invocation' in results and
-                results['invocation'].get('module_name') == 'script' and
-                '_raw_params' in results['invocation'].get('module_args', {}))
-
-    display_full_results = False
-    if 'rc' in results and 'cmd' in results:
-        command = results['cmd']
-        # The command can be either a list or a string. Concat if it's a list:
-        if type(command) == list:
-            command = " ".join(results['cmd'])
-        message = "Command `{}` exited with code: {}".format(
-            command, results['rc'])
-        # There may be an optional message attached to the command. Display it:
-        if 'msg' in results:
-            message = message + ": " + results['msg']
-    elif is_script(results):
-        script_name = results['invocation']['module_args']['_raw_params']
-        message = "Script `{}` exited with code: {}".format(
-            script_name, results['rc'])
-    elif 'msg' in results:
-        message = results['msg']
-    else:
-        message = "Unknown error"
-        display_full_results = True
-    print(FAILURE_TEMPLATE.format(task_name, host_name, message))
-    stdout = results.get('module_stdout', results.get('stdout', ''))
-    if stdout:
-        print('stdout:')
-        print(indent(stdout))
-    stderr = results.get('module_stderr', results.get('stderr', ''))
-    if stderr:
-        print('stderr:')
-        print(indent(stderr))
-    if display_full_results:
-        print("Could not get an error message. Here is the Ansible output:")
-        pprint.pprint(abridged_result, indent=4)
-    warnings = results.get('warnings', [])
-    if warnings:
-        print("Warnings:")
-        for warning in warnings:
-            print("*", warning)
-        print("")
-
-
-# TODO(shadower): test with async settings
-class CallbackModule(CallbackBase):
-    CALLBACK_VERSION = 2.0
-    CALLBACK_TYPE = 'stdout'
-    CALLBACK_NAME = 'validation_output'
-
-    def __init__(self, display=None):
-        super(CallbackModule, self).__init__(display)
-
-    def v2_playbook_on_play_start(self, play):
-        pass  # No need to notify that a play started
-
-    def v2_playbook_on_task_start(self, task, is_conditional):
-        pass  # No need to notify that a task started
-
-    def v2_runner_on_ok(self, result, **kwargs):
-        host_name = result._host
-        task_name = result._task.get_name()
-        results = result._result  # A dict of the module name etc.
-        self._dump_results(results)
-        warnings = results.get('warnings', [])
-        # Print only tasks that produced some warnings:
-        if warnings:
-            print(WARNING_TEMPLATE.format(task_name, host_name))
-            for warning in warnings:
-                print("*", warning)
-
-        # Print the result of debug module
-        if (('invocation' in results) and
-                ('module_name' in results['invocation'])):
-
-            if ((results['invocation']['module_name'] == 'debug') and
-                    ('module_args' in results['invocation'])):
-
-                output = ""
-
-                # Variable and its value
-                if 'var' in results['invocation']['module_args']:
-                    variable = results['invocation']['module_args']['var']
-                    value = results[variable]
-                    output = "{}: {}".format(variable, str(value))
-
-                # Debug message
-                elif 'msg' in results['invocation']['module_args']:
-                    output = "Message: {}".format(
-                        results['invocation']['module_args']['msg'])
-
-                print(DEBUG_TEMPLATE.format(host_name, output))
-
-    def v2_runner_on_failed(self, result, **kwargs):
-        host_name = result._host
-        task_name = result._task.get_name()
-
-        result_dict = result._result  # A dict of the module name etc.
-        abridged_result = self._dump_results(result_dict)
-
-        if 'results' in result_dict:
-            # The task is a list of items under `results`
-            for item in result_dict['results']:
-                if item.get('failed', False):
-                    print_failure_message(host_name, task_name, item, item)
-        else:
-            # The task is a "normal" module invocation
-            print_failure_message(host_name, task_name, result_dict,
-                                  abridged_result)
-
-    def v2_runner_on_skipped(self, result, **kwargs):
-        pass  # No need to print skipped tasks
-
-    def v2_runner_on_unreachable(self, result, **kwargs):
-        host_name = result._host
-        task_name = result._task.get_name()
-        results = {'msg': 'The host is unreachable.'}
-        print_failure_message(host_name, task_name, results, results)
-
-    def v2_playbook_on_stats(self, stats):
-        def failed(host):
-            return (stats.summarize(host).get('failures', 0) > 0 or
-                    stats.summarize(host).get('unreachable', 0) > 0)
-
-        hosts = sorted(stats.processed.keys())
-        failed_hosts = [host for host in hosts if failed(host)]
-
-        if hosts:
-            if failed_hosts:
-                if len(failed_hosts) == len(hosts):
-                    print("Failure! The validation failed for all hosts:")
-                    for failed_host in failed_hosts:
-                        print("*", failed_host)
-                else:
-                    print("Failure! The validation failed for hosts:")
-                    for failed_host in failed_hosts:
-                        print("*", failed_host)
-                    print("and passed for hosts:")
-                    for host in [h for h in hosts if h not in failed_hosts]:
-                        print("*", host)
-            else:
-                print("Success! The validation passed for all hosts:")
-                for host in hosts:
-                    print("*", host)
-        else:
-            print("Warning! The validation did not run on any host.")
@@ -1,46 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Events Database Size Check
-      description: >
-        The undercloud's events database can grow to a substantial
-        size if event_time_to_live is set to a negative value (infinite limit).
-
-        This validation checks event_time_to_live and fails if the variable is
-        set to a negative value or if it has no custom setting
-        (their value is -1 by default).
-      groups:
-        - pre-deployment
-    event_ttl_check: "event_time_to_live"
-    panko_config_file: "/var/lib/config-data/puppet-generated/panko/etc/panko/panko.conf"
-  tasks:
-  - name: Get the path of tripleo undercloud config file
-    become: true
-    hiera: name="tripleo_undercloud_conf_file"
-
-  - name: Check if telemetry services are enabled
-    ini:
-      path: "{{ tripleo_undercloud_conf_file }}"
-      section: DEFAULT
-      key: enable_telemetry
-    register: enable_telemetry
-    become: true
-
-  - name: End play if telemetry is not enabled
-    meta: end_play
-    when: not enable_telemetry.value|bool
-
-  - name: Get event ttl from panko configuration file
-    become: true
-    ini:
-      path: "{{ panko_config_file }}"
-      section: database
-      key: "{{ event_ttl_check }}"
-      ignore_missing_file: true
-    register: event_check_result
-
-  - name: Fail if event_ttl_check is set to -1
-    fail:
-      msg: "'{{ panko_config_file }}[database]/{{ event_ttl_check }} is set to -1 or not set'"
-    when: event_check_result.value|int|default(-1) < 0
@@ -1,23 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Check if ceph-ansible is installed on the undercloud
-      description: >
-        Prints a message if ceph-ansible isn't installed
-      groups:
-        - pre-deployment
-  tasks:
-  - name: Check if ceph-ansible is installed
-    shell: rpm -q ceph-ansible || true
-    args:
-      warn: no
-    changed_when: False
-    ignore_errors: True
-    register: ceph_ansible_installed
-
-  - name: Warn about missing ceph-ansible
-    warn:
-      msg: If planning to use Ceph, it is necessary to install the ceph-ansible on the undercloud
-    when:
-      - ceph_ansible_installed.stdout.find('is not installed') != -1
@@ -1,30 +0,0 @@
----
-- hosts: Controller
-  vars:
-    metadata:
-      name: Check the status of the ceph cluster
-      description: >
-        Uses `ceph health` to check if cluster is in HEALTH_WARN state
-        and prints a debug message.
-
-      groups:
-        - post-deployment
-  tasks:
-  - name: Check if ceph_mon is deployed
-    become: true
-    shell: hiera -c /etc/puppet/hiera.yaml enabled_services | egrep -sq ceph_mon
-    ignore_errors: true
-    register: ceph_mon_enabled
-    changed_when: False
-  - name: Get ceph health
-    become: true
-    shell: ceph health | awk '{print $1}'
-    register: ceph_health
-    when:
-      - ceph_mon_enabled is succeeded
-  - name: Check ceph health
-    warn:
-      msg: Ceph is in {{ ceph_health.stdout }} state.
-    when:
-      - ceph_mon_enabled is succeeded
-      - ceph_health.stdout == 'HEALTH_WARN'
@@ -1,34 +0,0 @@
----
-- hosts: undercloud, overcloud
-  vars:
-    metadata:
-      name: XFS ftype check
-      description: >
-        Check if there is at least 1 XFS volume
-        with ftype=0 in any deployed node.
-      groups:
-        - pre-upgrade
-  tasks:
-  - name: Check if there are XFS volumes with ftype=0
-    become: true
-    shell: |
-      for dev in $(df -h | grep '/dev/' | grep -v 'tmp' | cut -d' ' -f1)
-      do
-        parseftype=$(xfs_info $dev | grep ftype=0);
-        if [[ ! -z "$parseftype" ]]; then
-          ftype="ftype=0";
-          break;
-        fi
-      done
-      echo $ftype;
-    register: ftype
-    changed_when: false
-  - name: Check ftype
-    fail:
-      msg: >
-        XFS volumes formatted using ftype=0 are incompatible
-        with the docker overlayfs driver.
-        Run xfs_info on {{ ansible_fqdn }} and fix those volumes
-        before proceeding with the upgrade.
-    when:
-      - ftype.stdout == 'ftype=0'
@@ -1,23 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Check if latest minor version is installed
-      description: >
-        Makes sure python-tripleoclient is at its latest minor version
-        before starting an upgrade.
-      groups:
-        - pre-upgrade
-    packages:
-      - python-tripleoclient
-  tasks:
-  - name: Get available updates for packages
-    check_package_update: package={{ item }} pkg_mgr={{ ansible_pkg_mgr }}
-    with_items: "{{ packages }}"
-    register: updates
-
-  - name: Check if current version is latest minor
-    with_items: "{{ updates.results }}"
-    assert:
-      that: "item.latest_minor_version == item.current_version"
-      msg: "A newer version of the {{ item.name }} package is available: {{ item.latest_minor_version }} (currently {{ item.current_version }})."
@@ -1,48 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Check network_gateway on the provisioning network
-      description: >
-        If `gateway` in `undercloud.conf` is different from `local_ip`,
-        verify that the gateway exists and is reachable.
-      groups:
-        - pre-introspection
-  tasks:
-  - name: Get the path of tripleo undercloud config file
-    become: true
-    hiera: name="tripleo_undercloud_conf_file"
-
-  - name: Get the local_subnet name from the undercloud_conf file
-    become: True
-    ini:
-      path: "{{ tripleo_undercloud_conf_file }}"
-      section: DEFAULT
-      key: local_subnet
-      ignore_missing_file: True
-    register: local_subnet
-
-  - name: Get gateway value from the undercloud.conf file
-    become: true
-    ini:
-      path: "{{ tripleo_undercloud_conf_file }}"
-      section: "{% if local_subnet.value %}{{ local_subnet.value }}{% else %}ctlplane-subnet{% endif %}"
-      key: gateway
-      ignore_missing_file: True
-    register: gateway
-
-  - name: Get local_ip value from the undercloud.conf file
-    become: true
-    ini:
-      path: "{{ tripleo_undercloud_conf_file }}"
-      section: DEFAULT
-      key: local_ip
-      ignore_missing_file: True
-    register: local_ip
-
-  - name: Test network_gateway if different from local_ip
-    icmp_ping: host="{{ gateway.value | default('0.0.0.0') }}"
-    when: >
-      "local_ip.value | default('0.0.0.0') | ipaddr('address')"
-      !=
-      "gateway.value | default('0.0.0.0') | ipaddr('address')"
@@ -1,22 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Collect and verify role flavors
-      description: >
-        This validation checks the flavors assigned to roles exist and have the
-        correct capabilities set.
-      groups:
-        - pre-deployment
-        - pre-upgrade
-  tasks:
-  - name: Collect and check the flavors
-    check_flavors:
-      roles_info: "{{ lookup('roles_info', wantlist=True) }}"
-      flavors: "{{ lookup('nova_flavors', wantlist=True) }}"
-    register: flavor_result
-
-  - name: Verify the profiles
-    verify_profiles:
-      nodes: "{{ lookup('ironic_nodes', wantlist=True) }}"
-      flavors: "{{ flavor_result.flavors }}"
@@ -1,104 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Verify docker containers are up and ports are open
-      description: >
-        Ensure relevant docker containers are up and running, with ports
-        open to listen.
-
-        We iterate through a list of container names and ports provided in
-        defaults, and ensure the system has those available.
-      groups:
-        - post-deployment
-        - pre-upgrade
-    listening_ip: "{{ ctlplane_ip }}"
-    open_ports:
-      - 111
-      - 873
-      - 3000
-      - 3306
-      - 4369
-      - 5000
-      - 5050
-      - 5672
-      - 6000
-      - 6001
-      - 6002
-      - 6379
-      - 6385
-      - 8000
-      - 8004
-      - 8080
-      - 8088
-      - 8774
-      - 8775
-      - 8778
-      - 8787
-      - 8888
-      - 8989
-      - 9000
-      - 9292
-      - 9696
-      - 11211
-      - 15672
-      - 25672
-      - 35357
-      - 39422
-      - port: 22
-        search_regex: OpenSSH
-    running_containers:
-      - glance_api
-      - heat_api
-      - heat_api_cfn
-      - heat_api_cron
-      - heat_engine
-      - ironic_api
-      - ironic_conductor
-      - ironic_inspector
-      - ironic_inspector_dnsmasq
-      - ironic_neutron_agent
-      - ironic_pxe_http
-      - ironic_pxe_tftp
-      - iscsid
-      - keystone
-      - keystone_cron
-      - logrotate_crond
-      - memcached
-      - mistral_api
-      - mistral_engine
-      - mistral_event_engine
-      - mistral_executor
-      - mysql
-      - neutron_api
-      - neutron_dhcp
-      - neutron_l3_agent
-      - neutron_ovs_agent
-      - nova_api
-      - nova_api_cron
-      - nova_compute
-      - nova_conductor
-      - nova_metadata
-      - nova_placement
-      - nova_scheduler
-      - rabbitmq
-      - swift_account_auditor
-      - swift_account_reaper
-      - swift_account_replicator
-      - swift_account_server
-      - swift_container_auditor
-      - swift_container_replicator
-      - swift_container_server
-      - swift_container_updater
-      - swift_object_auditor
-      - swift_object_expirer
-      - swift_object_replicator
-      - swift_object_server
-      - swift_object_updater
-      - swift_proxy
-      - swift_rsync
-      - tripleo_ui
-      - zaqar
-      - zaqar_websocket
-  tasks:
-  - include_tasks: tasks/containerized_services.yaml
@@ -1,19 +0,0 @@
----
-- hosts: undercloud, Controller
-  vars:
-    metadata:
-      name: Verify that keystone admin token is disabled
-      description: >
-        This validation checks that keystone admin token is disabled on both
-        undercloud and overcloud controller after deployment.
-      groups:
-        - post-deployment
-    keystone_conf_file: "/var/lib/config-data/puppet-generated/keystone/etc/keystone/keystone.conf"
-  tasks:
-  - name: Fetch token value
-    become: true
-    ini: path={{ keystone_conf_file }} section=DEFAULT key=admin_token ignore_missing_file=True
-    register: token_result
-  - name: Check if token value is disabled.
-    fail: msg="Keystone admin token is not disabled."
-    when: token_result.value != None
@@ -1,38 +0,0 @@
----
-- hosts: Controller
-  vars:
-    metadata:
-      name: Check controller ulimits
-      description: >
-        This will check the ulimits of each controller.
-      groups:
-        - post-deployment
-    nofiles_min: 1024
-    nproc_min: 2048
-
-  tasks:
-  - name: Get nofiles limit
-    become: true
-    # NOTE: `ulimit` is a shell builtin so we have to invoke it like this:
-    command: sh -c "ulimit -n"
-    register: nofilesval
-    changed_when: False
-  - name: Check nofiles limit
-    fail:
-      msg: >
-        nofiles is set to {{ nofilesval.stdout }}. It should be at least
-        {{ nofiles_min }} or higher, depending on available resources.
-    failed_when: "nofilesval.stdout|int < nofiles_min"
-
-  - name: Get nproc limit
-    become: true
-    # NOTE: `ulimit` is a shell builtin so we have to invoke it like this:
-    command: sh -c "ulimit -u"
-    register: nprocval
-    changed_when: False
-  - name: Check nproc limit
-    fail:
-      msg: >
-        nproc is set to {{ nprocval.stdout }}. It should be at least
-        {{ nproc_min }} or higher, depending on available resources.
-    failed_when: "nprocval.stdout|int < nproc_min"
@@ -1,39 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Check the number of IP addresses available for the overcloud nodes
-      description: >
-        Verify that the number of IP addresses defined in `dhcp_start` and
-        `dhcp_end` fields in `undercloud.conf` is not too low.
-      groups:
-        - pre-introspection
-    ctlplane_iprange_min_size: 25
-  tasks:
-  - name: Get the path of tripleo undercloud config file
-    become: true
-    hiera: name="tripleo_undercloud_conf_file"
-
-  - name: Get dhcp_start value from the undercloud.conf file
-    become: true
-    ini:
-      path: "{{ tripleo_undercloud_conf_file }}"
-      section: ctlplane-subnet
-      key: dhcp_start
-      ignore_missing_file: True
-    register: dhcp_start
-
-  - name: Get dhcp_end value from the undercloud.conf file
-    become: true
-    ini:
-      path: "{{ tripleo_undercloud_conf_file }}"
-      section: ctlplane-subnet
-      key: dhcp_end
-      ignore_missing_file: True
-    register: dhcp_end
-
-  - name: Check the size of the DHCP range for overcloud nodes
-    ip_range:
-      start: "{{ dhcp_start.value|default('192.0.2.5') }}"
-      end: "{{ dhcp_end.value|default('192.0.2.24') }}"
-      min_size: "{{ ctlplane_iprange_min_size }}"
@@ -1,50 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Verify hypervisor statistics
-      description: >
-        This validation checks that the nodes and hypervisor statistics
-        add up.
-      groups:
-        - pre-deployment
-  tasks:
-  - name: Retrieve the hypervisor statistics
-    set_fact:
-      statistics: "{{ lookup('nova_hypervisor_statistics', wantlist=True) }}"
-
-  - name: Get default role counts
-    set_fact:
-      roles_info: "{{ lookup('roles_info', wantlist=True) }}"
-
-  - name: Set requested count
-    set_fact:
-      requested_count: "{{ roles_info|sum(attribute='count') }}"
-
-  - name: Get associated nodes
-    set_fact:
-      associated_nodes: "{{ lookup('ironic_nodes', 'associated', wantlist=True) }}"
-
-  - name: Get available nodes
-    set_fact:
-      available_nodes: "{{ lookup('ironic_nodes', 'provision_state', ['available'], wantlist=True) }}"
-
-  - name: Set count of available nodes
-    set_fact:
-      available_count: "{{ ((associated_nodes|length) + (available_nodes|length))|int }}"
-
-  - name: Fail when requested is more than available
-    fail:
-      msg: >
-        Not enough baremetal nodes - available: {{ available_count }},
-        requested: {{ requested_count }}
-    failed_when: requested_count|int > available_count|int
-
-  - name: Fail when hypervisor count is less than available count
-    fail:
-      msg: >
-        Only {{ statistics.count }} nodes are exposed to Nova of
-        {{ available_count }} requests. Check that enough nodes are
-        in 'available' state with maintenance mode off.
-    failed_when: statistics.count < available_count|int
-
@@ -1,33 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Verify existence of deployment images
-      description: >
-        This validation checks that images bm-deploy-kernel and
-        bm-deploy-ramdisk exist before deploying the overcloud,
-        and that only one exists by that name.
-      groups:
-        - pre-deployment
-        - pre-upgrade
-    deploy_kernel_name: "bm-deploy-kernel"
-    deploy_ramdisk_name: "bm-deploy-ramdisk"
-  tasks:
-  - name: Fetch deploy kernel by name
-    set_fact:
-      deploy_kernel_id: "{{ lookup('glance_images', 'name', ['{{ deploy_kernel_name }}'], wantlist=True) | map(attribute='id') | list }}"
-  - name: Fetch deploy ramdisk by name
-    set_fact:
-      deploy_ramdisk_id: "{{ lookup('glance_images', 'name', ['{{ deploy_ramdisk_name }}'], wantlist=True) | map(attribute='id') | list }}"
-  - name: Fail if image is not found
-    fail: msg="No image with the name '{{ item.name }}' found - make sure you have uploaded boot images."
-    failed_when: item.id | length < 1
-    with_items:
-      - { name: '{{ deploy_kernel_name }}', id: '{{ deploy_kernel_id }}' }
-      - { name: '{{ deploy_ramdisk_name }}', id: '{{ deploy_ramdisk_id }}' }
-  - name: Fail if there is more than one image
-    fail: msg="Please make sure there is only one image named '{{ item.name }}' in glance."
-    failed_when: item.id | length > 1
-    with_items:
-      - { name: '{{ deploy_kernel_name }}', id: '{{ deploy_kernel_id }}' }
-      - { name: '{{ deploy_ramdisk_name }}', id: '{{ deploy_ramdisk_id }}' }
@@ -1,26 +0,0 @@
----
-- hosts: undercloud
-  become: true
-  vars:
-    metadata:
-      name: DHCP on the Introspection Network
-      description: >
-        An unexpected DHCP server on the network used for node
-        introspection can cause some nodes to not be inspected.
-
-        This validations checks for the DHCP responses on the
-        interface specified in ironic-inspector.conf.
-      groups:
-        - pre-introspection
-  tasks:
-  - name: Look up the introspection interface
-    become: True
-    ini: path=/var/lib/config-data/puppet-generated/ironic_inspector/etc/ironic-inspector/inspector.conf section=iptables key=dnsmasq_interface
-    register: interface
-  - name: Look up the introspection interface from the deprecated option
-    become: True
-    ini: path=/var/lib/config-data/puppet-generated/ironic_inspector/etc/ironic-inspector/inspector.conf section=firewall key=dnsmasq_interface
-    register: interface_deprecated
-  - name: Look for rogue DHCP servers
-    script: files/rogue_dhcp.py {{ interface.value or interface_deprecated.value or 'br-ctlplane' }}
-    changed_when: False
@@ -1,29 +0,0 @@
----
-- hosts: undercloud
-  become: true
-  vars:
-    metadata:
-      name: DHCP on the Provisioning Network
-      description: >
-        An unexpected DHCP server on the provisioning network can
-        cause problems with deploying the Ironic nodes.
-
-        This validation checks for DHCP responses on the undercloud's
-        provisioning interface (eth1 by default) and fails if there
-        are any.
-      groups:
-        - pre-deployment
-  tasks:
-  - name: Get the path of tripleo undercloud config file
-    hiera: name="tripleo_undercloud_conf_file"
-
-  - name: Gather undercloud.conf values
-    ini:
-      path: "{{ tripleo_undercloud_conf_file }}"
-      section: DEFAULT
-      key: local_interface
-      ignore_missing_file: True
-    register: local_interface
-
-  - name: Look for DHCP responses
-    script: files/rogue_dhcp.py {{ local_interface.value|default('eth1') }}
@@ -1,14 +0,0 @@
----
-- hosts: undercloud, overcloud
-  vars:
-    metadata:
-      name: Verify DNS
-      description: >
-        Verify that the DNS resolution works
-      groups:
-        - pre-deployment
-    server_to_lookup: example.com
-  tasks:
-  - name: Ensure DNS resolution works
-    command: "getent hosts {{ server_to_lookup }}"
-    changed_when: False
@@ -1,239 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import fcntl
-import socket
-import struct
-import sys
-import threading
-import time
-
-ETH_P_IP = 0x0800
-SIOCGIFHWADDR = 0x8927
-
-dhcp_servers = []
-interfaces_addresses = {}
-
-
-class DHCPDiscover(object):
-    def __init__(self, interface):
-        self.interface = interface
-        self.mac = interfaces_addresses[interface]
-        self.socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
-
-    def bind(self):
-        self.socket.bind((self.interface, 0))
-
-    def send(self):
-        packet = self.packet()
-        self.bind()
-        self.socket.send(packet)
-
-    def close_socket(self):
-        self.socket.close()
-
-    def packet(self):
-        return self.ethernet_header() \
-            + self.ip_header() \
-            + self.udp_header() \
-            + self.dhcp_discover_payload()
-
-    def ethernet_header(self):
-        return struct.pack('!6s6sH',
-                           b'\xff\xff\xff\xff\xff\xff',  # Dest HW address
-                           self.mac,  # Source HW address
-                           ETH_P_IP)  # EtherType - IPv4
-
-    def ip_header(self, checksum=None):
-        # 0                   1                   2                   3
-        # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-        # |Version|  IHL  |Type of Service|          Total Length         |
-        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-        # |         Identification        |Flags|      Fragment Offset    |
-        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-        # |  Time to Live |    Protocol   |         Header Checksum       |
-        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-        # |                       Source Address                          |
-        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-        # |                    Destination Address                        |
-        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-        # |                    Options                    |    Padding    |
-        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-        if checksum is None:
-            checksum = self.ip_checksum()
-        return struct.pack('!BBHHHBBHI4s',
-                           (4 << 4) + 5,  # IPv4 + 20 bytes header length
-                           0,  # TOS
-                           272,  # Total Length
-                           1,  # Id
-                           0,  # Flags & Fragment Offset
-                           64,  # TTL
-                           socket.IPPROTO_UDP,
-                           checksum,
-                           0,  # Source
-                           socket.inet_aton('255.255.255.255'))  # Destination
-
-    def ip_checksum(self):
-        generated_checksum = self._checksum(self.ip_header(checksum=0))
-        return socket.htons(generated_checksum)
-
-    def udp_header(self, checksum=None):
-        #  0      7 8     15 16    23 24    31
-        # +--------+--------+--------+--------+
-        # |     Source      |   Destination   |
-        # |      Port       |      Port       |
-        # +--------+--------+--------+--------+
-        # |                 |                 |
-        # |     Length      |    Checksum     |
-        # +--------+--------+--------+--------+
-        if checksum is None:
-            checksum = self.udp_checksum()
-        return struct.pack('!HHHH',
-                           68,
-                           67,
-                           252,
-                           checksum)
-
-    def udp_checksum(self):
-        pseudo_header = self.ip_pseudo_header()
-        generated_checksum = self._checksum(pseudo_header +
-                                            self.udp_header(checksum=0) +
-                                            self.dhcp_discover_payload())
-        return socket.htons(generated_checksum)
-
-    def ip_pseudo_header(self):
-        #  0      7 8     15 16    23 24    31
-        # +--------+--------+--------+--------+
-        # |          source address           |
-        # +--------+--------+--------+--------+
-        # |        destination address        |
-        # +--------+--------+--------+--------+
-        # |  zero  |protocol|   UDP length    |
-        # +--------+--------+--------+--------+
-        return struct.pack('!I4sBBH',
-                           0,
-                           socket.inet_aton('255.255.255.255'),
-                           0,
-                           socket.IPPROTO_UDP,
-                           252)  # Length
-
-    def dhcp_discover_payload(self):
-        return struct.pack('!BBBBIHHIIII6s10s67s125s4s3s1s',
-                           1,  # Message Type - Boot Request
-                           1,  # Hardware Type - Ethernet
-                           6,  # HW Address Length
-                           0,  # Hops
-                           0,  # Transaction ID
-                           0,  # Seconds elapsed
-                           0,  # Bootp flags
-                           0,  # Client IP Address
-                           0,  # Your IP Address
-                           0,  # Next server IP Address
-                           0,  # Relay Agent IP Address
-                           self.mac,  # Client MAC address
-                           b'\x00' * 10,  # Client HW address padding
-                           b'\x00' * 67,  # Server host name not given
-                           b'\x00' * 125,  # Boot file name not given
-                           b'\x63\x82\x53\x63',  # Magic Cookie
-                           b'\x35\x01\x01',  # DHCP Message Type = Discover
-                           b'\xff'  # Option End
-                           )
-
-    def _checksum(self, msg):
-        s = 0
-        for i in range(0, len(msg), 2):
-            w = ord(msg[i]) + (ord(msg[i + 1]) << 8)
-            s = s + w
-        s = (s >> 16) + (s & 0xffff)
-        s = s + (s >> 16)
-        s = ~s & 0xffff
-        return s
-
-
-def get_hw_addresses(interfaces):
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    for interface in interfaces:
-        info = fcntl.ioctl(s.fileno(),
-                           SIOCGIFHWADDR,
-                           struct.pack('256s', interface[:15]))
-        interfaces_addresses[interface] = info[18:24]
-    s.close()
-
-
-def inspect_frame(data):
-    eth_type = struct.unpack('!H', data[12:14])[0]
-    protocol = ord(data[23])
-    src_port = struct.unpack('!H', data[34:36])[0]
-    dst_port = struct.unpack('!H', data[36:38])[0]
-    msg_type = ord(data[42])
-    # Make sure we got a DHCP Offer
-    if eth_type == ETH_P_IP \
-            and protocol == socket.IPPROTO_UDP \
-            and src_port == 67 \
-            and dst_port == 68 \
-            and msg_type == 2:  # DHCP Boot Reply
-        server_ip_address = '.'.join(["%s" % ord(m) for m in
-                                      data[26:30]])
-        server_hw_address = ":".join(["%02x" % ord(m) for m in
-                                      data[6:12]])
-        dhcp_servers.append([server_ip_address, server_hw_address])
-
-
-def wait_for_dhcp_offers(interfaces, timeout):
-    listening_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW,
-                                     socket.htons(ETH_P_IP))
-    listening_socket.settimeout(timeout)
-    allowed_macs = interfaces_addresses.values()
-    end_of_time = time.time() + timeout
-    try:
-        while time.time() < end_of_time:
-            data = listening_socket.recv(1024)
-            dst_mac = struct.unpack('!6s', data[0:6])[0]
-            if dst_mac in allowed_macs:
-                inspect_frame(data)
-    except socket.timeout:
-        pass
-    listening_socket.close()
-
-
-def main():
-    interfaces = sys.argv[1:]
-    timeout = 5
-
-    get_hw_addresses(interfaces)
-
-    listening_thread = threading.Thread(target=wait_for_dhcp_offers,
-                                        args=[interfaces, timeout])
-    listening_thread.start()
-
-    for interface in interfaces:
-        dhcp_discover = DHCPDiscover(interface)
-        dhcp_discover.send()
-        dhcp_discover.close_socket()
-
-    listening_thread.join()
-
-    if dhcp_servers:
-        sys.stderr.write('Found {} DHCP servers:'.format(len(dhcp_servers)))
-        for ip, mac in dhcp_servers:
-            sys.stderr.write("\n* {} ({})".format(ip, mac))
-        sys.exit(1)
-    else:
-        print("No DHCP servers found.")
-
-
-if __name__ == '__main__':
-    main()
@@ -1,37 +0,0 @@
----
-- hosts: Controller
-  vars:
-    metadata:
-      name: HAProxy configuration
-      description: Verify the HAProxy configuration has recommended values.
-      groups:
-        - post-deployment
-    config_file: '/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg'
-    global_maxconn_min: 20480
-    defaults_maxconn_min: 4096
-    defaults_timeout_queue: '2m'
-    defaults_timeout_client: '2m'
-    defaults_timeout_server: '2m'
-    defaults_timeout_check: '10s'
-  tasks:
-  - name: Gather the HAProxy config
-    become: true
-    haproxy_conf: path="{{ config_file }}"
-  - name: Verify global maxconn
-    fail: msg="The 'global maxconn' value '{{ haproxy_conf.global.maxconn }}' must be greater than {{ global_maxconn_min }}"
-    failed_when: haproxy_conf.global.maxconn|int < global_maxconn_min
-  - name: Verify defaults maxconn
-    fail: msg="The 'defaults maxconn' value '{{ haproxy_conf.defaults.maxconn }}' must be greater than {{ defaults_maxconn_min }}"
-    failed_when: haproxy_conf.defaults.maxconn|int < defaults_maxconn_min
-  - name: Verify defaults timeout queue
-    fail: msg="The 'timeout queue' option in 'defaults' is '{{ haproxy_conf.defaults['timeout queue'] }}', but must be set to {{ defaults_timeout_queue }}"
-    failed_when: "haproxy_conf.defaults['timeout queue'] != defaults_timeout_queue"
-  - name: Verify defaults timeout client
-    fail: msg="The 'timeout client' option in 'defaults' is '{{ haproxy_conf.defaults['timeout client'] }}', but must be set to {{ defaults_timeout_client }}"
-    failed_when: "haproxy_conf.defaults['timeout client'] != defaults_timeout_client"
-  - name: Verify defaults timeout server
-    fail: msg="The 'timeout server' option in 'defaults' is '{{ haproxy_conf.defaults['timeout server'] }}', but must be set to {{ defaults_timeout_server }}"
-    failed_when: "haproxy_conf.defaults['timeout server'] != defaults_timeout_server"
-  - name: Verify defaults timeout check
-    fail: msg="The 'timeout check' option in 'defaults' is '{{ haproxy_conf.defaults['timeout check'] }}', but must be set to {{ defaults_timeout_check }}"
-    failed_when: "haproxy_conf.defaults['timeout check'] != defaults_timeout_check"
@@ -1,33 +0,0 @@
----
-- hosts: undercloud
-  vars:
-    metadata:
-      name: Check Ironic boot configuration
-      description: >
-        Check if baremetal boot configuration is correct.
-      groups:
-        - pre-deployment
-        - pre-upgrade
-    deploy_kernel_name: "bm-deploy-kernel"
-    deploy_ramdisk_name: "bm-deploy-ramdisk"
-  tasks:
-  - name: Get id for deploy kernel by name
-    set_fact:
-      deploy_kernel_id: "{{ lookup('glance_images', 'name', ['{{ deploy_kernel_name }}'], wantlist=True) | map(attribute='id') | join(', ') }}"
-  - name: Get id for deploy ramdisk by name
-    set_fact:
-      deploy_ramdisk_id: "{{ lookup('glance_images', 'name', ['{{ deploy_ramdisk_name }}'], wantlist=True) | map(attribute='id') | join(', ') }}"
-
-  - name: Get ironic nodes
-    set_fact:
-      ironic_nodes: "{{ lookup('ironic_nodes', wantlist=True) }}"
-
-  - name: Check each node for kernel id
-    fail: msg='Node {{ item.uuid }} has an incorrectly configured driver_info/deploy_kernel. Expected "{{ deploy_kernel_id }}" but got "{{ item.driver_info.deploy_kernel }}".'
-    failed_when: item.driver_info.deploy_kernel != deploy_kernel_id
-    with_items: "{{ ironic_nodes }}"
-
-  - name: Check each node for ramdisk id
-    fail: msg='Node {{ item.uuid }} has an incorrectly configured driver_info/deploy_ramdisk. Expected "{{ deploy_ramdisk_id }}" but got "{{ item.driver_info.deploy_ramdisk }}".'
-    failed_when: item.driver_info.deploy_ramdisk != deploy_ramdisk_id
-    with_items: "{{ ironic_nodes }}"
@@ -1,96 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from os import path
-
-from ansible.module_utils.basic import AnsibleModule
-
-DOCUMENTATION = '''
----
-module: advanced_format
-short_description: Check for advanced disk format
-description:
-    - Check whether a drive uses advanced format
-options:
-    drive:
-        required: true
-        description:
-            - drive name
-        type: str
-author: "Martin Andre (@mandre)"
-'''
-
-EXAMPLES = '''
-- hosts: webservers
-  tasks:
-    - name: Detect whether the drive uses Advanced Format
-      advanced_format: drive=vda
-'''
-
-
-def read_int(module, file_path):
-    '''Read a file and convert its value to int.
-
-    Raise ansible failure otherwise.
-    '''
-    try:
-        with open(file_path) as f:
-            file_contents = f.read()
-        return int(file_contents)
-    except IOError:
-        module.fail_json(msg="Cannot open '%s'" % file_path)
-    except ValueError:
-        module.fail_json(msg="The '%s' file doesn't contain an integer value" %
-                         file_path)
-
-
-def main():
-    module = AnsibleModule(argument_spec=dict(
-        drive=dict(required=True, type='str')
-    ))
-
-    drive = module.params.get('drive')
-    queue_path = path.join('/sys/class/block', drive, 'queue')
-
-    physical_block_size_path = path.join(queue_path, 'physical_block_size')
-    logical_block_size_path = path.join(queue_path, 'logical_block_size')
-
-    physical_block_size = read_int(module, physical_block_size_path)
-    logical_block_size = read_int(module, logical_block_size_path)
-
-    if physical_block_size == logical_block_size:
-        module.exit_json(
-            changed=False,
-            msg="The disk %s probably doesn't use Advance Format." % drive,
-        )
-    else:
-        module.exit_json(
-            # NOTE(shadower): we're marking this as `changed`, to make it
-            # visually stand out when running via Ansible directly instead of
-            # using the API.
-            #
-            # The API & UI is planned to look for the `warnings` field and
-            # display it differently.
-            changed=True,
-            warnings=["Physical and logical block sizes of drive %s differ "
-                      "(%s vs. %s). This can mean the disk uses Advance "
-                      "Format." %
-                      (drive, physical_block_size, logical_block_size)],
-        )
-
-
-if __name__ == '__main__':
-    main()
@@ -1,181 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2018 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from ansible.module_utils.basic import AnsibleModule  # noqa
-import re
-
-DOCUMENTATION = '''
----
-module: check_flavors
-short_description: Check that assigned flavors exist and are configured
-description:
-    - Validate that the flavors assigned to roles exist and have the correct
-      settings. Right now, that means that boot_option is unset or set to 'local'
-      , or if set to 'netboot', issue a warning.
-options:
-    roles_info:
-        required: true
-        description:
-            - A list of role info
-        type: list
-    flavors:
-        required: true
-        description:
-            - A dictionary of flavors from Nova
-        type: dict
-
-author: "Brad P. Crochet"
-'''
-
-EXAMPLES = '''
-- hosts: undercloud
-  tasks:
-    - name: Check the flavors
-      check_flavors:
-        roles_info: "{{ lookup('roles_info', wantlist=True) }}"
-        flavors: "{{ lookup('nova_flavors', wantlist=True) }}"
-'''
-
-
-def validate_roles_and_flavors(roles_info, flavors):
-    """Check if roles info is correct
-
-    :param roles_info: list of role data
-    :param flavors: dictionary of flavors
-    :returns result: Flavors and scale
-             warnings: List of warning messages
-             errors: List of error messages
-    """
-
-    result = {}
-    errors = []
-    warnings = []
-    custom_resource_class = None
-    custom_resource_class_val = None
-
-    message = "Flavor '{1}' provided for the role '{0}', does not exist"
-    missing_message = "Role '{0}' is in use, but has no flavor assigned"
-    warning_message = (
-        'Flavor {0} "capabilities:boot_option" is set to '
-        '"netboot". Nodes will PXE boot from the ironic '
-        'conductor instead of using a local bootloader. '
-        'Make sure that enough nodes are marked with the '
-        '"boot_option" capability set to "netboot".')
-    resource_class_missing = (
-        'Flavor {0} does not have a custom resource class '
-        'associated with it')
-    resource_class_name_incorrect = (
-        'Flavor {0} has an incorrectly named custom '
-        'resource class associated with it')
-    resource_class_value_incorrect = (
-        'Flavor {0} has a resource class that is not '
-        'offering exactly 1 resource')
-    disable_standard_scheduling = (
-        'Flavor {0} has to have scheduling based on '
-        'standard properties disabled by setting '
-        'resources:VCPU=0 resources:MEMORY_MB=0 '
-        'resources:DISK_GB=0 in the flavor property')
-
-    for role in roles_info:
-        target = role.get('name')
-        flavor_name = role.get('flavor')
-        scale = role.get('count', 0)
-
-        if flavor_name is None or not scale:
-            if scale:
-                errors.append(missing_message.format(target))
-            continue
-
-        old_flavor_name, old_scale = result.get(flavor_name, (None, None))
-
-        if old_flavor_name:
-            result[flavor_name] = (old_flavor_name, scale)
-        else:
-            flavor = flavors.get(flavor_name)
-
-            if flavor:
-                keys = flavor.get('keys', None)
-                if keys:
-                    if keys.get('capabilities:boot_option', '') \
-                            == 'netboot':
-                        warnings.append(
-                            warning_message.format(flavor_name))
-                    # check if the baremetal flavor has custom resource class
-                    # required for scheduling since queens
-                    resource_specs = {key.split(
-                        "resources:", 1)[-1]: val
-                        for key, val in keys.items()
-                        if key.startswith("resources:")}
-                    if not resource_specs:
-                        errors.append(resource_class_missing.format(
-                            flavor_name))
-                    else:
-                        for key, val in resource_specs.items():
-                            if key.startswith("CUSTOM_"):
-                                custom_resource_class = True
-                                match = re.match('CUSTOM_[A-Z_]+', key)
-                                if match is None:
-                                    errors.append(
-                                        resource_class_name_incorrect,
-                                        flavor_name)
-                                else:
-                                    if val == 1:
-                                        custom_resource_class_val = True
-                        if not custom_resource_class:
-                            errors.append(resource_class_missing.format(
-                                flavor_name))
-                        if not custom_resource_class_val:
-                            errors.append(resource_class_value_incorrect.
-                                          format(flavor_name))
-                        disk = resource_specs.get("DISK_GB", None)
-                        memory = resource_specs.get("MEMORY_MB", None)
-                        vcpu = resource_specs.get("VCPU", None)
-                        if any(resource != 0 for resource in [disk, memory,
-                                                              vcpu]):
-                            errors.append(disable_standard_scheduling.
-                                          format(flavor_name))
-
-                result[flavor_name] = (flavor, scale)
-            else:
-                errors.append(message.format(target, flavor_name))
-
-    return result, warnings, errors
-
-
-def main():
-    module = AnsibleModule(argument_spec=dict(
-        roles_info=dict(required=True, type='list'),
-        flavors=dict(required=True, type='dict')
-    ))
-
-    roles_info = module.params.get('roles_info')
-    flavors = module.params.get('flavors')
-
-    flavor_result, warnings, errors = validate_roles_and_flavors(roles_info,
-                                                                 flavors)
-
-    if errors:
-        module.fail_json(msg="\n".join(errors))
-    elif warnings:
-        module.exit_json(warnings="\n".join(warnings))
-    else:
-        module.exit_json(
-            msg="All flavors configured on roles",
-            flavors=flavor_result)
-
-
-if __name__ == '__main__':
-    main()
@ -1,151 +0,0 @@
|
|||
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Check for available updates for a given package."""

import collections
import subprocess

from ansible.module_utils.basic import AnsibleModule

DOCUMENTATION = '''
---
module: check_package_update
short_description: Check for available updates for a given package
options:
    package:
        required: true
        description:
            - The name of the package you want to check
        type: str
    pkg_mgr:
        required: true
        description:
            - Supported Package Manager, DNF or YUM
        type: str
author: "Florian Fuchs"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
    - name: Get available updates for packages
      check_package_update:
        package: python-tripleoclient
        pkg_mgr: "{{ ansible_pkg_mgr }}"
'''

SUPPORTED_PKG_MGRS = (
    'yum',
    'dnf',
)


PackageDetails = collections.namedtuple('PackageDetails',
                                        ['name', 'arch', 'version'])


def get_package_details(line):
    # Parses an output line from a package manager's
    # `list (available|installed)` command and returns
    # a named tuple
    parts = line.rstrip().split()
    name, arch = parts[0].split('.')
    # Version string, excluding release string and epoch
    version = parts[1].split('-')[0].split(':')[-1]
    return PackageDetails(name, arch, version)


def _command(command):
    # Return the result of a subprocess call
    # as [stdout, stderr]
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=True)
    return process.communicate()


def _get_installed_version_from_output(output, package):
    for line in output.split('\n'):
        if package in line:
            return get_package_details(line)


def _get_latest_available_versions(output, installed):
    # Returns the latest available minor and major versions,
    # one for each.
    latest_minor = None
    latest_major = None
    # Get all packages with the same architecture
    packages = list([get_package_details(line) for line in output.split('\n')
                     if '{i.name}.{i.arch}'.format(i=installed) in line])
    # Get all packages with the *same* major version
    minor = sorted((p for p in packages
                    if p.version[0] == installed.version[0]))
    if len(minor) > 0:
        latest_minor = minor[-1].version
    # Get all packages with a *higher* available major version
    major = sorted((p for p in packages
                    if p.version[0] > installed.version[0]))
    if len(major) > 0:
        latest_major = major[-1].version
    # If the output doesn't contain packages with the same major version,
    # assume the currently installed version is the latest minor one.
    if latest_minor is None:
        latest_minor = installed.version
    return latest_minor, latest_major


def check_update(module, package, pkg_mgr):
    if pkg_mgr not in SUPPORTED_PKG_MGRS:
        module.fail_json(
            msg='Package manager "{}" is not supported.'.format(pkg_mgr))
        return

    installed_stdout, installed_stderr = _command(
        [pkg_mgr, 'list', 'installed', package])
    # Fail the module if for some reason we can't look up the current package.
    if installed_stderr != '':
        module.fail_json(msg=installed_stderr)
        return
    installed = _get_installed_version_from_output(installed_stdout, package)

    available_stdout, available_stderr = _command(
        [pkg_mgr, 'list', 'available', installed.name])
    latest_minor_version, latest_major_version = \
        _get_latest_available_versions(available_stdout, installed)

    module.exit_json(changed=False,
                     name=installed.name,
                     current_version=installed.version,
                     latest_minor_version=latest_minor_version,
                     latest_major_version=latest_major_version)


def main():
    module = AnsibleModule(argument_spec=dict(
        package=dict(required=True, type='str'),
        pkg_mgr=dict(required=True, type='str')
    ))

    check_update(module,
                 module.params.get('package'),
                 module.params.get('pkg_mgr'))


if __name__ == '__main__':
    main()
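
As a quick illustration of the parsing above, with `get_package_details` from this module in scope. The package line is a made-up sample of `yum list` output, not real repository data:

details = get_package_details(
    'python-tripleoclient.noarch  11.5.0-1.el7  @tripleo')
# The architecture is split off the package name and the release
# string and epoch are dropped from the version:
assert details.name == 'python-tripleoclient'
assert details.arch == 'noarch'
assert details.version == '11.5.0'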
@@ -1,249 +0,0 @@
#!/usr/bin/env python
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: docker_facts
version_added: '2.6'
short_description: Gather list of volumes, images, containers
notes:
  - When specifying multiple filters, only assets matching B(all) filters
    will be returned.
description:
  - Gather a list of volumes, images, and containers on a running system
  - Return both filtered and unfiltered lists of volumes, images,
    and containers.
options:
  image_filter:
    description:
      - List of k=v pairs to use as a filter for images.
    type: list
    required: false
  volume_filter:
    description:
      - List of k=v pairs to use as a filter for volumes.
    type: list
    required: false
  container_filter:
    description:
      - List of k=v pairs to use as a filter for containers.
    type: list
    required: false

"""

EXAMPLES = """
- name: Gather Docker facts
  docker_facts:

- name: Gather filtered Docker facts
  docker_facts:
    image_filter:
      - dangling=true
    volume_filter:
      - dangling=true
    container_filter:
      - status=exited
      - status=dead

- name: Remove containers that matched filters
  docker_container:
    name: "{{ item }}"
    state: absent
  loop: "{{ docker.containers_filtered | map(attribute='id') | list }}"

"""

RETURN = """
docker:
  description: >
    Lists of container, volume, and image UUIDs,
    both filtered and unfiltered.
  returned: always
  type: complex
  contains:
    containers:
      description: List of dictionaries of container name, state, and ID
      returned: always
      type: complex
    containers_filtered:
      description: >
        List of dictionaries of container name, state, and ID
        that matched the filter(s)
      returned: always
      type: complex
    images:
      description: List of image UUIDs
      returned: always
      type: list
    images_filtered:
      description: List of UUIDs that matched the filter(s)
      returned: always
      type: list
    volumes:
      description: List of volume UUIDs
      returned: always
      type: list
    volumes_filtered:
      description: List of UUIDs that matched the filter(s)
      returned: always
      type: list
"""

import itertools

from ansible.module_utils.basic import AnsibleModule

DOCKER_SUBCOMMAND_LOOKUP = [
    ('images', 'images', '-q'),
    ('volumes', 'volume ls', '-q'),
    ('containers', 'ps -a', '--format {{.Names}}##{{.ID}}##{{.Status}}')
]


def run_docker_command(
        module,
        docker_bin,
        sub_command=[],
        opts='-q',
        filters=[]):

    # Normalize every argument into a list of command-line tokens.
    if not isinstance(docker_bin, list):
        docker_bin = docker_bin.split()

    if not isinstance(sub_command, list):
        sub_command = sub_command.split()

    if not isinstance(opts, list):
        opts = opts.split()

    if not isinstance(filters, list):
        filters = filters.split()

    filters = ['-f ' + i for i in filters]
    command = list(itertools.chain(docker_bin, sub_command, opts, filters))
    rc, out, err = module.run_command(command)

    if rc != 0:
        module.fail_json(
            msg='Error running command {}.\n\n \
                Original error:\n\n{}'.format(command, err))

    if out == '':
        out = []
    else:
        out = out.strip().split('\n')

    return rc, out, err


def main():
    module = AnsibleModule(
        argument_spec=dict(
            image_filter=dict(type='list', default=[]),
            volume_filter=dict(type='list', default=[]),
            container_filter=dict(type='list', default=[]),
        ),

        supports_check_mode=True
    )

    docker_bin = [module.get_bin_path('docker')]

    docker_facts = {}

    for item in DOCKER_SUBCOMMAND_LOOKUP:
        docker_facts[item[0]] = []
        docker_facts[item[0] + '_filtered'] = []

    if docker_bin[0]:

        # Run each Docker command
        for item in DOCKER_SUBCOMMAND_LOOKUP:
            rc, out, err = run_docker_command(
                module,
                docker_bin,
                sub_command=item[1],
                opts=item[2])

            # For everything but containers, return just the UIDs
            if item[0] != 'containers':
                docker_facts[item[0]] = out
            elif item[0] == 'containers':

                # For containers, use a custom format to get name, id,
                # and status
                for line in out:
                    container_name, container_id, container_status = \
                        line.split('##')
                    container_status = container_status.split()[0]
                    docker_facts[item[0]].append({
                        'name': container_name,
                        'id': container_id,
                        'status': container_status
                    })

            # Get filtered facts
            rc, out, err = run_docker_command(
                module,
                docker_bin,
                sub_command=item[1],
                opts=item[2],
                filters=module.params[item[0].rstrip('s') + '_filter']
            )

            if item[0] != 'containers':
                docker_facts[item[0] + '_filtered'] = out
            elif item[0] == 'containers':

                for line in out:
                    container_name, container_id, container_status = \
                        line.split('##')
                    container_status = container_status.split()[0]
                    docker_facts[item[0] + '_filtered'].append({
                        'name': container_name,
                        'id': container_id,
                        'status': container_status
                    })

    results = dict(
        ansible_facts=dict(
            docker=docker_facts
        )
    )

    module.exit_json(**results)


if __name__ == '__main__':
    main()
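
To make the command composition above concrete, here is roughly how the filtered container lookup is assembled before it reaches module.run_command. The filter value is an arbitrary example:

import itertools

docker_bin = ['docker']
sub_command = 'ps -a'.split()
opts = '--format {{.Names}}##{{.ID}}##{{.Status}}'.split()
filters = ['-f ' + f for f in ['status=exited']]

# Everything is chained into one flat argument list:
command = list(itertools.chain(docker_bin, sub_command, opts, filters))
# -> ['docker', 'ps', '-a',
#     '--format', '{{.Names}}##{{.ID}}##{{.Status}}',
#     '-f status=exited']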
@@ -1,88 +0,0 @@
#!/usr/bin/env python

# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from ansible.module_utils.basic import AnsibleModule

DOCUMENTATION = '''
---
module: haproxy_conf
short_description: Gather the HAProxy config
description:
    - Gather the HAProxy config
options:
    path:
        required: true
        description:
            - file path to the config file
        type: str
author: "Tomas Sedovic"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
    - name: Gather the HAProxy config
      haproxy_conf: path=/etc/haproxy/haproxy.cfg
'''


# ConfigParser chokes on both mariadb and haproxy files. Luckily they have
# a syntax approaching an ini config file, so they are relatively easy to
# parse. This generic ini-style config parser is not perfect -- it can
# ignore some valid options -- but it is good enough for our use case.
def generic_ini_style_conf_parser(file_path, section_regex, option_regex):
    config = {}
    current_section = None
    with open(file_path) as config_file:
        for line in config_file:
            match_section = re.match(section_regex, line)
            if match_section:
                current_section = match_section.group(1)
                config[current_section] = {}
            match_option = re.match(option_regex, line)
            if match_option and current_section:
                option = re.sub(r'\s+', ' ', match_option.group(1))
                config[current_section][option] = match_option.group(2)
    return config


def parse_haproxy_conf(file_path):
    section_regex = r'^(\w+)'
    option_regex = r'^(?:\s+)(\w+(?:\s+\w+)*?)\s+([\w/]*)$'
    return generic_ini_style_conf_parser(file_path, section_regex,
                                         option_regex)


def main():
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, type='str'),
    ))

    haproxy_conf_path = module.params.get('path')

    try:
        config = parse_haproxy_conf(haproxy_conf_path)
    except IOError:
        module.fail_json(msg="Could not open the haproxy conf file at: '%s'" %
                         haproxy_conf_path)

    module.exit_json(changed=False, ansible_facts={u'haproxy_conf': config})


if __name__ == '__main__':
    main()
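
A small usage sketch of the parser above, assuming parse_haproxy_conf from this module is in scope. The config snippet is invented:

import tempfile

snippet = (
    'global\n'
    '    maxconn 4096\n'
    'defaults\n'
    '    retries 3\n'
)
with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as f:
    f.write(snippet)

# Indented "option value" lines are grouped under the preceding
# unindented section name:
config = parse_haproxy_conf(f.name)
# -> {'global': {'maxconn': '4096'}, 'defaults': {'retries': '3'}}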
@@ -1,63 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import subprocess

from ansible.module_utils.basic import AnsibleModule

DOCUMENTATION = '''
---
module: hiera
short_description: Get data from hiera
description:
    - Get data from hiera
options:
    name:
        required: true
        description:
            - Name to lookup
        type: str
author: "Martin Andre (@mandre)"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
    - name: Lookup foo
      hiera: name=foo
'''


def main():
    module = AnsibleModule(argument_spec=dict(
        name=dict(required=True, type='str'),
    ))

    name = module.params.get('name')

    cmd = ['/usr/bin/hiera', '-c', '/etc/puppet/hiera.yaml', name]
    result = subprocess.check_output(cmd, universal_newlines=True).rstrip()

    if result == 'nil':
        module.fail_json(msg="Failed to retrieve hiera data for {}"
                         .format(name))

    module.exit_json(changed=False,
                     ansible_facts={name: result})


if __name__ == '__main__':
    main()
@@ -1,61 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.module_utils.basic import AnsibleModule

DOCUMENTATION = '''
---
module: icmp_ping
short_description: ICMP ping remote hosts
requirements: [ ping ]
description:
    - Check host connectivity with ICMP ping.
options:
    host:
        required: true
        description:
            - IP address or hostname of host to ping
        type: str
author: "Martin Andre (@mandre)"
'''

EXAMPLES = '''
# Ping host:
- hosts: webservers
  tasks:
    - name: Check Internet connectivity
      icmp_ping: host="www.ansible.com"
'''


def main():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, type='str'),
        )
    )

    host = module.params.pop('host')
    result = module.run_command('ping -c 1 {}'.format(host))
    failed = (result[0] != 0)
    msg = result[1] if result[1] else result[2]

    module.exit_json(changed=False, failed=failed, msg=msg)


if __name__ == '__main__':
    main()
@@ -1,156 +0,0 @@
#!/usr/bin/env python

# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Ansible module to read a value from an Ini file.
# Usage:
#     - ini: path=/path/to/file.ini section=default key=something
#       register: my_ini
#
# This reads the `/path/to/file.ini` file and returns the `Hello!` value
# stored under:
#     [default]
#     something = Hello!
#
# You can register the result and use it later with `{{ my_ini.value }}`

try:
    import configparser as ConfigParser
except ImportError:
    import ConfigParser

from enum import Enum
import os

from ansible.module_utils.basic import AnsibleModule


# Possible return values
class ReturnValue(Enum):
    OK = 0
    INVALID_FORMAT = 1
    KEY_NOT_FOUND = 2


def check_file(path, ignore_missing):
    '''Validate the entered path'''

    if not (os.path.exists(path) and os.path.isfile(path)):
        return "Could not open the ini file: '{}'".format(path)
    else:
        return ''


def get_result(path, section, key):
    '''Get value based on section and key'''

    msg = ''
    value = None
    config = ConfigParser.SafeConfigParser()

    try:
        config.read(path)
    except Exception:
        msg = "The file '{}' is not in a valid INI format.".format(path)
        ret = ReturnValue.INVALID_FORMAT
        return (ret, msg, value)

    try:
        value = config.get(section, key)
        msg = ("The key '{}' under the section '{}' in file {} "
               "has the value: '{}'").format(key, section, path, value)
        ret = ReturnValue.OK
        return (ret, msg, value)
    except ConfigParser.Error:
        value = None
        msg = "There is no key '{}' under the section '{}' in file {}.".format(
            key, section, path)
        ret = ReturnValue.KEY_NOT_FOUND
        return (ret, msg, value)


DOCUMENTATION = '''
---
module: ini
short_description: Get data from an ini file
description:
    - Get data from an ini file
options:
    path:
        required: true
        description:
            - File path
        type: str
    section:
        required: true
        description:
            - Section to look up
        type: str
    key:
        required: true
        description:
            - Section key to look up
        type: str
    ignore_missing_file:
        required: false
        description:
            - Flag if a missing file should be ignored
        type: bool
author: "Tomas Sedovic"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
    - name: Lookup bar value
      ini: path=config.ini section=foo key=bar ignore_missing_file=True
'''


def main():
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, type='str'),
        section=dict(required=True, type='str'),
        key=dict(required=True, type='str'),
        ignore_missing_file=dict(required=False, type='bool'),
    ))

    ini_file_path = module.params.get('path')
    ignore_missing = module.params.get('ignore_missing_file')

    # Check that the file exists
    msg = check_file(ini_file_path, ignore_missing)

    if msg != '':
        # Opening the file failed
        if ignore_missing:
            module.exit_json(msg=msg, changed=False, value=None)
        else:
            module.fail_json(msg=msg)
    else:
        # Try to parse the result from the ini file
        section = module.params.get('section')
        key = module.params.get('key')

        ret, msg, value = get_result(ini_file_path, section, key)

        if ret == ReturnValue.INVALID_FORMAT:
            module.fail_json(msg=msg)
        elif ret == ReturnValue.KEY_NOT_FOUND:
            module.exit_json(msg=msg, changed=False, value=None)
        elif ret == ReturnValue.OK:
            module.exit_json(msg=msg, changed=False, value=value)


if __name__ == '__main__':
    main()
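
A short sketch of how get_result() above behaves, assuming the functions of this module are in scope. The file contents are invented:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as f:
    f.write('[default]\nsomething = Hello!\n')

ret, msg, value = get_result(f.name, 'default', 'something')
# ret == ReturnValue.OK, value == 'Hello!'

ret, msg, value = get_result(f.name, 'default', 'missing')
# A missing key raises a ConfigParser.Error internally, so:
# ret == ReturnValue.KEY_NOT_FOUND, value is None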
@@ -1,130 +0,0 @@
#!/usr/bin/env python

# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import netaddr

from ansible.module_utils.basic import AnsibleModule

DOCUMENTATION = '''
---
module: ip_range
short_description: Check the size of an IP range
description:
    - Check the size of an IP range against a minimum value.
options:
    start:
        required: true
        description:
            - Start IP
        type: str
    end:
        required: true
        description:
            - End IP
        type: str
    min_size:
        required: true
        description:
            - Minimum size of the range
        type: int
author: "Tomas Sedovic"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
    - name: Check the IP range
      ip_range:
        start: 192.0.2.5
        end: 192.0.2.24
        min_size: 15
'''


def check_arguments(start, end, min_size):
    '''Validate format of arguments'''

    errors = []

    # Check format of arguments
    try:
        startIP = netaddr.IPAddress(start)
    except netaddr.core.AddrFormatError:
        errors.append('Argument start ({}) must be an IP'.format(start))

    try:
        endIP = netaddr.IPAddress(end)
    except netaddr.core.AddrFormatError:
        errors.append('Argument end ({}) must be an IP'.format(end))

    if not errors:
        if startIP.version != endIP.version:
            errors.append("Arguments start, end must share the same IP "
                          "version")
        if startIP > endIP:
            errors.append("Lower IP bound ({}) must be smaller than upper "
                          "bound ({})".format(startIP, endIP))

    if min_size < 0:
        errors.append('Argument min_size ({}) must not be negative'
                      .format(min_size))

    return errors


def check_IP_range(start, end, min_size):
    '''Compare IP range with minimum size'''

    errors = []
    iprange = netaddr.IPRange(start, end)

    if len(iprange) < min_size:
        errors = [
            'The IP range {} - {} contains {} addresses.'.format(
                start, end, len(iprange)),
            'This might not be enough for the deployment or later scaling.'
        ]

    return errors


def main():
    module = AnsibleModule(argument_spec=dict(
        start=dict(required=True, type='str'),
        end=dict(required=True, type='str'),
        min_size=dict(required=True, type='int'),
    ))

    start = module.params.get('start')
    end = module.params.get('end')
    min_size = module.params.get('min_size')

    # Check arguments
    errors = check_arguments(start, end, min_size)
    if errors:
        module.fail_json(msg='\n'.join(errors))
    else:
        # Check IP range
        range_errors = check_IP_range(start, end, min_size)

        if range_errors:
            module.fail_json(msg='\n'.join(range_errors))
        else:
            module.exit_json(msg='success')


if __name__ == '__main__':
    main()
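
The core of the size check above in isolation; the addresses are the ones from the EXAMPLES block:

import netaddr

# IPRange is inclusive on both ends, so .5 through .24 is 20 addresses
# and a min_size of 15 passes.
iprange = netaddr.IPRange('192.0.2.5', '192.0.2.24')
assert len(iprange) == 20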
@@ -1,533 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import itertools
import netaddr
import os.path
import yaml

import six

from ansible.module_utils.basic import AnsibleModule
# from os_net_config import validator

from tripleo_validations.utils import get_nested


DOCUMENTATION = '''
---
module: network_environment
short_description: Validate networking templates
description:
    - Performs networking-related checks on a set of TripleO templates
options:
    netenv_path:
        required: true
        description:
            - The path of the base network environment file
        type: str
    plan_env_path:
        required: true
        description:
            - The path of the plan environment file
        type: str
    ip_pools_path:
        required: true
        description:
            - The path of the IP pools network environment file
        type: str
    template_files:
        required: true
        description:
            - A list of template files and contents
        type: list
author: "Tomas Sedovic, Martin André, Florian Fuchs"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
    - name: Check the Network environment
      network_environment:
        netenv_path: environments/network-environment.yaml
        template_files: "{{ lookup('tht') }}"
        plan_env_path: plan-environment.yaml
        ip_pools_path: environments/ips-from-pool-all.yaml
'''


def open_network_environment_files(netenv_path, template_files):
    errors = []

    try:
        network_data = yaml.safe_load(template_files[netenv_path])
    except Exception as e:
        return ({}, {}, ["Can't open network environment file '{}': {}"
                         .format(netenv_path, e)])
    nic_configs = []
    resource_registry = network_data.get('resource_registry', {})
    for nic_name, relative_path in six.iteritems(resource_registry):
        if nic_name.endswith("Net::SoftwareConfig"):
            nic_config_path = os.path.normpath(
                os.path.join(os.path.dirname(netenv_path), relative_path))
            try:
                nic_configs.append((
                    nic_name, nic_config_path,
                    yaml.safe_load(template_files[nic_config_path])))
            except Exception as e:
                errors.append(
                    "Can't open the resource '{}' reference file '{}': {}"
                    .format(nic_name, nic_config_path, e))

    return (network_data, nic_configs, errors)


def validate(netenv_path, template_files):
    network_data, nic_configs, errors = open_network_environment_files(
        netenv_path, template_files)
    errors.extend(validate_network_environment(network_data, nic_configs))
    return errors


def validate_network_environment(network_data, nic_configs):
    errors = []

    cidrinfo = {}
    poolsinfo = {}
    vlaninfo = {}
    staticipinfo = {}

    for item, data in six.iteritems(network_data.get('parameter_defaults',
                                                     {})):
        if item.endswith('NetCidr'):
            cidrinfo[item] = data
        elif item.endswith('AllocationPools'):
            poolsinfo[item] = data
        elif item.endswith('NetworkVlanID'):
            vlaninfo[item] = data
        elif item.endswith('IPs'):
            staticipinfo[item] = data

    for nic_config_name, nic_config_path, nic_config in nic_configs:
        errors.extend(check_nic_configs(nic_config_path, nic_config))

    errors.extend(check_cidr_overlap(cidrinfo.values()))
    errors.extend(
        check_allocation_pools_pairing(
            network_data.get('parameter_defaults', {}), poolsinfo))
    errors.extend(check_static_ip_pool_collision(staticipinfo, poolsinfo))
    errors.extend(check_vlan_ids(vlaninfo))
    errors.extend(check_static_ip_in_cidr(cidrinfo, staticipinfo))
    errors.extend(duplicate_static_ips(staticipinfo))

    return errors


def check_nic_configs(path, nic_data):
    errors = []

    if not isinstance(nic_data, collections.Mapping):
        return ["The nic_data parameter must be a dictionary."]

    # Look through every resource's bridges and make sure there is only a
    # single bond per bridge and only one interface per bridge if there are
    # no bonds.
    resources = nic_data.get('resources')
    if not isinstance(resources, collections.Mapping):
        return ["The nic_data must contain the 'resources' key and it must be "
                "a dictionary."]
    for name, resource in six.iteritems(resources):
        try:
            nested_path = [
                ('properties', collections.Mapping, 'dictionary'),
                ('config', collections.Mapping, 'dictionary'),
                ('network_config', collections.Iterable, 'list'),
            ]
            bridges = get_nested(resource, name, nested_path)
        except ValueError as e:
            errors.append('{}'.format(e))
            continue
        # Not all resources contain a network config:
        if not bridges:
            continue

        # TODO(flfuchs) 2018-11-22: Rocky introduced a couple of
        # template changes using a schema that can't be found in
        # os-net-config's schema.yaml file yet, so the validator fails
        # even though the templates are working. Until this is done, we
        # skip the schema validation.
        # Validate the os_net_config object against the schema.
        # v_errors = validator.validate_config(bridges, path)
        # errors.extend(v_errors)
        # if len(v_errors) > 0:
        #     continue

        # If we get here, the nic config file conforms to the schema and
        # there is no more need to check for existence and type of
        # properties.
        for bridge in bridges:
            if bridge['type'] == 'ovs_bridge':
                bond_count = 0
                interface_count = 0
                for bridge_member in bridge['members']:
                    if bridge_member['type'] in ('ovs_bond', 'ovs_dpdk_bond'):
                        bond_count += 1
                    elif bridge_member['type'] == 'interface':
                        interface_count += 1
                    else:
                        pass

                if bond_count >= 2:
                    errors.append(
                        'Invalid bonding: There are >= 2 bonds for'
                        ' bridge {} of resource {} in {}'.format(
                            bridge['name'], name, path))
                if bond_count == 0 and interface_count > 1:
                    errors.append(
                        'Invalid interface: When not using a bond, '
                        'there can only be 1 interface for bridge {} '
                        'of resource {} in {}'.format(
                            bridge['name'], name, path))
                if bond_count == 0 and interface_count == 0:
                    errors.append(
                        'Invalid config: There must be at least '
                        '1 interface or 1 bond for bridge {} '
                        'of resource {} in {}'.format(
                            bridge['name'], name, path))
                # check if the bridge has the name br-int
                if bridge['name'] == 'br-int':
                    errors.append(
                        'br-int bridge name is reserved for '
                        'the integration bridge')
    return errors


def check_cidr_overlap(networks):
    errors = []
    objs = []
    if not isinstance(networks, collections.Iterable):
        return ["The argument must be iterable."]
    for x in networks:
        try:
            objs.append(netaddr.IPNetwork(x))
        except (ValueError, TypeError):
            errors.append('Invalid network: {}'.format(x))

    for net1, net2 in itertools.combinations(objs, 2):
        if (net1 in net2 or net2 in net1):
            errors.append(
                'Networks {} and {} overlap.'
                .format(net1, net2))
    return errors


def check_allocation_pools_pairing(filedata, pools):
    if not isinstance(filedata, collections.Mapping):
        return ["The `filedata` argument must be a dictionary."]
    if not isinstance(pools, collections.Mapping):
        return ["The `pools` argument must be a dictionary."]
    errors = []
    for poolitem, pooldata in six.iteritems(pools):
        pool_objs = []
        if not isinstance(pooldata, collections.Iterable):
            errors.append('The IP ranges in {} must form a list.'
                          .format(poolitem))
            continue

        # Check IP range format
        for dict_range in pooldata:
            try:
                pool_objs.append(netaddr.IPRange(
                    netaddr.IPAddress(dict_range['start']),
                    netaddr.IPAddress(dict_range['end'])))
            except Exception:
                errors.append("Invalid format of the IP range in {}: {}"
                              .format(poolitem, dict_range))
                continue

        # Check if CIDR is specified and IP network is valid
        subnet_item = poolitem.split('AllocationPools')[0] + 'NetCidr'
        try:
            network = filedata[subnet_item]
            subnet_obj = netaddr.IPNetwork(network)
        except KeyError:
            errors.append('The {} CIDR is not specified for {}.'
                          .format(subnet_item, poolitem))
            continue
        except Exception:
            errors.append('Invalid IP network: {}'.format(network))
            continue

        for range in pool_objs:
            # Check if pool is included in subnet
            if range not in subnet_obj:
                errors.append('Allocation pool {} {} outside of subnet'
                              ' {}: {}'.format(poolitem,
                                               pooldata,
                                               subnet_item,
                                               subnet_obj))
                break

            # Check for overlapping pools
            for other in [r for r in pool_objs if r != range]:
                if range.first in other or range.last in other:
                    errors.append('Some pools in {} are overlapping.'.format(
                        poolitem))
                    break

    return errors


def check_static_ip_pool_collision(static_ips, pools):
    """Statically defined IP addresses must not conflict with allocation pools.

    The allocation pools come as a dict of items in the following format:

        InternalApiAllocationPools: [
            {'start': '10.35.191.150', 'end': '10.35.191.240'}
        ]

    The static IP addresses are dicts of:

        ComputeIPs: {
            'internal_api': ['10.35.191.100', etc.],
            'storage': ['192.168.100.45', etc.]
        }
    """
    if not isinstance(static_ips, collections.Mapping):
        return ["The static IPs input must be a dictionary."]
    if not isinstance(pools, collections.Mapping):
        return ["The Pools input must be a dictionary."]
    errors = []
    pool_ranges = []
    for pool_name, ranges in six.iteritems(pools):
        if not isinstance(ranges, collections.Iterable):
            errors.append("The IP ranges in {} must form a list."
                          .format(pool_name))
            continue
        for allocation_range in ranges:
            try:
                ip_range = netaddr.IPRange(allocation_range['start'],
                                           allocation_range['end'])
            except Exception:
                errors.append("Invalid format of the IP range in {}: {}"
                              .format(pool_name, allocation_range))
                continue
            pool_ranges.append((pool_name, ip_range))

    for role, services in six.iteritems(static_ips):
        if not isinstance(services, collections.Mapping):
            errors.append("The {} must be a dictionary.".format(role))
            continue
        for service, ips in six.iteritems(services):
            if not isinstance(ips, collections.Iterable):
                errors.append("The {}->{} must be an array."
                              .format(role, service))
                continue
            for ip in ips:
                try:
                    ip = netaddr.IPAddress(ip)
                except netaddr.AddrFormatError as e:
                    errors.append("{} is not a valid IP address: {}"
                                  .format(ip, e))
                    continue
                ranges_with_conflict = ranges_conflicting_with_ip(
                    ip, pool_ranges)
                if ranges_with_conflict:
                    for pool_name, ip_range in ranges_with_conflict:
                        msg = "IP address {} from {}[{}] is in the {} pool."
                        errors.append(msg.format(
                            ip, role, service, pool_name))
    return errors


def ranges_conflicting_with_ip(ip_address, ip_ranges):
    """Return all IP ranges that conflict with the given IP address.

    This takes a single IP address and a list of `(pool_name,
    netaddr.IPRange)`s.

    We return all ranges that the IP address conflicts with. This is to
    improve the final error messages.
    """
    return [(pool_name, ip_range) for (pool_name, ip_range) in ip_ranges
            if ip_address in ip_range]


def check_vlan_ids(vlans):
    if not isinstance(vlans, collections.Mapping):
        return ["The vlans parameter must be a dictionary."]
    errors = []
    invertdict = {}
    for k, v in six.iteritems(vlans):
        if v not in invertdict:
            invertdict[v] = k
        else:
            errors.append('Vlan ID {} ({}) already exists in {}'.format(
                v, k, invertdict[v]))
    return errors


def check_static_ip_in_cidr(networks, static_ips):
    """Check that all static IP addresses are from the corresponding network
    range.
    """
    if not isinstance(networks, collections.Mapping):
        return ["The networks argument must be a dictionary."]
    if not isinstance(static_ips, collections.Mapping):
        return ["The static_ips argument must be a dictionary."]
    errors = []
    network_ranges = {}
    # TODO(shadower): Refactor this so networks are always valid and already
    # converted to `netaddr.IPNetwork` here. Will be useful in the other
    # checks.
    for name, cidr in six.iteritems(networks):
        try:
            network_ranges[name] = netaddr.IPNetwork(cidr)
        except Exception:
            errors.append("Network '{}' has an invalid CIDR: '{}'"
                          .format(name, cidr))
    for role, services in six.iteritems(static_ips):
        if not isinstance(services, collections.Mapping):
            errors.append("The {} must be a dictionary.".format(role))
            continue
        for service, ips in six.iteritems(services):
            range_name = service.title().replace('_', '') + 'NetCidr'
            if range_name in network_ranges:
                if not isinstance(ips, collections.Iterable):
                    errors.append("The {}->{} must be a list."
                                  .format(role, service))
                    continue
                for ip in ips:
                    if ip not in network_ranges[range_name]:
                        errors.append(
                            "The IP address {} is outside of the {} range: {}"
                            .format(ip, range_name, networks[range_name]))
            else:
                errors.append(
                    "Service '{}' does not have a "
                    "corresponding range: '{}'.".format(service, range_name))
    return errors


def duplicate_static_ips(static_ips):
    errors = []
    if not isinstance(static_ips, collections.Mapping):
        return ["The static_ips argument must be a dictionary."]
    ipset = collections.defaultdict(list)
    # TODO(shadower): we're doing this nested loop multiple times. Turn it
    # into a generator or something.
    for role, services in six.iteritems(static_ips):
        if not isinstance(services, collections.Mapping):
            errors.append("The {} must be a dictionary.".format(role))
            continue
        for service, ips in six.iteritems(services):
            if not isinstance(ips, collections.Iterable):
                errors.append("The {}->{} must be a list."
                              .format(role, service))
                continue
            for ip in ips:
                ipset[ip].append((role, service))
    for ip, sources in six.iteritems(ipset):
        if len(sources) > 1:
            msg = "The {} IP address was entered multiple times: {}."
            formatted_sources = ("{}[{}]"
                                 .format(*source) for source in sources)
            errors.append(msg.format(ip, ", ".join(formatted_sources)))
    return errors


def validate_node_pool_size(plan_env_path, ip_pools_path, template_files):
    warnings = []
    plan_env = yaml.safe_load(template_files[plan_env_path])
    ip_pools = yaml.safe_load(template_files[ip_pools_path])

    param_defaults = plan_env.get('parameter_defaults')
    node_counts = {
        param.replace('Count', ''): count
        for param, count in six.iteritems(param_defaults)
        if param.endswith('Count') and count > 0
    }

    # TODO(akrivoka): There are a lot of inconsistency issues with parameter
    # naming in THT :( Once those issues are fixed, this block should be
    # removed.
    if 'ObjectStorage' in node_counts:
        node_counts['SwiftStorage'] = node_counts['ObjectStorage']
        del node_counts['ObjectStorage']

    param_defaults = ip_pools.get('parameter_defaults')
    role_pools = {
        param.replace('IPs', ''): pool
        for param, pool in six.iteritems(param_defaults)
        if param.endswith('IPs') and param.replace('IPs', '') in node_counts
    }

    for role, node_count in six.iteritems(node_counts):
        try:
            pools = role_pools[role]
        except KeyError:
            warnings.append(
                "Found {} node(s) assigned to '{}' role, but no static IP "
                "pools defined.".format(node_count, role)
            )
            continue
        for pool_name, pool_ips in six.iteritems(pools):
            if len(pool_ips) < node_count:
                warnings.append(
                    "Insufficient number of IPs in '{}' pool for '{}' role: "
                    "{} IP(s) found in pool, but {} nodes assigned to role."
                    .format(pool_name, role, len(pool_ips), node_count)
                )

    return warnings


def main():
    module = AnsibleModule(argument_spec=dict(
        netenv_path=dict(required=True, type='str'),
        plan_env_path=dict(required=True, type='str'),
        ip_pools_path=dict(required=True, type='str'),
        template_files=dict(required=True, type='list')
    ))

    netenv_path = module.params.get('netenv_path')
    plan_env_path = module.params.get('plan_env_path')
    ip_pools_path = module.params.get('ip_pools_path')
    template_files = {name: content[1] for (name, content) in
                      module.params.get('template_files')}

    errors = validate(netenv_path, template_files)
    warnings = []

    try:
        warnings = validate_node_pool_size(plan_env_path, ip_pools_path,
                                           template_files)
    except Exception as e:
        errors.append("{}".format(e))

    if errors:
        module.fail_json(msg="\n".join(errors))
    else:
        module.exit_json(
            msg="No errors found for the '{}' file.".format(netenv_path),
            warnings=warnings,
        )


if __name__ == '__main__':
    main()
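
The overlap test in check_cidr_overlap() relies on netaddr's containment operator; a minimal sketch with made-up networks:

import netaddr

net1 = netaddr.IPNetwork('172.16.0.0/16')
net2 = netaddr.IPNetwork('172.16.10.0/24')
net3 = netaddr.IPNetwork('192.168.1.0/24')

assert net2 in net1        # nested networks overlap
assert net3 not in net1    # disjoint networks do not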
@@ -1,158 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.module_utils.basic import AnsibleModule  # noqa

DOCUMENTATION = '''
---
module: node_disks
short_description: Check disks, flavors and root device hints
description:
    - Check if each node has a root device hint set if there is more
      than one disk and compare flavors to disk sizes.
options:
    nodes:
        required: true
        description:
            - A list of nodes
        type: list
    flavors:
        required: true
        description:
            - A list of flavors
        type: list
    introspection_data:
        required: true
        description:
            - Introspection data for all nodes
        type: list

author: "Florian Fuchs <flfuchs@redhat.com>"
'''

EXAMPLES = '''
- hosts: undercloud
  tasks:
    - name: Check node disks
      node_disks:
        nodes: "{{ lookup('ironic_nodes') }}"
        flavors: "{{ lookup('nova_flavors') }}"
        introspection_data: "{{ lookup('introspection_data',
            auth_url=auth_url.value, password=password.value) }}"
'''


IGNORE_BYTE_MAX = 4294967296

ONE_DISK_TOO_SMALL_ERROR = """\
The node {} only has one disk and it's too small for the "{}" flavor"""

NO_RDH_SMALLEST_DISK_TOO_SMALL_ERROR = (
    '{} has more than one disk available for deployment and no '
    'root device hints set. The disk that will be used is too small '
    'for the flavor with the largest disk requirement ("{}").')


def _get_minimum_disk_size(flavors):
    min_gb = 0
    name = 'n.a.'
    for key, val in flavors.items():
        disk_gb = val['disk']
        if disk_gb > min_gb:
            min_gb = disk_gb
            name = key
    # convert GB to bytes to compare to introspection data
    return name, min_gb * 1073741824


def _get_smallest_disk(disks):
    smallest = disks[0]
    for disk in disks[1:]:
        if disk['size'] < smallest['size']:
            smallest = disk
    return smallest


def _has_root_device_hints(node_name, node_data):
    rdh = node_data.get(
        node_name, {}).get('properties', {}).get('root_device')
    return rdh is not None


def validate_node_disks(nodes, flavors, introspection_data):
    """Validate root device hints using introspection data.

    :param nodes: Ironic nodes
    :param flavors: Nova flavors
    :param introspection_data: Introspection data for all nodes
    :returns: a tuple of (errors, warnings) message lists
    """
    errors = []
    warnings = []
    # Get the name of the flavor with the largest disk requirement,
    # which defines the minimum disk size.
    max_disk_flavor, min_disk_size = _get_minimum_disk_size(flavors)

    for node, content in introspection_data.items():
        disks = content.get('inventory', {}).get('disks')
        valid_disks = [disk for disk in disks
                       if disk['size'] > IGNORE_BYTE_MAX]

        root_device_hints = _has_root_device_hints(node, nodes)
        smallest_disk = _get_smallest_disk(valid_disks)

        if len(valid_disks) == 1:
            if smallest_disk.get('size', 0) < min_disk_size:
                errors.append(ONE_DISK_TOO_SMALL_ERROR.format(
                    node, max_disk_flavor))
        elif not root_device_hints and len(valid_disks) > 1:
            if smallest_disk.get('size', 0) < min_disk_size:
                errors.append(NO_RDH_SMALLEST_DISK_TOO_SMALL_ERROR.format(
                    node, max_disk_flavor))
            else:
                warnings.append('{} has more than one disk available for '
                                'deployment'.format(node))

    return errors, warnings


def main():
    module = AnsibleModule(argument_spec=dict(
        nodes=dict(required=True, type='list'),
        flavors=dict(required=True, type='dict'),
        introspection_data=dict(required=True, type='list')
    ))

    nodes = {node['name']: node for node in module.params.get('nodes')}
    flavors = module.params.get('flavors')
    introspection_data = {name: content for (name, content) in
                          module.params.get('introspection_data')}

    errors, warnings = validate_node_disks(nodes,
                                           flavors,
                                           introspection_data)

    if errors:
        module.fail_json(msg="\n".join(errors))
    elif warnings:
        module.exit_json(warnings="\n".join(warnings))
    else:
        module.exit_json(msg="Root device hints are either set or not "
                             "necessary.")


if __name__ == '__main__':
    main()
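
A worked example of the flavor-to-bytes conversion in _get_minimum_disk_size() above, assuming that function is in scope. The flavor names and sizes are invented:

flavors = {
    'baremetal': {'disk': 40},
    'control': {'disk': 50},
}
name, size = _get_minimum_disk_size(flavors)
# The flavor with the largest disk requirement sets the bar:
assert name == 'control'
assert size == 50 * 1073741824  # 50 GB expressed in bytes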
@@ -1,78 +0,0 @@
#!/usr/bin/env python

# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ansible.module_utils.basic import AnsibleModule

import os.path
import subprocess

DOCUMENTATION = '''
---
module: overcloudrc
short_description: Source the overcloudrc file
description:
    - Source the overcloudrc file
options:
    path:
        required: true
        description:
            - The file path
        type: str
author: "Tomas Sedovic"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
    - name: Source overcloudrc
      overcloudrc: path=/home/stack/overcloudrc
'''


def main():
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, type='str'),
    ))

    overcloudrc_path = os.path.expanduser(module.params.get('path'))

    if not os.path.isfile(overcloudrc_path):
        module.fail_json(
            msg="The overcloudrc file at {} does not exist.".format(
                overcloudrc_path))

    # Use bash to source overcloudrc and print the environment:
    command = ['bash', '-c', 'source ' + overcloudrc_path + ' && env']
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True
    )
    if proc.wait() != 0:
        msg = "Could not source '{}'. Return code: {}.\nSTDERR:\n{}".format(
            overcloudrc_path, proc.returncode, proc.stderr.read())
        module.fail_json(msg=msg)

    facts = {}
    for line in proc.stdout:
        (key, _, value) = line.partition("=")
        if key.startswith("OS_"):
            facts[key] = value.rstrip()

    module.exit_json(changed=False, ansible_facts={'overcloudrc': facts})


if __name__ == '__main__':
    main()
@@ -1,137 +0,0 @@
#!/usr/bin/env python
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: OVS DPDK PMD CPU's check
|
||||
short_description: Run PMD CPU's from all the NUMA nodes check
|
||||
description:
|
||||
- Run PMD CPU's from all the NUMA nodes check
|
||||
options:
|
||||
pmd_cpu_mask:
|
||||
required: true
|
||||
description:
|
||||
- The pmd cpu mask value
|
||||
type: str
|
||||
author: "Jaganathan Palanisamy"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- hosts: ComputeOvsDpdk
|
||||
vars:
|
||||
pmd_cpu_mask: "1010010000000001"
|
||||
tasks:
|
||||
- name: Run PMD CPU's check
|
||||
become: true
|
||||
ovs_dpdk_pmd_cpus_check: pmd_cpu_mask={{ pmad_cpu_mask }}
|
||||
'''
|
||||
|
||||
|
||||
def get_cpus_list_from_mask_value(mask_val):
|
||||
"""Gets CPU's list from the mask value
|
||||
|
||||
:return: comma separated CPU's list
|
||||
"""
|
||||
mask_val = mask_val.strip('\\"')
|
||||
cpus_list = []
|
||||
int_mask_val = int(mask_val, 16)
|
||||
bin_mask_val = bin(int_mask_val)
|
||||
bin_mask_val = str(bin_mask_val).replace('0b', '')
|
||||
rev_bin_mask_val = bin_mask_val[::-1]
|
||||
thread = 0
|
||||
for bin_val in rev_bin_mask_val:
|
||||
if bin_val == '1':
|
||||
cpus_list.append(thread)
|
||||
thread += 1
|
||||
return ','.join([str(cpu) for cpu in cpus_list])


# Gets the distinct numa nodes, physical and logical cpus info
# for all numa nodes.
def get_nodes_cores_info(module):
    dict_cpus = {}
    numa_nodes = []
    cmd = "sudo lscpu -p=NODE,CORE,CPU"
    result = module.run_command(cmd)
    if (not result or (result[0] != 0) or not (str(result[1]).strip(' '))):
        err = "Unable to determine physical and logical cpus."
        module.fail_json(msg=err)
    else:
        for line in str(result[1]).split('\n'):
            if (line.strip(' ') and not line.strip(' ').startswith('#')):
                cpu_info = line.strip(' ').split(',')
                try:
                    node = int(cpu_info[0])
                    cpu = int(cpu_info[1])
                    thread = int(cpu_info[2])
                    if node not in numa_nodes:
                        numa_nodes.append(node)
                    # CPU and NUMA node together form a unique value,
                    # as a cpu is specific to a NUMA node, so the
                    # (NUMA node id, cpu id) tuple is used as the key.
                    key = node, cpu
                    if key in dict_cpus:
                        if thread not in dict_cpus[key]['thread_siblings']:
                            dict_cpus[key]['thread_siblings'].append(thread)
                    else:
                        cpu_item = {}
                        cpu_item['thread_siblings'] = [thread]
                        cpu_item['cpu'] = cpu
                        cpu_item['numa_node'] = node
                        dict_cpus[key] = cpu_item
                except (IndexError, ValueError):
                    err = "Unable to determine physical and logical cpus."
                    module.fail_json(msg=err)
    return (numa_nodes, list(dict_cpus.values()))
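To make the parsing concrete, here is a hypothetical `lscpu -p=NODE,CORE,CPU` output and the structure the function would derive from it:

# Illustrative only: two NUMA nodes, two cores, hyper-threading enabled.
# Header comment lines are skipped by the parser.
lscpu_output = """\
# NODE,CORE,CPU
0,0,0
0,0,2
1,1,1
1,1,3
"""
expected_numa_nodes = [0, 1]
expected_cpus = [
    {'cpu': 0, 'numa_node': 0, 'thread_siblings': [0, 2]},
    {'cpu': 1, 'numa_node': 1, 'thread_siblings': [1, 3]},
]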


def validate_pmd_cpus(module, pmd_cpu_mask):
    pmd_cpus = get_cpus_list_from_mask_value(pmd_cpu_mask)
    pmd_cpu_list = pmd_cpus.split(',')
    cpus = []
    numa_nodes = []
    numa_nodes, cpus = get_nodes_cores_info(module)
    valid_numa_nodes = {}
    for numa_node in numa_nodes:
        valid_numa_nodes[str(numa_node)] = False
        for cpu in cpus:
            if cpu['numa_node'] == numa_node:
                if True in [int(pmd_cpu) in cpu['thread_siblings']
                            for pmd_cpu in pmd_cpu_list]:
                    valid_numa_nodes[str(numa_node)] = True
    invalid_numa_nodes = [node for node, val in valid_numa_nodes.items()
                          if not val]
    if invalid_numa_nodes:
        failed_nodes = ','.join(invalid_numa_nodes)
        err = ("Invalid PMD CPUs: no CPU is used from "
               "NUMA node(s): %(node)s." % {'node': failed_nodes})
        module.fail_json(msg=err)
    else:
        module.exit_json(msg="PMD CPUs configured correctly.")


def main():
    module = AnsibleModule(argument_spec=dict(
        pmd_cpu_mask=dict(required=True, type='str'),
    ))
    validate_pmd_cpus(module,
                      module.params.get('pmd_cpu_mask'))


if __name__ == '__main__':
    main()

@ -1,84 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from xml.etree import ElementTree

from ansible.module_utils.basic import AnsibleModule


DOCUMENTATION = '''
---
module: pacemaker
short_description: Return status from a pacemaker status XML
description:
    - Return status from a pacemaker status XML
options:
    status:
        required: true
        description:
            - pacemaker status XML
        type: str
author: "Tomas Sedovic"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
  - name: Get pacemaker status
    become: true
    command: pcs status xml
    register: pcs_status
  - name: Check pacemaker status
    pacemaker: status="{{ pcs_status.stdout }}"
'''


def parse_pcs_status(pcs_status_xml):
    root = ElementTree.fromstring(pcs_status_xml)
    result = {
        'failures': root.findall('failures/failure'),
    }
    return result


def format_failure(failure):
    return ("Task {task} {op_key} failed on node {node}. Exit reason: "
            "'{exitreason}'. Exit status: '{exitstatus}'."
            .format(task=failure.get('task'),
                    op_key=failure.get('op_key'),
                    node=failure.get('node'),
                    exitreason=failure.get('exitreason'),
                    exitstatus=failure.get('exitstatus')))


def main():
    module = AnsibleModule(argument_spec=dict(
        status=dict(required=True, type='str'),
    ))

    pcs_status = parse_pcs_status(module.params.get('status'))
    failures = pcs_status['failures']
    failed = len(failures) > 0
    if failed:
        msg = "The pacemaker status contains some failed actions:\n" +\
            '\n'.join((format_failure(failure) for failure in failures))
    else:
        msg = "The pacemaker status reports no errors."
    module.exit_json(
        failed=failed,
        msg=msg,
    )


if __name__ == '__main__':
    main()
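A minimal sketch of how parse_pcs_status() and format_failure() behave; the XML fragment is a hand-written approximation of `pcs status xml` output, not captured from a real cluster:

# Illustrative only: a hand-written <failures> fragment in the shape
# parse_pcs_status() expects.
sample_xml = """
<crm_mon>
  <failures>
    <failure task="start" op_key="rabbitmq_start_0" node="controller-0"
             exitreason="timed out" exitstatus="unknown error"/>
  </failures>
</crm_mon>
"""

failures = parse_pcs_status(sample_xml)['failures']
print(format_failure(failures[0]))
# Task start rabbitmq_start_0 failed on node controller-0.
# Exit reason: 'timed out'. Exit status: 'unknown error'.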

@ -1,225 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import os.path
import yaml

import six

from ansible.module_utils.basic import AnsibleModule  # noqa
from tripleo_validations import utils


DOCUMENTATION = '''
---
module: switch_vlans
short_description: Check configured VLANs against Ironic introspection data
description:
    - Validate that the VLANs defined in TripleO nic config files are in the
      LLDP info received from network switches. The LLDP data is stored in
      Ironic introspection data per interface.
options:
    path:
        required: true
        description:
            - The path of the base network environment file
        type: str
    template_files:
        required: true
        description:
            - A list of template files and contents
        type: list
    introspection_data:
        required: true
        description:
            - Introspection data for all nodes
        type: list

author: "Bob Fournier"
'''

EXAMPLES = '''
- hosts: undercloud
  tasks:
  - name: Check that switch vlans are present if used in nic-config files
    switch_vlans:
      path: environments/network-environment.yaml
      template_files: "{{ lookup('tht') }}"
      introspection_data: "{{ lookup('introspection_data',
          auth_url=auth_url.value, password=password.value) }}"
'''


def open_network_environment_files(netenv_path, template_files):
    errors = []

    try:
        network_data = yaml.safe_load(template_files[netenv_path])
    except Exception as e:
        return ({}, {}, ["Can't open network environment file '{}': {}"
                         .format(netenv_path, e)])
    nic_configs = []
    resource_registry = network_data.get('resource_registry', {})
    for nic_name, relative_path in six.iteritems(resource_registry):
        if nic_name.endswith("Net::SoftwareConfig"):
            nic_config_path = os.path.normpath(
                os.path.join(os.path.dirname(netenv_path), relative_path))
            try:
                nic_configs.append((
                    nic_name, nic_config_path,
                    yaml.safe_load(template_files[nic_config_path])))
            except Exception as e:
                errors.append(
                    "Can't open the resource '{}' reference file '{}': {}"
                    .format(nic_name, nic_config_path, e))

    return (network_data, nic_configs, errors)


def validate_switch_vlans(netenv_path, template_files, introspection_data):
    """Check if VLAN exists in introspection data for node

    :param netenv_path: path to network_environment file
    :param template_files: template files being checked
    :param introspection_data: introspection data for all nodes
    :returns warnings: List of warning messages
             errors: List of error messages
    """

    network_data, nic_configs, errors =\
        open_network_environment_files(netenv_path, template_files)
    warnings = []
    vlans_in_templates = False

    # Store VLAN IDs from network-environment.yaml.
    vlaninfo = {}
    for item, data in six.iteritems(network_data.get('parameter_defaults',
                                                     {})):
        if item.endswith('NetworkVlanID'):
            vlaninfo[item] = data

    # Get the VLANs which are actually used in nic configs
    for nic_config_name, nic_config_path, nic_config in nic_configs:
        if not isinstance(nic_config, collections.Mapping):
            return [], ["nic_config parameter must be a dictionary."]

        resources = nic_config.get('resources')
        if not isinstance(resources, collections.Mapping):
            return [], ["The nic_data must contain the 'resources' key "
                        "and it must be a dictionary."]
        for name, resource in six.iteritems(resources):
            try:
                nested_path = [
                    ('properties', collections.Mapping, 'dictionary'),
                    ('config', collections.Mapping, 'dictionary'),
                    ('network_config', collections.Iterable, 'list'),
                ]
                nw_config = utils.get_nested(resource, name, nested_path)
            except ValueError as e:
                errors.append('{}'.format(e))
                continue
            # Not all resources contain a network config:
            if not nw_config:
                continue

            for elem in nw_config:
                # VLANs will be in bridge
                if elem['type'] == 'ovs_bridge' \
                        or elem['type'] == 'linux_bridge':
                    for member in elem['members']:
                        if member['type'] != 'vlan':
                            continue

                        vlans_in_templates = True
                        vlan_id_str = member['vlan_id']
                        vlan_id = vlaninfo[vlan_id_str['get_param']]

                        msg, result = vlan_exists_on_switch(
                            vlan_id, introspection_data)
                        warnings.extend(msg)

                        if not msg and result is False:
                            errors.append(
                                "VLAN ID {} not on attached switch".format(
                                    vlan_id))

    if not vlans_in_templates:
        warnings.append("No VLANs are used in template files")

    return warnings, errors


def vlan_exists_on_switch(vlan_id, introspection_data):
    """Check if VLAN exists in introspection data

    :param vlan_id: VLAN id
    :param introspection_data: introspection data for all nodes
    :returns msg: Error or warning message
             result: boolean indicating if VLAN was found
    """

    for node, data in introspection_data.items():
        node_valid_lldp = False

        all_interfaces = data.get('all_interfaces', [])

        # Check lldp data on all interfaces for this vlan ID
        for interface in all_interfaces:
            lldp_proc = all_interfaces[interface].get('lldp_processed', {})

            if lldp_proc:
                node_valid_lldp = True

                switch_vlans = lldp_proc.get('switch_port_vlans', [])
                if switch_vlans:
                    if any(vlan['id'] == vlan_id for vlan in switch_vlans):
                        return [], True

        # If there is no LLDP data for the node, return a warning; it is
        # not possible to locate the VLAN.
        if not node_valid_lldp:
            node_uuid = node.split("-", 1)[1]
            return ["LLDP data not available for node {}".format(node_uuid)],\
                False

    return [], False  # could not find VLAN ID
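The nesting vlan_exists_on_switch() walks is easiest to see on a small sample; the keys follow the code above, the values are invented:

# Illustrative only: the introspection-data shape the check above walks
# (all_interfaces -> lldp_processed -> switch_port_vlans).
introspection_data = {
    'node-1b7a2c3d': {
        'all_interfaces': {
            'eth0': {
                'lldp_processed': {
                    'switch_port_vlans': [{'id': 201}, {'id': 204}],
                },
            },
        },
    },
}

assert vlan_exists_on_switch(204, introspection_data) == ([], True)
assert vlan_exists_on_switch(999, introspection_data) == ([], False)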


def main():
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, type='str'),
        template_files=dict(required=True, type='list'),
        introspection_data=dict(required=True, type='list')
    ))

    netenv_path = module.params.get('path')
    template_files = {name: content[1] for (name, content) in
                      module.params.get('template_files')}
    introspection_data = {name: content for (name, content) in
                          module.params.get('introspection_data')}

    warnings, errors = validate_switch_vlans(netenv_path, template_files,
                                             introspection_data)

    if errors:
        module.fail_json(msg="\n".join(errors))
    elif warnings:
        module.exit_json(warnings="\n".join(warnings))
    else:
        module.exit_json(msg="All VLANs configured on attached switches")


if __name__ == '__main__':
    main()

@ -1,167 +0,0 @@
#!/usr/bin/env python
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.module_utils.basic import AnsibleModule  # noqa


DOCUMENTATION = '''
---
module: verify_profiles
short_description: Check that profiles have enough nodes
description:
    - Validate that the profiles assigned have enough nodes available.
options:
    nodes:
        required: true
        description:
            - A list of nodes
        type: list
    flavors:
        required: true
        description:
            - A dictionary of flavors
        type: dict

author: "Brad P. Crochet"
'''

EXAMPLES = '''
- hosts: undercloud
  tasks:
  - name: Collect the flavors
    check_flavors:
      roles_info: "{{ lookup('roles_info', wantlist=True) }}"
      flavors: "{{ lookup('nova_flavors', wantlist=True) }}"
    register: flavor_result
  - name: Check the profiles
    verify_profiles:
      nodes: "{{ lookup('ironic_nodes', wantlist=True) }}"
      flavors: flavor_result.flavors
'''


def _capabilities_to_dict(caps):
    """Convert the Node's capabilities into a dictionary."""
    if not caps:
        return {}
    if isinstance(caps, dict):
        return caps
    return dict([key.split(':', 1) for key in caps.split(',')])
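For instance (illustrative values), the two capability forms normalise like this:

# Illustrative only: Ironic stores capabilities either as a dict or as
# a "key:value,key:value" string; the helper normalises both forms.
assert _capabilities_to_dict('profile:compute,boot_option:local') == {
    'profile': 'compute',
    'boot_option': 'local',
}
assert _capabilities_to_dict(None) == {}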


def _node_get_capabilities(node):
    """Get node capabilities."""
    return _capabilities_to_dict(
        node['properties'].get('capabilities'))


def verify_profiles(nodes, flavors):
    """Check if profiles have enough nodes

    :param nodes: list of nodes
    :param flavors: dictionary of flavors
    :returns warnings: List of warning messages
             errors: List of error messages
    """
    errors = []
    warnings = []

    bm_nodes = {node['uuid']: node for node in nodes
                if node['provision_state'] in ('available', 'active')}

    free_node_caps = {uu: _node_get_capabilities(node)
                      for uu, node in bm_nodes.items()}

    profile_flavor_used = False
    for flavor_name, (flavor, scale) in flavors.items():
        if not scale:
            continue

        profile = None
        keys = flavor.get('keys')
        if keys:
            profile = keys.get('capabilities:profile')

        if not profile and len(flavors) > 1:
            message = ('Error: The {flavor} flavor has no profile '
                       'associated.\n'
                       'Recommendation: assign a profile with openstack '
                       'flavor set --property '
                       '"capabilities:profile"="PROFILE_NAME" {flavor}')
            errors.append(message.format(flavor=flavor_name))
            continue

        profile_flavor_used = True

        assigned_nodes = [uu for uu, caps in free_node_caps.items()
                          if caps.get('profile') == profile]
        required_count = scale - len(assigned_nodes)

        if required_count < 0:
            warnings.append('%d nodes with profile %s won\'t be used '
                            'for deployment now' % (-required_count,
                                                    profile))
            required_count = 0

        for uu in assigned_nodes:
            free_node_caps.pop(uu)

        if required_count > 0:
            message = ('Error: only {total} of {scale} requested ironic '
                       'nodes are tagged to profile {profile} (for flavor '
                       '{flavor}).\n'
                       'Recommendation: tag more nodes using openstack '
                       'baremetal node set --property "capabilities='
                       'profile:{profile}" <NODE ID>')
            errors.append(message.format(total=scale - required_count,
                                         scale=scale,
                                         profile=profile,
                                         flavor=flavor_name))

    nodes_without_profile = [uu for uu, caps in free_node_caps.items()
                             if not caps.get('profile')]
    if nodes_without_profile and profile_flavor_used:
        warnings.append("There are %d ironic nodes with no profile that "
                        "will not be used: %s" % (
                            len(nodes_without_profile),
                            ', '.join(nodes_without_profile)))

    return warnings, errors
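A toy invocation with hypothetical data, assuming verify_profiles() is importable the way the unit tests import the other library modules:

# Illustrative only: one flavor requesting two 'control' nodes while a
# single available node carries that profile, so an error is reported.
nodes = [
    {'uuid': 'aaa', 'provision_state': 'available',
     'properties': {'capabilities': 'profile:control'}},
]
flavors = {
    'control': ({'keys': {'capabilities:profile': 'control'}}, 2),
}

warnings, errors = verify_profiles(nodes, flavors)
assert not warnings
assert 'only 1 of 2 requested ironic nodes' in errors[0]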


def main():
    module = AnsibleModule(argument_spec=dict(
        nodes=dict(required=True, type='list'),
        flavors=dict(required=True, type='dict')
    ))

    nodes = module.params.get('nodes')
    flavors = module.params.get('flavors')

    warnings, errors = verify_profiles(nodes,
                                       flavors)

    if errors:
        module.fail_json(msg="\n".join(errors))
    elif warnings:
        module.exit_json(warnings="\n".join(warnings))
    else:
        module.exit_json(
            msg="No profile errors detected.")


if __name__ == '__main__':
    main()

@ -1,54 +0,0 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.module_utils.basic import AnsibleModule


DOCUMENTATION = '''
---
module: warn
short_description: Add a warning to the playbook output
description:
    - Add a warning to the playbook output
options:
    msg:
        required: true
        description:
            - The warning text
        type: str
author: "Martin Andre (@mandre)"
'''

EXAMPLES = '''
- hosts: webservers
  tasks:
  - name: Output warning message
    warn: msg="Warning!"
'''


def main():
    module = AnsibleModule(argument_spec=dict(
        msg=dict(required=True, type='str'),
    ))

    msg = module.params.get('msg')

    module.exit_json(changed=False,
                     warnings=[msg])


if __name__ == '__main__':
    main()

@ -1,78 +0,0 @@
#!/usr/bin/env python

# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.plugins.lookup import LookupBase

from glanceclient.exc import HTTPNotFound

from tripleo_validations import utils


DOCUMENTATION = """
    lookup: glance_images
    description: Retrieve image information from Glance
    long_description:
      - Load image information using the Glance API and search by attribute.
    options:
      _terms:
        description: Optional filter attribute and filter value
    author: Brad P. Crochet <brad@redhat.com>
"""

EXAMPLES = """
    - name: Get all image ids from glance
      debug:
        msg: |
          {{ lookup('glance_images', wantlist=True) |
             map(attribute='id') | join(', ') }}

    - name: Get image with name 'overcloud-full'
      debug:
        msg: |
          {{ lookup('glance_images', 'name', ['overcloud-full'],
             wantlist=True) | map(attribute='name') }}
"""

RETURN = """
_raw:
    description: A Python list with results from the API call.
"""


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns image information from glance."""
        glance = utils.get_glance_client(variables)

        images = []
        if len(terms) > 0:
            # Look up images by name
            if terms[0] == 'name':
                for value in terms[1]:
                    try:
                        search_data = {terms[0]: value}
                        images.extend(
                            [image for image in
                                glance.images.list(filters=search_data)]
                        )
                    except HTTPNotFound:
                        pass
        else:
            images = [image for image in glance.images.list()]

        return images

@ -1,52 +0,0 @@
#!/usr/bin/env python

# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.plugins.lookup import LookupBase
from ironic_inspector_client import ClientError
from ironic_inspector_client import ClientV1
from ironicclient import client

from tripleo_validations.utils import get_auth_session


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns Ironic Inspector introspection data.

        Access swift and return introspection data for all nodes.

        :returns: a list of tuples, one for each node.
        """
        session = get_auth_session({
            'auth_url': kwargs.get('auth_url'),
            'password': kwargs.get('password'),
            'username': 'ironic',
            'project_name': 'service',
        })
        ironic = client.get_client(1, session=session)
        ironic_inspector = ClientV1(session=session)

        ret = []
        for node in ironic.node.list():
            try:
                ret.append((node.name, ironic_inspector.get_data(node.uuid)))
            except ClientError:
                pass

        return ret

@ -1,101 +0,0 @@
#!/usr/bin/env python

# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

DOCUMENTATION = """
    lookup: ironic_nodes
    description: Retrieve node information from Ironic
    long_description:
      - Load node information using the Ironic API
    options:
      _terms:
        description: Optional filter attribute and filter value
    author: Florian Fuchs <flfuchs@redhat.com>
"""

EXAMPLES = """
    - name: Get all nodes from Ironic
      debug:
        msg: "{{ lookup('ironic_nodes', wantlist=True) }}"

    - name: Lookup all nodes that match a list of IDs
      debug:
        msg: |
          {{ lookup('ironic_nodes', 'id',
             ['c8a1c7b8-d6b1-408b-b4a6-5881efdfd65c',
              '4bea536d-9d37-432f-a77e-7c65f1cf3acb'],
             wantlist=True) }}

    - name: Get all nodes for a set of instance UUIDs
      debug:
        msg: |
          {{ lookup('ironic_nodes', 'instance_uuid',
             ['1691a1c7-9974-4bcc-a07a-5dec7fc04da0',
              '07f2435d-820c-46ce-9097-cf8a7282293e'],
             wantlist=True) }}

    - name: Get all nodes marked as 'associated'
      debug:
        msg: |
          {{ lookup('ironic_nodes', 'associated',
             wantlist=True) }}

    - name: Get nodes in provision state, and not associated or in maintenance
      debug:
        msg: |
          {{ lookup('ironic_nodes', 'provision_state',
             ['available', 'inspect'], wantlist=True) }}
"""

RETURN = """
_raw:
    description: A Python list with results from the API call.
"""

from ansible.plugins.lookup import LookupBase

from tripleo_validations import utils


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns node information from ironic."""
        ironic = utils.get_ironic_client(variables)

        if len(terms) > 0:
            if terms[0] == 'id':
                nodes = [ironic.node.get(id) for id in terms[1]]
                return [utils.filtered(node) for node in nodes]
            elif terms[0] == 'instance_uuid':
                nodes = [ironic.node.get_by_instance_uuid(uuid)
                         for uuid in terms[1]]
                return [utils.filtered(node) for node in nodes]
            elif terms[0] == 'associated':
                nodes = ironic.node.list(associated=True, detail=True)
                return [utils.filtered(node) for node in nodes]
            elif terms[0] == 'provision_state':
                nodes = []
                for term in terms[1]:
                    nodes.extend(ironic.node.list(
                        provision_state=term,
                        associated=False,
                        maintenance=False,
                        detail=True))
                return [utils.filtered(node) for node in nodes]
        else:
            return [utils.filtered(node)
                    for node in ironic.node.list(detail=True)]

@ -1,59 +0,0 @@
#!/usr/bin/env python

# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.plugins.lookup import LookupBase

from tripleo_validations import utils


DOCUMENTATION = """
    lookup: nova_flavors
    description: Retrieve flavor information from Nova
    long_description:
      - Load flavor information using the Nova API.
    author: Brad P. Crochet <brad@redhat.com>
"""

EXAMPLES = """
    - name: Get all flavors from nova
      debug:
        msg: |
          {{ lookup('nova_flavors') }}
"""

RETURN = """
_raw:
    description: A Python list with results from the API call.
"""


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns flavor information from nova."""
        nova = utils.get_nova_client(variables)
        return {f.name: {'name': f.name,
                         'id': f.id,
                         'disk': f.disk,
                         'ram': f.ram,
                         'vcpus': f.vcpus,
                         'ephemeral': f.ephemeral,
                         'swap': f.swap,
                         'is_public': f.is_public,
                         'rxtx_factor': f.rxtx_factor,
                         'keys': f.get_keys()}
                for f in nova.flavors.list()}

@ -1,50 +0,0 @@
#!/usr/bin/env python

# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.plugins.lookup import LookupBase

from tripleo_validations import utils


DOCUMENTATION = """
    lookup: nova_hypervisor_statistics
    description: Retrieve hypervisor statistic information from Nova
    long_description:
      - Load hypervisor statistics using the Nova API.
    author: Brad P. Crochet <brad@redhat.com>
"""

EXAMPLES = """
    - name: Get all hypervisor statistics from nova
      debug:
        msg: |
          {{ lookup('nova_hypervisor_statistics') }}
"""

RETURN = """
_raw:
    description: A Python list with results from the API call.
"""


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns hypervisor statistics from nova."""
        nova = utils.get_nova_client(variables)
        statistics = nova.hypervisor_stats.statistics()
        return utils.filtered(statistics)

@ -1,91 +0,0 @@
#!/usr/bin/env python

# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

DOCUMENTATION = """
    lookup: nova_servers
    description: Retrieve server information from Nova
    long_description:
      - Load server information using the Nova API and search by attribute.
    options:
      _terms:
        description: Optional filter attribute and filter value
    author: Florian Fuchs <flfuchs@redhat.com>
"""

EXAMPLES = """
    - name: Get all server ids from nova
      debug:
        msg: |
          {{ lookup('nova_servers', wantlist=True) |
             map(attribute='id') | join(', ') }}

    - name: Lookup all server ids from nova with a certain ctlplane IP
      debug:
        msg: |
          {{ lookup('nova_servers', 'ip', 'ctlplane', ['192.168.24.15'],
             wantlist=True) | map(attribute='id') | join(', ') }}

    - name: Get server with name 'overcloud-controller-0'
      debug:
        msg: |
          {{ lookup('nova_servers', 'name', ['overcloud-controller-0'],
             wantlist=True) | map(attribute='name') }}
"""

RETURN = """
_raw:
    description: A Python list with results from the API call.
"""

from ansible.plugins.lookup import LookupBase
from novaclient.exceptions import NotFound

from tripleo_validations import utils


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns server information from nova."""
        nova = utils.get_nova_client(variables)

        servers = []
        if len(terms) > 0:
            # Look up servers by network and IP
            if terms[0] == 'ip':
                for ip in terms[2]:
                    try:
                        servers.append(nova.servers.find(
                            networks={terms[1]: [ip]}))
                    except NotFound:
                        pass
            # Look up servers by attribute
            else:
                for value in terms[1]:
                    try:
                        search_data = {terms[0]: value}
                        servers.append(nova.servers.find(**search_data))
                    except NotFound:
                        pass
        else:
            servers = nova.servers.list()

        # For each server only return properties whose value
        # can be properly serialized. (Things like
        # novaclient.v2.servers.ServerManager will make
        # Ansible return the whole result as a string.)
        return [utils.filtered(server) for server in servers]

@ -1,83 +0,0 @@
#!/usr/bin/env python

# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import yaml

from ansible.plugins.lookup import LookupBase

from tripleo_validations import utils


DOCUMENTATION = """
    lookup: roles_info
    description: Retrieve role information from Heat and Swift.
    long_description:
      - Load role information using the Heat API.
    options:
      _terms:
        description: Optional filter attribute and filter value
    author: Brad P. Crochet <brad@redhat.com>
"""

EXAMPLES = """
    - name: Get all role info from Heat and Swift
      debug:
        msg: |
          {{ lookup('roles_info', wantlist=True) }}
"""

RETURN = """
_raw:
    description: A Python list with results from the API call.
"""


class LookupModule(LookupBase):
    def _get_object_yaml(self, swiftclient, container, obj):
        obj_ret = swiftclient.get_object(container=container, obj=obj)
        return yaml.safe_load(obj_ret[1])

    def run(self, terms, variables=None, **kwargs):
        """Returns role information from Heat and Swift."""
        swift = utils.get_swift_client(variables)
        plan = variables.get('plan')
        plan_env = self._get_object_yaml(swift, plan, 'plan-environment.yaml')
        roles_data = self._get_object_yaml(swift, plan, 'roles_data.yaml')

        def default_role_data(role):
            return {
                'name': role['name'],
                'count': role.get('CountDefault', 0),
                'flavor': None
            }

        roles = list(map(default_role_data, roles_data))

        parameter_defaults = plan_env.get('parameter_defaults', {})

        for role in roles:
            new_count = parameter_defaults.get("%sCount" % role['name'])
            if new_count:
                role['count'] = new_count

            new_flavor = parameter_defaults.get("Overcloud%sFlavor" %
                                                role['name'])
            if new_flavor:
                role['flavor'] = new_flavor

        return roles
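The override logic is easier to follow with concrete data; the role name and parameters below are invented:

# Illustrative only: how plan-environment parameter_defaults override
# the defaults taken from roles_data.yaml.
roles_data = [{'name': 'Controller', 'CountDefault': 1}]
parameter_defaults = {
    'ControllerCount': 3,
    'OvercloudControllerFlavor': 'control',
}

role = {'name': 'Controller',
        'count': roles_data[0].get('CountDefault', 0),
        'flavor': None}
if parameter_defaults.get('%sCount' % role['name']):
    role['count'] = parameter_defaults['%sCount' % role['name']]
if parameter_defaults.get('Overcloud%sFlavor' % role['name']):
    role['flavor'] = parameter_defaults['Overcloud%sFlavor' % role['name']]

assert role == {'name': 'Controller', 'count': 3, 'flavor': 'control'}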

@ -1,48 +0,0 @@
#!/usr/bin/env python

# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ansible.plugins.lookup import LookupBase

from tripleo_validations import utils


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns the current plan's stack resources.

        :return: A list of dicts
        """
        ret = []
        heat = utils.get_heat_client(variables)
        resource_list = heat.resources.list(variables['plan'])
        for resource in resource_list:
            ret.append(dict(
                resource_name=resource.resource_name,
                resource_status=resource.resource_status,
                logical_resource_id=resource.logical_resource_id,
                links=resource.links,
                creation_time=resource.creation_time,
                resource_status_reason=resource.resource_status_reason,
                updated_time=resource.updated_time,
                required_by=resource.required_by,
                physical_resource_id=resource.physical_resource_id,
                resource_type=resource.resource_type
            ))
        return ret

@ -1,47 +0,0 @@
#!/usr/bin/env python

# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from ansible.plugins.lookup import LookupBase

from tripleo_validations import utils


EXCLUDED_EXT = (
    '.pyc',
    '.pyo',
)


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns the current plan files.

        Returns a list of tuples, one for each plan file,
        containing the template path and the template content.
        """
        ret = []
        swift = utils.get_swift_client(variables)
        container = swift.get_container(variables['plan'])
        for item in container[1]:
            obj = swift.get_object(variables['plan'], item['name'])
            if os.path.splitext(item['name'])[-1] not in EXCLUDED_EXT:
                ret.append((item['name'], obj))

        return ret

@ -1,32 +0,0 @@
---
- hosts: Controller
  vars:
    metadata:
      name: MySQL Open Files Limit
      description: >
        Verify the `open-files-limit` configuration is high enough

        https://access.redhat.com/solutions/1598733
      groups:
        - post-deployment
    min_open_files_limit: 16384
  tasks:
  - name: Set container_cli fact from the inventory
    set_fact:
      container_cli: "{{ hostvars[inventory_hostname].container_cli }}"

  - name: Get the open_files_limit value
    become: true
    shell: >-
      "{{ container_cli }}" exec -u root
      $("{{ container_cli }}" ps -q --filter "name=mysql|galera-bundle" | head -1)
      /bin/bash -c 'ulimit -n'
    changed_when: False
    register: mysqld_open_files_limit

  - name: Test the open-files-limit value
    fail:
      msg: >
        The open_files_limit option for mysql must be at least
        {{ min_open_files_limit }}. Right now it's {{ mysqld_open_files_limit.stdout }}.
    failed_when: "mysqld_open_files_limit.stdout|int < min_open_files_limit"

@ -1,26 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Validate the Heat environment file for network configuration
      description: >
        This validates the network environment and nic-config files
        that specify the overcloud network configuration and are stored
        in the current plan's Swift container.

        The deployers are expected to write these files themselves as
        described in the Network Isolation guide:

        http://tripleo.org/advanced_deployment/network_isolation.html
      groups:
        - pre-deployment
    network_environment_path: environments/network-environment.yaml
    plan_env_path: plan-environment.yaml
    ip_pools_path: environments/ips-from-pool-all.yaml
  tasks:
  - name: Validate the network environment files
    network_environment:
      netenv_path: "{{ network_environment_path }}"
      plan_env_path: "{{ plan_env_path }}"
      ip_pools_path: "{{ ip_pools_path }}"
      template_files: "{{ lookup('tht') }}"

@ -1,65 +0,0 @@
---
- hosts: Controller
  vars:
    metadata:
      name: Neutron Sanity Check
      description: >
        Run `neutron-sanity-check` on the controller nodes to find
        potential issues with Neutron's configuration.

        The tool expects all the configuration files that are passed
        to the Neutron services.

      groups:
        - post-deployment

    # The list of Neutron configuration files and directories that
    # will be passed to the Neutron services. The order is important
    # here: the values in later files take precedence.
    configs:
      - /etc/neutron/neutron.conf
      - /usr/share/neutron/neutron-dist.conf
      - /etc/neutron/metadata_agent.ini
      - /etc/neutron/dhcp_agent.ini
      - /etc/neutron/fwaas_driver.ini
      - /etc/neutron/l3_agent.ini
      - /usr/share/neutron/neutron-lbaas-dist.conf
      - /etc/neutron/lbaas_agent.ini

  tasks:
  - name: Run neutron-sanity-check
    command: "docker exec -u root neutron_ovs_agent /bin/bash -c 'neutron-sanity-check --config-file {{ item }}'"
    with_items: "{{ configs }}"
    become: true
    register: nsc_return
    ignore_errors: true
    changed_when: False

  - name: Detect errors
    set_fact:
      has_errors: "{{ nsc_return.results
                      | sum(attribute='stderr_lines', start=[])
                      | select('search', '(ERROR)')
                      | list | length | int > 0 }}"

  - name: Detect warnings
    set_fact:
      has_warnings: "{{ nsc_return.results
                        | sum(attribute='stderr_lines', start=[])
                        | select('search', '(WARNING)')
                        | list | length | int > 0 }}"

  - name: Create output
    set_fact:
      output_msg: "{{ nsc_return.results
                      | sum(attribute='stderr_lines', start=[])
                      | select('search', '(ERROR|WARNING)')
                      | list }}"

  - name: Output warning
    warn: msg="{{ output_msg | join('\n') }}"
    when: has_warnings and not has_errors

  - name: Fail
    fail: msg="{{ output_msg | join('\n') }}"
    when: has_errors

@ -1,22 +0,0 @@
---
- hosts: nova_compute
  vars:
    metadata:
      name: Verify NoOpFirewallDriver is set in Nova
      description: >
        When using Neutron, the `firewall_driver` option in Nova must be set
        to `NoopFirewallDriver`.
      groups:
        - post-deployment
  tasks:
  - name: Read the `firewall_driver` value
    become: true
    ini: path=/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf section=DEFAULT key=firewall_driver
    register: nova_firewall_driver
  - name: Verify `firewall_driver` is set to `NoopFirewallDriver`
    fail:
      msg: >
        The firewall_driver value in /etc/nova/nova.conf is
        {{ nova_firewall_driver.value or 'unset' }}, but it must be set to:
        nova.virt.firewall.NoopFirewallDriver
    failed_when: "nova_firewall_driver.value != 'nova.virt.firewall.NoopFirewallDriver'"

@ -1,25 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Check node disk configuration
      description: >
        Check node disk numbers and sizes and whether root device hints
        are set.
      groups:
        - pre-deployment
  tasks:
  - name: Get Ironic Inspector swift auth_url
    become: true
    ini: path=/var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf section=inspector key=auth_url
    register: ironic_auth_url
  - name: Get Ironic Inspector swift password
    become: true
    ini: path=/var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf section=inspector key=password
    register: ironic_password
  - name: Check node disks
    node_disks:
      nodes: "{{ lookup('ironic_nodes', wantlist=True) }}"
      flavors: "{{ lookup('nova_flavors', wantlist=True) }}"
      introspection_data: "{{ lookup('introspection_data',
          auth_url=ironic_auth_url.value,
          password=ironic_password.value) }}"

@ -1,39 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Node health check
      description: >
        Check if all overcloud nodes can be connected to before starting a
        scale-up or an upgrade.
      groups:
        - pre-upgrade
  tasks:
  - name: Collect IPs for overcloud nodes
    set_fact: ansible_host="{{ hostvars[item]['ansible_host'] }}"
    register: oc_ips
    with_items: "{{ groups.overcloud }}"
  - name: Ping all overcloud nodes
    icmp_ping:
      host: "{{ item }}"
    with_items: "{{ oc_ips.results | map(attribute='ansible_facts.ansible_host') | list }}"
    ignore_errors: true
    register: ping_results
  - name: Extract failed pings
    set_fact:
      failed_ips: "{{ ping_results.results | selectattr('failed', 'equalto', True) | map(attribute='item') | list }}"
  - name: Lookup nova servers for each failed IP
    set_fact:
      servers: "{{ lookup('nova_servers', 'ip', 'ctlplane', failed_ips, wantlist=True) }}"
  - name: Extract nova ids
    set_fact:
      server_ids: "{{ servers | map(attribute='id') | list }}"
  - name: Lookup ironic nodes for unreachable nova servers
    set_fact:
      nodes: "{{ lookup('ironic_nodes', 'instance_uuid', server_ids, wantlist=True) }}"
  - name: Fail if there are unreachable nodes
    fail:
      msg: |
        {{ lookup('template', './templates/unreachable_nodes.j2',
        template_vars=dict(nodes=nodes)) }}
    when: nodes|length > 0

@ -1,91 +0,0 @@
---
- hosts: Controller
  vars:
    metadata:
      name: Nova Event Callback Configuration Check
      description: >
        This validation verifies that the Nova Event Callback feature is
        configured; it is generally enabled by default.
        It checks the following files on the Overcloud Controller(s):
        - /etc/nova/nova.conf:
          [DEFAULT]/vif_plugging_is_fatal = True
          [DEFAULT]/vif_plugging_timeout >= 300
        - /etc/neutron/neutron.conf:
          [nova]/auth_url = 'http://nova_admin_auth_ip:5000'
          [nova]/tenant_name = 'service'
          [DEFAULT]/notify_nova_on_port_data_changes = True
          [DEFAULT]/notify_nova_on_port_status_changes = True
      groups:
        - post-deployment
    nova_config_file: /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf
    neutron_config_file: /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf
    vif_plugging_fatal_check: "vif_plugging_is_fatal"
    vif_plugging_timeout_check: "vif_plugging_timeout"
    vif_plugging_timeout_value_min: 300
    notify_nova_on_port_data_check: "notify_nova_on_port_data_changes"
    notify_nova_on_port_status_check: "notify_nova_on_port_status_changes"
    tenant_name_check: "tenant_name"
  tasks:
  - name: Get VIF Plugging setting values from nova.conf
    become: True
    ini: path={{ nova_config_file }} section=DEFAULT key={{ item }} ignore_missing_file=true
    register: nova_config_result
    with_items:
      - "{{ vif_plugging_fatal_check }}"
      - "{{ vif_plugging_timeout_check }}"

  - name: Check Nova configuration values
    fail: msg="Value of {{ item.item }} is set to {{ item.value or 'None' }}."
    when:
      - "(item.item == vif_plugging_fatal_check and (not item.value|bool or None)) or
         (item.item == vif_plugging_timeout_check and (item.value|int <= vif_plugging_timeout_value_min|int
         or None))"
    with_items: "{{ nova_config_result.results }}"

  - name: Get auth_url value from hiera
    become: True
    command: hiera -c /etc/puppet/hiera.yaml neutron::server::notifications::auth_url
    ignore_errors: True
    changed_when: False
    register: auth_url

  - name: Get auth_url value from neutron.conf
    become: True
    ini: path={{ neutron_config_file }} section=nova key=auth_url ignore_missing_file=true
    register: neutron_auth_url_result

  - name: Check [nova]/auth_url setting value from neutron.conf
    fail:
      msg: >-
        [nova]/auth_url from {{ neutron_config_file }} is set to
        {{ neutron_auth_url_result.value or 'None' }}
        but it should be set to {{ auth_url.stdout }}.
    failed_when: "neutron_auth_url_result.value != auth_url.stdout"

  - name: Get Notify Nova settings values from neutron.conf
    become: True
    ini: path={{ neutron_config_file }} section=DEFAULT key={{ item }} ignore_missing_file=true
    register: neutron_notify_nova_result
    with_items:
      - "{{ notify_nova_on_port_data_check }}"
      - "{{ notify_nova_on_port_status_check }}"

  - name: Check Notify Nova settings values
    fail: msg="Value of {{ item.item }} is set to {{ item.value|bool }}."
    when: not item.value|bool or item.value == None
    with_items: "{{ neutron_notify_nova_result.results }}"

  - name: Get Tenant Name setting value from neutron.conf
    become: True
    ini: path={{ neutron_config_file }} section=nova key={{ tenant_name_check }} ignore_missing_file=true
    register: neutron_tenant_name_result

  - name: Check Tenant Name settings value
    fail:
      msg: >-
        [nova]/tenant_name from {{ neutron_config_file }} is set to
        {{ neutron_tenant_name_result.value or 'None' }}
        but it should be set to 'service'.
    when: neutron_tenant_name_result.value != 'service'

@ -1,17 +0,0 @@
---
- hosts: overcloud
  vars:
    metadata:
      name: Verify all deployed nodes have their clock synchronised
      description: >
        Each overcloud node should have its clock synchronised.

        The deployment should configure and run ntpd. This validation
        verifies that it is indeed running and connected to an NTP server
        on all nodes.
      groups:
        - post-deployment
  tasks:
  - name: Run ntpstat
    # ntpstat returns 0 if synchronised and non-zero otherwise:
    command: ntpstat
    changed_when: False

@ -1,173 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Check resources for an OpenShift on OpenStack deployment
      description: |
        Check if there are enough resources for an OpenShift deployment on
        top of an OpenStack deployment:

        - Is there a flavor that meets the minimum requirements for a test
          environment? (4GB RAM, 40GB disk)
        - Is there a flavor that meets the minimum requirements for a
          production environment? (16GB RAM, 40GB disk, 4 VCPUs)
        - Are images named centos or rhel available?
        - Are there sufficient compute resources available for a default
          setup? (1 Master node, 1 Infra node, 2 App nodes)
      groups:
        - openshift-on-openstack
    min_total_ram_testing: 16384    # 4 per node
    min_total_vcpus_testing: 4      # 1 per node
    min_total_disk_testing: 93      # Master: 40, others: 17 per node
    min_total_ram_prod: 40960       # Master: 16, others: 8 per node
    min_total_vcpus_prod: 7         # Master: 4, others: 1 per node
    min_total_disk_prod: 93         # Master: 42, others: 17 per node
    min_node_ram_testing: 4096      # Minimum ram per node for testing
    min_node_disk_testing: 40       # Minimum disk per node for testing
    min_node_ram_prod: 16384        # Minimum ram per node for production
    min_node_disk_prod: 42          # Minimum disk per node for production
    resource_reqs_testing: False
    resource_reqs_prod: False

  tasks:

  # Get auth token and service catalog from Keystone and extract service urls.
  - name: Get token and catalog from Keystone
    uri:
      url: "{{ overcloud_keystone_url
               | urlsplit('scheme') }}://{{ overcloud_keystone_url
               | urlsplit('netloc') }}/v3/auth/tokens"
      method: POST
      body_format: json
      body:
        auth:
          scope:
            project:
              name: admin
              domain:
                id: default
          identity:
            methods:
              - password
            password:
              user:
                name: admin
                domain:
                  id: default
                password: "{{ overcloud_admin_password }}"
      return_content: yes
      status_code: 201
    register: keystone_result
    when: overcloud_keystone_url|default('')

  - name: Set auth token
    set_fact: auth_token="{{ keystone_result.x_subject_token }}"

  - name: Get Nova URL from catalog
    set_fact: nova_url="{{ keystone_result.json.token
                           | json_query("catalog[?name=='nova'].endpoints")
                           | first
                           | selectattr('interface', 'equalto', 'public')
                           | map(attribute='url') | first }}"

  - name: Get Glance URL from catalog
    set_fact: glance_url="{{ keystone_result.json.token
                             | json_query("catalog[?name=='glance'].endpoints")
                             | first
                             | selectattr('interface', 'equalto', 'public')
                             | map(attribute='url') | first }}"

  - name: Get flavors with required values for testing
    uri:
      url: "{{ nova_url }}/flavors/detail?minRam={{ min_node_ram_testing }}&minDisk={{ min_node_disk_testing }}"
      method: GET
      headers:
        X-Auth-Token: "{{ auth_token }}"
        Accept: application/vnd.openstack.compute.v2.1+json
      return_content: yes
      follow_redirects: all
    register: flavors_result_testing

  - name: Get flavors with required values for production
    uri:
      url: "{{ nova_url }}/flavors/detail?minRam={{ min_node_ram_prod }}&minDisk={{ min_node_disk_prod }}"
      method: GET
      headers:
        X-Auth-Token: "{{ auth_token }}"
        Accept: application/vnd.openstack.compute.v2.1+json
      return_content: yes
      follow_redirects: all
    register: flavors_result_prod

  - name: Set matching_flavors_testing variable
    set_fact:
      matching_flavors_testing: "{{ flavors_result_testing.json.flavors
                                    | list | length > 0 }}"

  - name: Set matching_flavors_prod variable
    set_fact:
      matching_flavors_prod: "{{ flavors_result_prod.json.flavors
                                 | selectattr('vcpus', 'ge', 4)
                                 | list
                                 | length > 0 }}"

  # Get hypervisor stats from nova and check if there are sufficient
  # available resources.
  - name: Get hypervisor details from nova
    uri:
      url: "{{ nova_url }}/os-hypervisors/statistics"
      method: GET
      headers:
        X-Auth-Token: "{{ auth_token }}"
        Accept: application/vnd.openstack.compute.v2.1+json
      return_content: yes
      follow_redirects: all
    register: hypervisors_result

  - name: Set hypervisor stats
    set_fact: hv_stats="{{ hypervisors_result.json.hypervisor_statistics }}"

  - name: Set flag whether min resources for testing are available
    set_fact: resource_reqs_testing=True
    when: hv_stats.disk_available_least >= min_total_disk_testing
          and hv_stats.free_ram_mb >= min_total_ram_testing
          and hv_stats.vcpus - hv_stats.vcpus_used >= min_total_vcpus_testing

  - name: Set flag whether min resources for production are available
    set_fact: resource_reqs_prod=True
    when: hv_stats.disk_available_least >= min_total_disk_prod
          and hv_stats.free_ram_mb >= min_total_ram_prod
          and hv_stats.vcpus - hv_stats.vcpus_used >= min_total_vcpus_prod

  # Get overcloud images from Glance and check if there is one named either
  # rhel or centos.
  - name: Get images from glance
    uri:
      url: "{{ glance_url }}/v2/images"
      method: GET
      headers:
        X-Auth-Token: "{{ auth_token }}"
      return_content: yes
      follow_redirects: all
    register: images

  - name: Find matching images
    set_fact:
      matching_image: "{{ images.json.images
                          | map(attribute='name')
                          | map('lower')
                          | select('search', '(centos|rhel)')
                          | list | length | int > 0 }}"

  - name: Create warning message
    set_fact:
      warning_msg: |
        {{ lookup('template', './templates/openshift-hw-requirements-warnings.j2') }}

  - name: Fail if minimum requirements aren't met
    fail: msg="{{ warning_msg }}"
    when: not matching_flavors_testing
          or not matching_image
          or not resource_reqs_testing

  - name: Warn if production requirements aren't met
    warn: msg="{{ warning_msg }}"
    when: not matching_flavors_prod
          or not matching_image
          or not resource_reqs_prod
|
|
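For reference, the token-plus-catalog dance in the play above maps onto a few lines of Python. The sketch below is illustrative only: it assumes python-requests is installed and uses placeholder credentials and a placeholder Keystone URL, not values from this repository.

# Hypothetical sketch: authenticate against Keystone v3 and pull the public
# Nova endpoint out of the service catalog, mirroring the uri task and the
# json_query/selectattr filter chain above.
import requests

keystone_url = "http://192.0.2.1:5000"   # assumed overcloud Keystone URL
body = {"auth": {
    "scope": {"project": {"name": "admin", "domain": {"id": "default"}}},
    "identity": {"methods": ["password"],
                 "password": {"user": {"name": "admin",
                                       "domain": {"id": "default"},
                                       "password": "secret"}}}}}
resp = requests.post(keystone_url + "/v3/auth/tokens", json=body)
resp.raise_for_status()
token = resp.headers["X-Subject-Token"]  # same as keystone_result.x_subject_token

# catalog[?name=='nova'].endpoints, then pick the 'public' interface
nova_endpoints = next(svc["endpoints"]
                      for svc in resp.json()["token"]["catalog"]
                      if svc["name"] == "nova")
nova_url = next(ep["url"] for ep in nova_endpoints
                if ep["interface"] == "public")
print(nova_url)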
@@ -1,82 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Check network requirements for an OpenShift on OpenStack deployment
      description: |
        Checks if an external network has been configured on the overcloud as
        required for an OpenShift deployment on top of OpenStack.
      groups:
        - openshift-on-openstack

  tasks:
    - name: Set fact to identify if the overcloud was deployed
      set_fact:
        overcloud_deployed: "{{ groups['overcloud'] is defined }}"

    - name: Warn if no overcloud deployed yet
      warn:
        msg: >-
          This validation should be executed on the Undercloud with a working
          Overcloud.
      when: not overcloud_deployed|bool

    - when: overcloud_deployed|bool
      block:
        # Get auth token and service catalog from Keystone and extract service urls.
        - name: Get token and catalog from Keystone
          uri:
            url: "{{ overcloud_keystone_url
                     | urlsplit('scheme') }}://{{ overcloud_keystone_url
                     | urlsplit('netloc') }}/v3/auth/tokens"
            method: POST
            body_format: json
            body:
              auth:
                scope:
                  project:
                    name: admin
                    domain:
                      id: default
                identity:
                  methods:
                    - password
                  password:
                    user:
                      name: admin
                      domain:
                        id: default
                      password: "{{ overcloud_admin_password }}"
            return_content: yes
            status_code: 201
          register: keystone_result
          when: overcloud_keystone_url|default('')

        - name: Set auth token
          set_fact: token="{{ keystone_result.x_subject_token }}"
        - name: Get Neutron URL from catalog
          set_fact: neutron_url="{{ keystone_result.json.token
                              | json_query("catalog[?name=='neutron'].endpoints")
                              | first
                              | selectattr('interface', 'equalto', 'public')
                              | map(attribute='url') | first }}"

        # Get overcloud networks from Neutron and check if there is
        # a network with a common name for external networks.
        - name: Get networks from Neutron
          uri:
            url: "{{ neutron_url }}/v2.0/networks?router:external=true"
            method: GET
            headers:
              X-Auth-Token: "{{ token }}"
            return_content: yes
            follow_redirects: all
          register: networks_result

        - name: Warn if there are no matching networks
          warn:
            msg: |
              No external network found. It is strongly recommended that you
              configure an external Neutron network with a floating IP address
              pool.
          when: networks_result.json.networks | length == 0
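The Neutron query at the end of that play is a single GET with the token. A minimal Python equivalent, again assuming python-requests and placeholder values for the endpoint and token:

# Hypothetical sketch: list external networks the way the uri task does.
import requests

neutron_url = "http://192.0.2.1:9696"  # assumed public Neutron endpoint
token = "gAAAA..."                     # token obtained as in the previous sketch
resp = requests.get(neutron_url + "/v2.0/networks",
                    params={"router:external": "true"},
                    headers={"X-Auth-Token": token})
resp.raise_for_status()
if not resp.json()["networks"]:
    print("No external network found.")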
@@ -1,61 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Check connectivity to various OpenStack services
      # TODO: this could also check for undercloud endpoints
      description: >
        This validation gets the PublicVip address from the deployment and
        tries to access Horizon and get a Keystone token.
      groups:
        - post-deployment
        - pre-upgrade
        - post-upgrade
  tasks:
    - name: Set fact to identify if the overcloud was deployed
      set_fact:
        overcloud_deployed: "{{ groups['overcloud'] is defined }}"

    # Check that the Horizon endpoint exists
    - name: Fail if the HorizonPublic endpoint is not defined
      fail: msg="The `HorizonPublic` endpoint is not defined in the `EndpointMap` of the deployed stack. This means Horizon may not have been deployed correctly."
      when:
        - overcloud_horizon_url|default('') | length == 0
        - overcloud_deployed|bool

    # Check connectivity to horizon
    - name: Check Horizon
      uri: url={{ overcloud_horizon_url }}
      when: overcloud_horizon_url|default('')

    # Check that the Keystone endpoint exists
    - name: Fail if KeystoneURL output is not available
      fail: msg="The `KeystoneURL` output is not available in the deployed stack."
      when:
        - overcloud_keystone_url|default('') | length == 0
        - overcloud_deployed|bool

    # Check that we can obtain an auth token from keystone
    - name: Check Keystone
      uri:
        url: "{{ overcloud_keystone_url | urlsplit('scheme') }}://{{ overcloud_keystone_url | urlsplit('netloc') }}/v3/auth/tokens"
        method: POST
        body_format: json
        body:
          auth:
            identity:
              methods:
                - password
              password:
                user:
                  name: admin
                  domain:
                    name: Default
                  password: "{{ overcloud_admin_password }}"
        return_content: yes
        status_code: 201
      register: auth_token
      when: overcloud_keystone_url|default('')


    # TODO(shadower): other endpoints
@@ -1,23 +0,0 @@
---
- hosts: ComputeOvsDpdk
  vars:
    metadata:
      name: Validate OVS DPDK PMD cores from all NUMA nodes
      description: >
        OVS DPDK PMD CPUs must be provided from all NUMA nodes.

        A failed status post-deployment indicates the PMD CPU list is not
        configured correctly.
      groups:
        - post-deployment
  become: true

  tasks:
    - name: Get OVS DPDK PMD cores mask value
      become_method: sudo
      register: pmd_cpu_mask
      command: ovs-vsctl --no-wait get Open_vSwitch . other_config:pmd-cpu-mask
      changed_when: False

    - name: Run OVS DPDK PMD cores check
      ovs_dpdk_pmd_cpus_check: pmd_cpu_mask={{ pmd_cpu_mask.stdout }}
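The custom ovs_dpdk_pmd_cpus_check module receives the raw pmd-cpu-mask value, which is a hex bitmask of CPU IDs. A self-contained sketch of how such a mask can be decoded into a CPU list (mapping each CPU to its NUMA node would then come from sysfs or lscpu, which the module handles itself):

# Hypothetical sketch: expand an OVS pmd-cpu-mask into the CPU IDs it selects.
def cpus_in_mask(mask: str):
    value = int(mask.strip().strip('"'), 16)  # ovs-vsctl prints the value quoted
    return [cpu for cpu in range(value.bit_length()) if value >> cpu & 1]

print(cpus_in_mask('"0x30003"'))  # -> [0, 1, 16, 17]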
@@ -1,30 +0,0 @@
---
- hosts: Controller
  vars:
    metadata:
      name: Check the status of the pacemaker cluster
      description: >
        This runs `pcs status` and checks for any failed actions.

        A failed status post-deployment indicates something is not configured
        correctly. This should also be run before upgrade as the process will
        likely fail with a cluster that's not completely healthy.
      groups:
        - post-deployment
  tasks:
    - name: Check pacemaker service is running
      become: True
      command: "/usr/bin/systemctl show pacemaker --property ActiveState"
      register: check_service
      changed_when: False
      ignore_errors: True

    - when: "check_service.stdout == 'ActiveState=active'"
      block:
        - name: Get pacemaker status
          become: true
          command: pcs status xml
          register: pcs_status
          changed_when: False
        - name: Check pacemaker status
          pacemaker: status="{{ pcs_status.stdout }}"
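The pacemaker module consumes the XML emitted by `pcs status xml` (crm_mon output). A rough sketch of checking it for failed actions with the standard library, assuming the usual crm_mon XML layout with a top-level <failures> element:

# Hypothetical sketch: look for failed actions in `pcs status xml` output.
import subprocess
import xml.etree.ElementTree as ET

xml_text = subprocess.run(["pcs", "status", "xml"],
                          capture_output=True, text=True, check=True).stdout
root = ET.fromstring(xml_text)
failures = root.find("failures")
if failures is not None and len(failures):
    for failure in failures:
        print("failed action:", failure.attrib)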
@@ -1,28 +0,0 @@
---
- hosts: Controller
  vars:
    metadata:
      name: Rabbitmq limits
      description: >
        Make sure the rabbitmq file descriptor limits are set to reasonable values.
      groups:
        - post-deployment
    min_fd_limit: 16384
  tasks:
    - name: Set container_cli fact from the inventory
      set_fact:
        container_cli: "{{ hostvars[inventory_hostname].container_cli }}"

    - name: Get file_descriptors total_limit
      become: true
      register: actual_fd_limit
      shell: >
        "{{ container_cli }}" exec $("{{ container_cli }}" ps -q --filter "name=rabbitmq" | head -1)
        rabbitmqctl eval 'proplists:get_value(max_fds, erlang:system_info(check_io)).'
      changed_when: false

    - name: Verify the actual limit exceeds the minimal value
      fail:
        msg: >-
          {{ actual_fd_limit.stdout }} must be greater than or equal to {{ min_fd_limit }}
      failed_when: "actual_fd_limit.stdout|int < min_fd_limit"
@@ -1,43 +0,0 @@
---
- hosts: undercloud, overcloud
  vars:
    metadata:
      name: Check correctness of current repositories
      description: >
        Detect whether the repositories listed in `yum repolist`
        can be connected to and that there is at least one repo
        configured.

        Detect if there are any unwanted repositories (such as EPEL) enabled.
      groups:
        - pre-upgrade
  tasks:
    - name: List repositories
      command: 'yum repolist -v'
      args:
        warn: no
      changed_when: False
      register: repositories
    - name: Find repository URLs
      shell: 'echo "{{ repositories.stdout }}" | grep Repo-baseurl | sed "s/Repo-baseurl.*\(http[^ ]*\).*/\1/g"'
      register: repository_urls
      changed_when: False
    - name: Check if there is at least one repository baseurl
      fail:
        msg: No repository found in yum repolist
      when: repository_urls.stdout_lines|length < 1
    - name: Call repository URLs
      uri:
        url: "{{ item }}"
      with_items: "{{ repository_urls.stdout_lines }}"
    - name: Find repository IDs
      changed_when: False
      shell: 'echo "{{ repositories.stdout }}" | grep Repo-id | sed "s/Repo-id.*://" | tr -d " "'
      register: repository_ids
    - name: Check if there are any unwanted repositories enabled
      fail:
        msg: Found unwanted repository {{ item.0 }} enabled
      when: item.0 == item.1
      with_nested:
        - [ 'epel/x86_64' ]
        - "{{ repository_ids.stdout_lines }}"
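The grep/sed pipeline above just pulls the baseurl values out of `yum repolist -v` and probes each one. The same extraction can be sketched in Python; the regex below is a hedged approximation of the verbose repolist format, and the input file name is a placeholder:

# Hypothetical sketch: extract Repo-baseurl values and probe each one.
import re
import urllib.request

def repo_baseurls(repolist_output: str):
    return re.findall(r"Repo-baseurl\s*:\s*(http\S+)", repolist_output)

for url in repo_baseurls(open("repolist.txt").read()):  # assumed captured output
    urllib.request.urlopen(url, timeout=10)             # raises if unreachable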
@@ -1,18 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Stack Health Check
      description: >
        Check if all stack resources are in a *_COMPLETE state before starting
        an upgrade.
      groups:
        - pre-upgrade
        - post-upgrade
  tasks:
    - name: Check stack resource statuses
      assert:
        that:
          - "'_COMPLETE' in item.resource_status"
        msg: "Health check failed for resource {{ item.resource_name }} with status: {{ item.resource_status }}"
      with_items: "{{ lookup('stack_resources', wantlist=True) }}"
@@ -1,35 +0,0 @@
---
- hosts: Controller
  vars:
    metadata:
      name: Validate stonith devices
      description: >
        Verify that stonith devices are configured for your OpenStack Platform
        HA cluster. TripleO does not configure stonith devices, because the
        hardware configuration differs between environments and requires
        different fence agents. For instructions on configuring fencing, see
        https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
      groups:
        - post-deployment
  become: true

  tasks:
    - name: Check if we are in HA cluster environment
      register: pcs_cluster_status
      command: pcs cluster status
      failed_when: false
      changed_when: false

    - name: Get all currently configured stonith devices
      when: "pcs_cluster_status.rc == 0"
      register: stonith_devices
      command: "pcs stonith"
      changed_when: false

    - name: Verify that stonith devices are configured
      fail:
        msg: "Stonith devices are not configured."
      when: >
        pcs_cluster_status.rc == 0
        and
        'NO stonith devices configured' in stonith_devices.stdout
@@ -1,30 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Compare switch port VLANs to VLANs in nic config
      description: >
        LLDP data received during introspection contains the configured VLANs
        for each switch port attached to the nodes' interfaces. Compare the
        VLAN IDs set on the switch port to those configured in nic config
        files. Since the mapping of roles to nodes isn't known prior to
        deployment, this check can only check VLANs across all switch ports,
        not on a particular switch port.
      groups:
        - pre-deployment
    network_environment_path: environments/network-environment.yaml
  tasks:
    - name: Get Ironic Inspector swift auth_url
      become: true
      ini: path=/var/lib/config-data/puppet-generated/ironic_inspector/etc/ironic-inspector/inspector.conf section=swift key=auth_url
      register: auth_url
    - name: Get Ironic Inspector swift password
      become: true
      ini: path=/var/lib/config-data/puppet-generated/ironic_inspector/etc/ironic-inspector/inspector.conf section=swift key=password
      register: password
    - name: Check that switch vlans are present if used in nic-config files
      switch_vlans:
        path: "{{ network_environment_path }}"
        template_files: "{{ lookup('tht') }}"
        introspection_data: "{{ lookup('introspection_data',
                                auth_url=auth_url.value, password=password.value) }}"
@@ -1,29 +0,0 @@
- name: gather docker facts
  docker_facts:
    container_filter: status=running
  become: yes

- name: compare running containers to list
  set_fact:
    container_difference: "{{ running_containers | difference(docker.containers_filtered | map(attribute='name') | list) }}"

- block:
    - name: check appropriate running containers against list - if FAILED, check next task
      assert:
        that: "{{ container_difference | length == 0 }}"
  rescue:
    - name: following containers found to be NOT running
      debug:
        var: container_difference

- name: check appropriate ports are listening
  wait_for:
    host: "{{ listening_ip }}"
    port: "{{ item.port | default(item) }}"
    search_regex: "{{ item.search_regex | default(omit) }}"
    state: started    # Port should be open
    delay: 0          # No wait before first check (sec)
    timeout: 3        # Stop checking after timeout (sec)
  ignore_errors: yes
  loop: "{{ open_ports }}"
  when: ctlplane_ip is defined
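The wait_for task's `state: started` check essentially boils down to a TCP connect with a short timeout (the optional search_regex additionally inspects the banner). A minimal Python equivalent, with the host and port list below as placeholder assumptions:

# Hypothetical sketch: check that expected ports accept connections.
import socket

def port_open(host: str, port: int, timeout: float = 3.0) -> bool:
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

for port in (5000, 9696, 8774):  # assumed service ports
    print(port, "open" if port_open("192.0.2.1", port) else "closed")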
@@ -1,36 +0,0 @@
- name: Set a constant defining number of Bytes in 1 GB
  set_fact:
    const_bytes_in_gb: 1073741824

- name: Stat volume directories
  stat:
    path: "{{ item.mount }}"
  with_items: "{{ volumes }}"
  register: volumes_stat

- name: Initialize existing_volumes to an empty array
  set_fact:
    existing_volumes="{{ [] }}"

- name: Filter out non-existing volumes
  set_fact:
    existing_volumes: "{{ existing_volumes + [item.item] }}"
  with_items: "{{ volumes_stat.results }}"
  when: item.stat.exists
  loop_control:
    label: "{{ item.item.mount }}"

- name: Loop on volumes and gather available space
  shell: df -B1 {{ item.mount }} --output=avail | sed 1d
  register: volume_size
  with_items: "{{ existing_volumes }}"
  changed_when: False

- name: Fail if any of the volumes are too small
  fail:
    msg: "Minimum free space required for {{ item.item.mount }}: {{ item.item.min_size }}G - current free space: {{ (item.stdout|int / const_bytes_in_gb|int) |round(1) }}G"
  when: >
    item.stdout|int / const_bytes_in_gb|int < item.item.min_size|int
  with_items: "{{ volume_size.results }}"
  loop_control:
    label: "{{ item.item.mount }}"
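The df call reports available bytes, which the final task compares against each volume's min_size in GB. The same check can be sketched with os.statvfs from the standard library (f_bavail * f_frsize matches df's notion of space available to unprivileged users); the mount points and minimums below are placeholders:

# Hypothetical sketch: verify free space per volume, mirroring the tasks above.
import os

BYTES_IN_GB = 1073741824

def free_gb(path: str) -> float:
    st = os.statvfs(path)
    return st.f_bavail * st.f_frsize / BYTES_IN_GB

for mount, min_size in [("/var", 16), ("/", 20)]:  # assumed volume list
    if os.path.exists(mount) and free_gb(mount) < min_size:
        print(f"Minimum free space required for {mount}: {min_size}G "
              f"- current free space: {free_gb(mount):.1f}G")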
@@ -1,17 +0,0 @@
While checking the hardware requirements for an OpenShift deployment, the following problems were detected:

{% if not matching_image %}
- No image with name "centos" or "rhel" could be found.
{% endif %}
{% if not matching_flavors_testing %}
- There is no flavor available that meets the hardware requirements for a test setup.
{% endif %}
{% if not matching_flavors_prod %}
- There is no flavor available that meets the hardware requirements for a production setup.
{% endif %}
{% if not resource_reqs_testing %}
- The resources necessary for a default test setup are not available on the hypervisors.
{% endif %}
{% if not resource_reqs_prod %}
- The resources necessary for a default production setup are not available on the hypervisors.
{% endif %}
@@ -1,9 +0,0 @@
The following nodes could not be reached ({{ nodes|length }} nodes):

{% for node in nodes %}
* {{ node.name }}
  UUID: {{ node.uuid }}
  Instance: {{ node.instance_uuid }}
  Last Error: {{ node.last_error }}
  Power State: {{ node.power_state }}
{% endfor %}
@@ -1,17 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Verify undercloud fits the CPU core requirements
      description: >
        Make sure that the undercloud has enough CPU cores.

        https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux_OpenStack_Platform/7/html/Director_Installation_and_Usage/sect-Undercloud_Requirements.html
      groups:
        - prep
        - pre-introspection
    min_undercloud_cpu_count: 8
  tasks:
    - name: Verify the number of CPU cores
      fail: msg="There are {{ ansible_processor_vcpus }} cores in the system, but there should be at least {{ min_undercloud_cpu_count }}"
      failed_when: "ansible_processor_vcpus|int < min_undercloud_cpu_count|int"
@@ -1,25 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Undercloud Services Debug Check
      description: >
        The undercloud's OpenStack services should _not_ have debug enabled.
        This will check if debug is enabled on undercloud services.
        If debug is enabled, the root filesystem can fill up quickly, which
        is not desirable.
      groups:
        - pre-deployment
    debug_check: "True"
  tasks:
    - name: Check the services for debug flag
      become: true
      ini: path={{ item }} section=DEFAULT key=debug ignore_missing_file=True
      register: config_result
      with_items:
        - /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf
        - /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf
        - /var/lib/config-data/puppet-generated/ceilometer/etc/ceilometer/ceilometer.conf
        - /var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf
        - /var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf
      failed_when: "debug_check|bool == config_result.value|bool"
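The custom ini module above reads DEFAULT/debug from each service's config file. With the standard library the equivalent probe looks roughly like this (paths are the same puppet-generated ones; error handling is simplified):

# Hypothetical sketch: flag services with debug enabled in their config.
import configparser

CONFIGS = [
    "/var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf",
    "/var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf",
]

for path in CONFIGS:
    parser = configparser.ConfigParser(strict=False)
    if parser.read(path) and parser.getboolean("DEFAULT", "debug", fallback=False):
        print(f"debug is enabled in {path}")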
@@ -1,20 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Verify undercloud fits the disk space requirements to perform an upgrade
      description: >
        Make sure that the root partition on the undercloud node has enough
        free space before starting an upgrade

        http://tripleo.org/install/environments/baremetal.html#minimum-system-requirements
      groups:
        - pre-upgrade
    volumes:
      - {mount: /var/lib/docker, min_size: 10}
      - {mount: /var/lib/config-data, min_size: 3}
      - {mount: /var, min_size: 16}
      - {mount: /, min_size: 20}

  tasks:
    - include_tasks: tasks/disk_space.yaml
@@ -1,23 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Verify undercloud fits the disk space requirements
      description: >
        Make sure that the root partition on the undercloud node has enough
        free space.

        http://tripleo.org/install/environments/baremetal.html#minimum-system-requirements
      groups:
        - prep
        - pre-introspection
    volumes:
      - {mount: /var/lib/docker, min_size: 10}
      - {mount: /var/lib/config-data, min_size: 3}
      - {mount: /var/log, min_size: 3}
      - {mount: /usr, min_size: 5}
      - {mount: /var, min_size: 20}
      - {mount: /, min_size: 25}

  tasks:
    - include_tasks: tasks/disk_space.yaml
@@ -1,22 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Verify heat-manage purge_deleted is enabled in crontab
      description: >
        Without a purge_deleted crontab enabled, the
        heat database can grow very large. This validation checks that
        the purge_deleted crontab has been set up.
      groups:
        - pre-upgrade
        - pre-deployment
    cron_check: "heat-manage purge_deleted"
  tasks:
    - name: Get heat crontab
      become: true
      shell: 'docker exec heat_api_cron crontab -l -u heat |grep -v "^#"'
      register: cron_result
      changed_when: False
    - name: Check heat crontab
      fail: msg="heat-manage purge_deleted does not appear to be enabled via cron. You should add '<desired interval> {{ cron_check }}' to the heat user's crontab."
      failed_when: "cron_check not in cron_result.stdout"
@@ -1,66 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Undercloud Neutron Sanity Check
      description: >
        Run `neutron-sanity-check` on the undercloud node to find out
        potential issues with Neutron's configuration.

        The tool expects all the configuration files that are passed
        to the Neutron services.

      groups:
        - pre-introspection

    # The list of Neutron configuration files and directories that
    # will be passed to the Neutron services. The order is important
    # here: the values in later files take precedence.
    configs:
      - /etc/neutron/neutron.conf
      - /usr/share/neutron/neutron-dist.conf
      - /etc/neutron/metadata_agent.ini
      - /etc/neutron/dhcp_agent.ini
      - /etc/neutron/plugins/ml2/openvswitch_agent.ini
      - /etc/neutron/fwaas_driver.ini
      - /etc/neutron/l3_agent.ini
      - /usr/share/neutron/neutron-lbaas-dist.conf
      - /etc/neutron/lbaas_agent.ini

  tasks:
    - name: Run neutron-sanity-check
      command: "docker exec -u root neutron_ovs_agent /bin/bash -c 'neutron-sanity-check --config-file {{ item }}'"
      with_items: "{{ configs }}"
      become: true
      register: nsc_return
      ignore_errors: true
      changed_when: False

    - name: Detect errors
      set_fact:
        has_errors: "{{ nsc_return.results
                        | sum(attribute='stderr_lines', start=[])
                        | select('search', '(ERROR)')
                        | list | length | int > 0 }}"

    - name: Detect warnings
      set_fact:
        has_warnings: "{{ nsc_return.results
                          | sum(attribute='stderr_lines', start=[])
                          | select('search', '(WARNING)')
                          | list | length | int > 0 }}"

    - name: Create output
      set_fact:
        output_msg: "{{ nsc_return.results
                        | sum(attribute='stderr_lines', start=[])
                        | select('search', '(ERROR|WARNING)')
                        | list }}"

    - name: Output warning
      warn: msg="{{ output_msg | join('\n') }}"
      when: has_warnings and not has_errors

    - name: Fail
      fail: msg="{{ output_msg | join('\n') }}"
      when: has_errors
@@ -1,49 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Check the number of OpenStack processes on undercloud
      description: >
        The default setting for OpenStack services is to run one process
        (heat-engine, keystone, etc.) per CPU core. On a machine with a lot
        of cores this is both unnecessary and can consume a significant
        amount of RAM, leading to crashes due to OOMKiller.
      groups:
        - pre-deployment
    max_process_count: 8
  tasks:
    - name: Collect the number of running processes per OpenStack service
      command: 'docker exec {{ item.container }} pgrep -f -c {{ item.proc }}'
      become: true
      ignore_errors: yes
      register: "process_count"
      changed_when: False
      loop:
        - {container: "heat_engine", proc: "heat-engine"}
        - {container: "ironic_inspector", proc: "ironic-inspector"}
        - {container: "ironic_conductor", proc: "ironic-conductor"}
        - {container: "nova_api", proc: "nova_api"}
        - {container: "nova_scheduler", proc: "nova-scheduler"}
        - {container: "nova_conductor", proc: "nova-conductor"}
        - {container: "nova_compute", proc: "nova-compute"}
        - {container: "glance_api", proc: "glance-api"}
        - {container: "swift_proxy", proc: "swift-proxy-server"}
        - {container: "swift_object_server", proc: "swift-object-server"}
        - {container: "swift_container_server", proc: "swift-container-server"}
        - {container: "zaqar", proc: "zaqar"}
        - {container: "zaqar_websocket", proc: "zaqar-server"}
        - {container: "mistral_api", proc: "mistral-server"}
        - {container: "mistral_engine", proc: "mistral-server"}
        - {container: "mistral_executor", proc: "mistral-server"}

    - name: Create warning messages
      command: echo "There are {{ item.stdout }} {{ item.item }} processes running. Having more than {{ max_process_count }} risks running out of memory."
      register: process_warnings
      with_items: "{{ process_count.results }}"
      when: "item.stdout|int > max_process_count"

    - name: Output warning message
      warn: msg={{ warning_msg }}
      when: "warning_msg|length > 0"
      vars:
        warning_msg: "{{ process_warnings.results|selectattr('changed')|map(attribute='stdout')|join('\n') }}"
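Each loop item runs `pgrep -f -c` inside the service's container. The same probe can be sketched in Python; the container CLI, names, and threshold below are assumptions mirroring the play's defaults:

# Hypothetical sketch: count processes per service container via pgrep -c.
import subprocess

MAX_PROCESS_COUNT = 8

def proc_count(container: str, pattern: str, cli: str = "docker") -> int:
    result = subprocess.run([cli, "exec", container, "pgrep", "-f", "-c", pattern],
                            capture_output=True, text=True)
    return int(result.stdout.strip() or 0)

count = proc_count("heat_engine", "heat-engine")
if count > MAX_PROCESS_COUNT:
    print(f"There are {count} heat-engine processes running. Having more than "
          f"{MAX_PROCESS_COUNT} risks running out of memory.")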
@@ -1,19 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Verify the undercloud fits the RAM requirements
      description: >
        Verify that the undercloud has enough RAM.

        https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/14/html/director_installation_and_usage/planning-your-undercloud#determining-environment-scale
      groups:
        - prep
        - pre-introspection
        - pre-upgrade
    min_undercloud_ram_gb: 24
  tasks:
    - name: Verify the RAM requirements
      fail: msg="The RAM on the undercloud node is {{ ansible_memtotal_mb }} MB, the minimal recommended value is {{ min_undercloud_ram_gb|int * 1024 }} MB."
      # NOTE(shadower): converting GB to MB
      failed_when: "(ansible_memtotal_mb) < min_undercloud_ram_gb|int * 1024"
@@ -1,22 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Verify undercloud services state before running update or upgrade
      description: >
        Check undercloud status before running a stack update, especially a
        minor update or major upgrade.
      groups:
        - post-upgrade
        - pre-upgrade
  tasks:
    - name: Check Services are running
      command: "/usr/bin/systemctl show {{ item }} --property ActiveState"
      become: true
      with_items: "{{ undercloud_service_list }}"
      register: "check_services"
      changed_when: False
      ignore_errors: true
    - name: Fail if services were not running
      fail: msg="One of the undercloud services was not active. Please check {{ item.item }} first and then confirm the status of undercloud services in general before attempting to update or upgrade the environment."
      failed_when: "item.stdout != 'ActiveState=active'"
      with_items: "{{ check_services.results }}"
@@ -1,25 +0,0 @@
---
- hosts: undercloud
  vars:
    metadata:
      name: Verify token_flush is enabled in the keystone user's crontab
      description: >
        Without a token_flush crontab enabled for the keystone user, the
        keystone database can grow very large. This validation checks that
        the keystone token_flush crontab has been set up.
      groups:
        - pre-introspection
    cron_check: "keystone-manage token_flush"
  tasks:
    - name: Get keystone crontab
      become: true
      shell: 'docker exec keystone_cron crontab -l -u keystone |grep -v "^#"'
      register: cron_result
      changed_when: False
    - name: Check keystone crontab
      fail:
        msg: >-
          keystone token_flush does not appear to be enabled via cron. You
          should add '<desired interval> {{ cron_check }}' to the keystone
          user's crontab.
      failed_when: "cron_check not in cron_result.stdout"