Remove playbookconfig from StarlingX config repo
This commit is part of a multi-commit move of Ansible playbooks to the new
repo that hosts playbooks and artifacts related to deployment of StarlingX.

Tests: Install and run bootstrap in simplex and standard systems.

Story: 2004695
Task: 33567
Depends-On: Iddbe2cb8105ede96d29e2a1d4bb29031a36f327f
Change-Id: I60b9bce3f3d23a2316b3a24c48006b71ff3ecd52
Signed-off-by: Tee Ngo <tee.ngo@windriver.com>
parent feba4d21cb
commit eb47ee4585
@@ -32,15 +32,6 @@ config-gate-worker
# puppet-manifests
puppet-manifests

# ansible
sshpass
python2-ptyprocess
python2-pexpect
ansible

# playbookconfig
playbookconfig

# Platform helm charts
stx-platform-helm
@@ -18,6 +18,5 @@ puppet-modules-wrs/puppet-dcorch
puppet-modules-wrs/puppet-dcmanager
puppet-modules-wrs/puppet-smapi
puppet-modules-wrs/puppet-fm
playbookconfig
puppet-modules-wrs/puppet-dcdbsync
pm-qos-mgr
@@ -1,12 +0,0 @@
Metadata-Version: 1.0
Name: playbookconfig
Version: 1.0
Summary: Ansible Playbooks for StarlingX Configurations
Home-page: https://wiki.openstack.org/wiki/StarlingX
Author: Windriver
Author-email: starlingx-discuss@lists.starlingx.io
License: Apache-2.0

Description: This package contains playbooks used for StarlingX Configurations

Platform: UNKNOWN
@@ -1,2 +0,0 @@
SRC_DIR="playbookconfig"
TIS_PATCH_VER=2
@@ -1,45 +0,0 @@
Name: playbookconfig
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
Summary: Ansible Playbooks for StarlingX Configurations

Group: base
License: Apache-2.0
URL: unknown
Source0: %{name}-%{version}.tar.gz

Requires: python
Requires: python-netaddr
Requires: sshpass
Requires: python2-ptyprocess
Requires: python2-pexpect
Requires: ansible

%description
This package contains playbooks used for configuring StarlingX.

%define local_stx_ansible_dir %{_datadir}/ansible/stx-ansible
%define local_etc_ansible /etc/ansible
%define debug_package %{nil}

%prep
%setup -q

%build

%install
make install DESTDIR=%{buildroot}%{local_stx_ansible_dir}

%post
cp %{local_stx_ansible_dir}/playbooks/bootstrap/ansible.cfg %{local_etc_ansible}
cp %{local_stx_ansible_dir}/playbooks/bootstrap/hosts %{local_etc_ansible}
chmod 644 %{local_etc_ansible}/ansible.cfg
chmod 644 %{local_etc_ansible}/hosts

%clean
rm -rf $RPM_BUILD_ROOT

%files
%defattr(-,root,root,-)
%doc LICENSE
%{local_stx_ansible_dir}/*
@@ -1,202 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -1,9 +0,0 @@
#
# SPDX-License-Identifier: Apache-2.0
#

DESTDIR ?= /usr/share/ansible/stx-ansible

install:
	install -d -m 0755 $(DESTDIR)/playbooks
	cp -R playbooks/ $(DESTDIR)/
@@ -1,497 +0,0 @@
# config file for ansible -- https://ansible.com/
# ===============================================

# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first

[defaults]

# some basic default values...

#inventory = /etc/ansible/hosts
#library = /usr/share/my_modules/
#module_utils = /usr/share/my_module_utils/
remote_tmp = /tmp/.ansible-${USER}/tmp
#local_tmp = ~/.ansible/tmp
#plugin_filters_cfg = /etc/ansible/plugin_filters.yml
#forks = 5
#poll_interval = 15
#sudo_user = root
#ask_sudo_pass = True
#ask_pass = True
#transport = smart
#remote_port = 22
#module_lang = C
#module_set_locale = False

# plays will gather facts by default, which contain information about
# the remote system.
#
# smart - gather by default, but don't regather if already gathered
# implicit - gather by default, turn off with gather_facts: False
# explicit - do not gather by default, must say gather_facts: True
#gathering = implicit

# This only affects the gathering done by a play's gather_facts directive,
# by default gathering retrieves all facts subsets
# all - gather all subsets
# network - gather min and network facts
# hardware - gather hardware facts (longest facts to retrieve)
# virtual - gather min and virtual facts
# facter - import facts from facter
# ohai - import facts from ohai
# You can combine them using comma (ex: network,virtual)
# You can negate them using ! (ex: !hardware,!facter,!ohai)
# A minimal set of facts is always gathered.
#gather_subset = all

# some hardware related facts are collected
# with a maximum timeout of 10 seconds. This
# option lets you increase or decrease that
# timeout to something more suitable for the
# environment.
# gather_timeout = 10

# Ansible facts are available inside the ansible_facts.* dictionary
# namespace. This setting maintains the behaviour which was the default prior
# to 2.5, duplicating these variables into the main namespace, each with a
# prefix of 'ansible_'.
# This variable is set to True by default for backwards compatibility. It
# will be changed to a default of 'False' in a future release.
# ansible_facts.
# inject_facts_as_vars = True

# additional paths to search for roles in, colon separated
#roles_path = /etc/ansible/roles

# uncomment this to disable SSH key host checking
host_key_checking = False

# change the default callback, you can only have one 'stdout' type enabled at a time.
stdout_callback = skippy


## Ansible ships with some plugins that require whitelisting,
## this is done to avoid running all of a type by default.
## These settings list those that you want enabled for your system.
## Custom plugins should not need this unless plugin author specifies it.

# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
#callback_whitelist = timer, mail

# Determine whether includes in tasks and handlers are "static" by
# default. As of 2.0, includes are dynamic by default. Setting these
# values to True will make includes behave more like they did in the
# 1.x versions.
#task_includes_static = False
#handler_includes_static = False

# Controls if a missing handler for a notification event is an error or a warning
#error_on_missing_handler = True

# change this for alternative sudo implementations
#sudo_exe = sudo

# What flags to pass to sudo
# WARNING: leaving out the defaults might create unexpected behaviours
#sudo_flags = -H -S -n

# SSH timeout
#timeout = 10

# default user to use for playbooks if user is not specified
# (/usr/bin/ansible will use current user as default)
#remote_user = root

# logging is off by default unless this path is defined
# if so defined, consider logrotate
#log_path = /var/log/ansible.log
log_path = ~/ansible.log

# default module name for /usr/bin/ansible
#module_name = command

# use this shell for commands executed under sudo
# you may need to change this to bin/bash in rare instances
# if sudo is constrained
#executable = /bin/sh

# if inventory variables overlap, does the higher precedence one win
# or are hash values merged together? The default is 'replace' but
# this can also be set to 'merge'.
#hash_behaviour = replace

# by default, variables from roles will be visible in the global variable
# scope. To prevent this, the following option can be enabled, and only
# tasks and handlers within the role will see the variables there
#private_role_vars = yes

# list any Jinja2 extensions to enable here:
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n

# if set, always use this private key file for authentication, same as
# if passing --private-key to ansible or ansible-playbook
#private_key_file = /path/to/file

# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
#vault_password_file = /path/to/vault_password_file

# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.
# replacing {file}, {host} and {uid} and strftime codes with proper values.
#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
# in some situations so the default is a static string:
#ansible_managed = Ansible managed

# by default, ansible-playbook will display "Skipping [host]" if it determines a task
# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
# messages. NOTE: the task header will still be shown regardless of whether or not the
# task is skipped.
#display_skipped_hosts = True

# by default, if a task in a playbook does not include a name: field then
# ansible-playbook will construct a header that includes the task's action but
# not the task's args. This is a security feature because ansible cannot know
# if the *module* considers an argument to be no_log at the time that the
# header is printed. If your environment doesn't have a problem securing
# stdout from ansible-playbook (or you have manually specified no_log in your
# playbook on all of the tasks where you have secret information) then you can
# safely set this to True to get more informative messages.
#display_args_to_stdout = False

# by default (as of 1.3), Ansible will raise errors when attempting to dereference
# Jinja2 variables that are not set in templates or action lines. Uncomment this line
# to revert the behavior to pre-1.3.
#error_on_undefined_vars = False

# by default (as of 1.6), Ansible may display warnings based on the configuration of the
# system running ansible itself. This may include warnings about 3rd party packages or
# other conditions that should be resolved if possible.
# to disable these warnings, set the following value to False:
#system_warnings = True

# by default (as of 1.4), Ansible may display deprecation warnings for language
# features that should no longer be used and will be removed in future versions.
# to disable these warnings, set the following value to False:
#deprecation_warnings = True

# (as of 1.8), Ansible can optionally warn when usage of the shell and
# command module appear to be simplified by using a default Ansible module
# instead. These warnings can be silenced by adjusting the following
# setting or adding warn=yes or warn=no to the end of the command line
# parameter string. This will for example suggest using the git module
# instead of shelling out to the git command.
# command_warnings = False


# set plugin path directories here, separate with colons
#action_plugins = /usr/share/ansible/plugins/action
#cache_plugins = /usr/share/ansible/plugins/cache
#callback_plugins = /usr/share/ansible/plugins/callback
#connection_plugins = /usr/share/ansible/plugins/connection
#lookup_plugins = /usr/share/ansible/plugins/lookup
#inventory_plugins = /usr/share/ansible/plugins/inventory
#vars_plugins = /usr/share/ansible/plugins/vars
#filter_plugins = /usr/share/ansible/plugins/filter
#test_plugins = /usr/share/ansible/plugins/test
#terminal_plugins = /usr/share/ansible/plugins/terminal
#strategy_plugins = /usr/share/ansible/plugins/strategy


# by default, ansible will use the 'linear' strategy but you may want to try
# another one
#strategy = free

# by default callbacks are not loaded for /bin/ansible, enable this if you
# want, for example, a notification or logging callback to also apply to
# /bin/ansible runs
#bin_ansible_callbacks = False


# don't like cows? that's unfortunate.
# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
#nocows = 1

# set which cowsay stencil you'd like to use by default. When set to 'random',
# a random stencil will be selected for each task. The selection will be filtered
# against the `cow_whitelist` option below.
#cow_selection = default
#cow_selection = random

# when using the 'random' option for cowsay, stencils will be restricted to this list.
# it should be formatted as a comma-separated list with no spaces between names.
# NOTE: line continuations here are for formatting purposes only, as the INI parser
# in python does not support them.
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
#              hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
#              stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www

# don't like colors either?
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
#nocolor = 1

# if set to a persistent type (not 'memory', for example 'redis') fact values
# from previous runs in Ansible will be stored. This may be useful when
# wanting to use, for example, IP information from one group of servers
# without having to talk to them in the same playbook run to get their
# current IP information.
#fact_caching = memory

#This option tells Ansible where to cache facts. The value is plugin dependent.
#For the jsonfile plugin, it should be a path to a local directory.
#For the redis plugin, the value is a host:port:database triplet: fact_caching_connection = localhost:6379:0

#fact_caching_connection=/tmp


# retry files
# When a playbook fails by default a .retry file will be created in ~/
# You can disable this feature by setting retry_files_enabled to False
# and you can change the location of the files by setting retry_files_save_path

retry_files_enabled = False
#retry_files_save_path = ~/.ansible-retry

# squash actions
# Ansible can optimise actions that call modules with list parameters
# when looping. Instead of calling the module once per with_ item, the
# module is called once with all items at once. Currently this only works
# under limited circumstances, and only with parameters named 'name'.
#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper

# prevents logging of task data, off by default
#no_log = False

# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
#no_target_syslog = False

# controls whether Ansible will raise an error or warning if a task has no
# choice but to create world readable temporary files to execute a module on
# the remote machine. This option is False by default for security. Users may
# turn this on to have behaviour more like Ansible prior to 2.1.x. See
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
# for more secure ways to fix this than enabling this option.
#allow_world_readable_tmpfiles = False

# controls the compression level of variables sent to
# worker processes. At the default of 0, no compression
# is used. This value must be an integer from 0 to 9.
#var_compression_level = 9

# controls what compression method is used for new-style ansible modules when
# they are sent to the remote system. The compression types depend on having
# support compiled into both the controller's python and the client's python.
# The names should match with the python Zipfile compression types:
# * ZIP_STORED (no compression. available everywhere)
# * ZIP_DEFLATED (uses zlib, the default)
# These values may be set per host via the ansible_module_compression inventory
# variable
#module_compression = 'ZIP_DEFLATED'

# This controls the cutoff point (in bytes) on --diff for files
# set to 0 for unlimited (RAM may suffer!).
#max_diff_size = 1048576

# This controls how ansible handles multiple --tags and --skip-tags arguments
# on the CLI. If this is True then multiple arguments are merged together. If
# it is False, then the last specified argument is used and the others are ignored.
# This option will be removed in 2.8.
#merge_multiple_cli_flags = True

# Controls showing custom stats at the end, off by default
#show_custom_stats = True

# Controls which files to ignore when using a directory as inventory with
# possibly multiple sources (both static and dynamic)
#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo

# This family of modules use an alternative execution path optimized for network appliances
# only update this setting if you know how this works, otherwise it can break module execution
#network_group_modules=eos, nxos, ios, iosxr, junos, vyos

# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
# jinja2 templating language which will be run through the templating engine.
# ENABLING THIS COULD BE A SECURITY RISK
#allow_unsafe_lookups = False

# set default errors for all plays
#any_errors_fatal = False

[inventory]
# enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini', 'auto'
#enable_plugins = host_list, virtualbox, yaml, constructed

# ignore these extensions when parsing a directory as inventory source
#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry

# ignore files matching these patterns when parsing a directory as inventory source
#ignore_patterns=

# If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise.
#unparsed_is_failed=False

[privilege_escalation]
#become=True
#become_method=sudo
#become_user=root
#become_ask_pass=False

[paramiko_connection]

# uncomment this line to cause the paramiko connection plugin to not record new host
# keys encountered. Increases performance on new host additions. Setting works independently of the
# host key checking setting above.
#record_host_keys=False

# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
# line to disable this behaviour.
#pty=False

# paramiko will default to looking for SSH keys initially when trying to
# authenticate to remote devices. This is a problem for some network devices
# that close the connection after a key failure. Uncomment this line to
# disable the Paramiko look for keys function
#look_for_keys = False

# When using persistent connections with Paramiko, the connection runs in a
# background process. If the host doesn't already have a valid SSH key, by
# default Ansible will prompt to add the host key. This will cause connections
# running in background processes to fail. Uncomment this line to have
# Paramiko automatically add host keys.
#host_key_auto_add = True

[ssh_connection]

# ssh arguments to use
# Leaving off ControlPersist will result in poor performance, so use
# paramiko on older platforms rather than removing it, -C controls compression use
#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s

# The base directory for the ControlPath sockets.
# This is the "%(directory)s" in the control_path option
#
# Example:
# control_path_dir = /tmp/.ansible/cp
#control_path_dir = ~/.ansible/cp

# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
# port and username (empty string in the config). The hash mitigates a common problem users
# found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
# In those cases, a "too long for Unix domain socket" ssh error would occur.
#
# Example:
# control_path = %(directory)s/%%h-%%r
#control_path =

# Enabling pipelining reduces the number of SSH operations required to
# execute a module on the remote server. This can result in a significant
# performance improvement when enabled, however when using "sudo:" you must
# first disable 'requiretty' in /etc/sudoers
#
# By default, this option is disabled to preserve compatibility with
# sudoers configurations that have requiretty (the default on many distros).
#
pipelining = True

# Control the mechanism for transferring files (old)
# * smart = try sftp and then try scp [default]
# * True = use scp only
# * False = use sftp only
#scp_if_ssh = smart

# Control the mechanism for transferring files (new)
# If set, this will override the scp_if_ssh option
# * sftp = use sftp to transfer files
# * scp = use scp to transfer files
# * piped = use 'dd' over SSH to transfer files
# * smart = try sftp, scp, and piped, in that order [default]
#transfer_method = smart

# if False, sftp will not use batch mode to transfer files. This may cause some
# types of file transfer failures impossible to catch however, and should
# only be disabled if your sftp version has problems with batch mode
#sftp_batch_mode = False

# The -tt argument is passed to ssh when pipelining is not enabled because sudo
# requires a tty by default.
#use_tty = True

# Number of times to retry an SSH connection to a host, in case of UNREACHABLE.
# For each retry attempt, there is an exponential backoff,
# so after the first attempt there is 1s wait, then 2s, 4s etc. up to 30s (max).
#retries = 3

[persistent_connection]

# Configures the persistent connection timeout value in seconds. This value is
# how long the persistent connection will remain idle before it is destroyed.
# If the connection doesn't receive a request before the timeout value
# expires, the connection is shutdown. The default value is 30 seconds.
#connect_timeout = 30

# Configures the persistent connection retry timeout. This value configures the
# retry timeout that ansible-connection will wait to connect
# to the local domain socket. This value must be larger than the
# ssh timeout (timeout) and less than persistent connection idle timeout (connect_timeout).
# The default value is 15 seconds.
#connect_retry_timeout = 15

# The command timeout value defines the amount of time to wait for a command
# or RPC call before timing out. The value for the command timeout must
# be less than the value of the persistent connection idle timeout (connect_timeout)
# The default value is 10 seconds.
#command_timeout = 10

[accelerate]
#accelerate_port = 5099
#accelerate_timeout = 30
#accelerate_connect_timeout = 5.0

# The daemon timeout is measured in minutes. This time is measured
# from the last activity to the accelerate daemon.
#accelerate_daemon_timeout = 30

# If set to yes, accelerate_multi_key will allow multiple
# private keys to be uploaded to it, though each user must
# have access to the system via SSH to add a new key. The default
# is "no".
#accelerate_multi_key = yes

[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p

# Set this to yes to allow libvirt_lxc connections to work without SELinux.
#libvirt_lxc_noseclabel = yes

[colors]
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
#debug = dark gray
#deprecate = purple
#skip = cyan
#unreachable = red
#ok = green
#changed = yellow
#diff_add = green
#diff_remove = red
#diff_lines = cyan


[diff]
# Always print diff when running ( same as always running with -D/--diff )
# always = no

# Set how many context lines to show in diff
# context = 3
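The header comment of this ansible.cfg spells out the lookup order Ansible uses to find its configuration: the ANSIBLE_CONFIG environment variable, then ansible.cfg in the current working directory, then ~/.ansible.cfg, then /etc/ansible/ansible.cfg, first match wins. A minimal Python sketch of that order (an illustration of the documented behaviour, not Ansible's actual resolver):

```python
import os

# Sketch of the "whichever it finds first" lookup described in the
# ansible.cfg header comment above; illustrative only.
def find_ansible_cfg():
    candidates = [
        os.environ.get('ANSIBLE_CONFIG'),          # explicit override
        os.path.join(os.getcwd(), 'ansible.cfg'),  # current working directory
        os.path.expanduser('~/.ansible.cfg'),      # home directory
        '/etc/ansible/ansible.cfg',                # system-wide default
    ]
    for path in candidates:
        if path and os.path.isfile(path):
            return path  # first existing path wins
    return None

print(find_ansible_cfg())
```

This is why the %post scriptlet in the spec file above copies the bootstrap ansible.cfg into /etc/ansible: it becomes the system-wide fallback unless a user supplies one of the higher-precedence files.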
@@ -1,35 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
- hosts: bootstrap
  # If gathering facts is really necessary, run setup task AFTER host connectivity
  # check block in prepare-env role.
  gather_facts: no

  vars_files:
    - host_vars/default.yml

  pre_tasks:
    - include_vars: "{{ override_files_dir }}/secret"
      failed_when: false
    - include_vars: "{{ override_files_dir }}/site.yml"
      failed_when: false
    - include_vars: "{{ override_files_dir }}/{{ inventory_hostname }}.yml"
      failed_when: false

  # Main play
  roles:
    - prepare-env
    - { role: validate-config, when: not skip_play }
    - { role: store-passwd, when: not skip_play and save_password }
    - { role: apply-bootstrap-manifest, when: not skip_play and not replayed }
    - { role: persist-config, when: not skip_play and save_config }
    - { role: bringup-essential-services, when: not skip_play and save_config }

  vars:
    change_password: false
    skip_play: false
    replayed: false
@@ -1,149 +0,0 @@
---
# SYSTEM PROPERTIES
# =================
system_mode: simplex
timezone: UTC

# At least one DNS server is required and a maximum of 3 servers is allowed
dns_servers:
  - 8.8.8.8
  - 8.8.4.4

# NETWORK PROPERTIES
# ==================
#
# Unless specified in the host override file, the start and end addresses of
# each subnet are derived from the provided CIDR as follows:
# For pxeboot, management and cluster host subnets:
#   - start address: index 2 of CIDR
#   - end address: index -2 of CIDR
#   e.g. management_subnet (provided/default): 192.168.204.0/28
#        management_start_address (derived): 192.168.204.2
#        management_end_address (derived): 192.168.204.14
#
# For cluster pod, cluster service, oam and multicast subnets:
#   - start address: index 1 of CIDR
#   - end address: index -2 of CIDR
#   e.g. multicast_subnet (provided/default): 239.1.1.0/28
#        multicast_start_address (derived): 239.1.1.1
#        multicast_end_address (derived): 239.1.1.14
#
# Unless specified, the external_oam_node_0_address and external_oam_node_1_address
# are derived from the external_oam_floating_address as follows:
#   external_oam_node_0_address: next address after external_oam_floating_address
#   external_oam_node_1_address: next address after external_oam_node_0_address
#   e.g. external_oam_floating_address (provided/default): 10.10.10.2
#        external_oam_node_0_address (derived): 10.10.10.3
#        external_oam_node_1_address (derived): 10.10.10.4
#
# These addresses are only applicable to duplex or duplex-direct system mode.
#
pxeboot_subnet: 169.254.202.0/24
# pxeboot_start_address:
# pxeboot_end_address:

management_subnet: 192.168.204.0/28
# management_start_address:
# management_end_address:

cluster_host_subnet: 192.168.206.0/24
# cluster_host_start_address:
# cluster_host_end_address:

cluster_pod_subnet: 172.16.0.0/16
# cluster_pod_start_address:
# cluster_pod_end_address:

cluster_service_subnet: 10.96.0.0/12
# cluster_service_start_address:
# cluster_service_end_address:

external_oam_subnet: 10.10.10.0/24
external_oam_gateway_address: 10.10.10.1
external_oam_floating_address: 10.10.10.2
# external_oam_start_address:
# external_oam_end_address:
# external_oam_node_0_address:
# external_oam_node_1_address:

management_multicast_subnet: 239.1.1.0/28
# management_multicast_start_address:
# management_multicast_end_address:

# Management network address allocation (True = dynamic, False = static)
dynamic_address_allocation: True

# DOCKER PROXIES
# ==============
#
# If the host OAM network is behind a proxy, Docker must be configured with
# the same proxy. When an http and/or https proxy is provided, a no-proxy
# address list can optionally be provided. This list will be added to the
# default no-proxy list derived from localhost, loopback, management and oam
# floating addresses at run time. Each address in the list must neither
# contain a wildcard nor have subnet format.

# docker_http_proxy: http://proxy.com:1234
# docker_https_proxy: https://proxy.com:1234
# docker_no_proxy:
#   - 1.2.3.4
#   - 5.6.7.8

# DOCKER REGISTRIES
# =================
#
# The docker_registries setting is a map of known registry keys and their
# source values. Each key is a fully scoped registry name and the
# same name is used as the default value. For instance, the
# k8s.gcr.io registry, which hosts Kubernetes related images, has
# the default registry value of k8s.gcr.io.
#
# To overwrite a particular registry, use the original registry
# value as the key followed by a custom IP address or domain for
# the value.
#
# The "unified" key is a special registry key. Defining and giving
# it a value implies all images are to be retrieved from this
# single source. Hence, registry values of all other registry keys,
# if specified, will be ignored.
#
# The docker registries map can be extended with new custom keys in
# the near future.
#
# The valid formats for a registry value are:
#   - domain (e.g. example.domain)
#   - domain with port (e.g. example.domain:5000)
#   - IPv4 address (e.g. 1.2.3.4)
#   - IPv4 address with port (e.g. 1.2.3.4:5000)
#   - IPv6 address (e.g. FD01::0100)
#   - IPv6 address with port (e.g. [FD01::0100]:5000)
#
# Parameter is_secure_registry is only relevant when a unified registry is
# used.

docker_registries:
  k8s.gcr.io:
  gcr.io:
  quay.io:
  docker.io:
  # unified: example.domain

#is_secure_registry: True

# ADMIN CREDENTIALS
# =================
#
# WARNING: It is strongly recommended to save this info in an Ansible vault
# file named "secret" under the override files directory. Configuration
# parameters stored in the vault must start with the vault_ prefix
# (i.e. vault_admin_username, vault_admin_password).
#
admin_username: admin
admin_password: St8rlingX*

# OVERRIDE FILES DIRECTORY
# ========================
#
# Default directory where user override file(s) can be found
#
override_files_dir: "{{ lookup('env', 'HOME') }}"
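The index-based derivation described in this file's NETWORK PROPERTIES comments maps directly onto CIDR indexing. A short sketch of that rule, assuming the python-netaddr library that the playbookconfig spec lists as a runtime dependency (derive_range is a hypothetical helper for illustration, not code from the removed package):

```python
from netaddr import IPNetwork  # python-netaddr, a Requires: of the RPM

def derive_range(cidr, start_index):
    """Illustrate the derivation rule from the comments above:
    start = CIDR[start_index], end = CIDR[-2].
    start_index is 2 for pxeboot/management/cluster host subnets
    and 1 for pod/service/oam/multicast subnets."""
    net = IPNetwork(cidr)
    return str(net[start_index]), str(net[-2])

print(derive_range('192.168.204.0/28', 2))  # ('192.168.204.2', '192.168.204.14')
print(derive_range('239.1.1.0/28', 1))      # ('239.1.1.1', '239.1.1.14')
```

The end address is always the second-to-last address of the subnet, which keeps the broadcast address (and, for the index-2 case, the network and gateway addresses) out of the allocatable range.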
@@ -1,28 +0,0 @@
# This is the default ansible 'hosts' file.
#
# It should live in /etc/ansible/hosts
#
# - Comments begin with the '#' character
# - Blank lines are ignored
# - Groups of hosts are delimited by [header] elements
# - You can enter hostnames or ip addresses
# - A hostname/ip can be a member of multiple groups

# Ex 1: Ungrouped hosts, specify before any group headers.
---
bootstrap:
  hosts:
    localhost:
      ansible_connection: local

  vars:
    ansible_ssh_user: wrsroot
    ansible_ssh_pass: St8rlingX*
    ansible_become_pass: St8rlingX*
    ansible_become: true
    password_change_responses:
      yes/no: 'yes'
      wrsroot*: 'wrsroot'
      \(current\) UNIX password: 'wrsroot'
      (?i)New password: 'St8rlingX*'
      (?i)Retype new password: 'St8rlingX*'
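The password_change_responses map above pairs prompt patterns with the replies used to drive the initial forced password change on first login. A hypothetical sketch of how such a map could feed an expect-style session with pexpect (python2-pexpect is another runtime dependency of the RPM); the answer_prompts helper is illustrative and not code from the removed package:

```python
import pexpect

# Prompt patterns and replies, mirroring password_change_responses above.
RESPONSES = [
    (r'yes/no', 'yes'),
    (r'wrsroot\*', 'wrsroot'),
    (r'\(current\) UNIX password', 'wrsroot'),
    (r'(?i)New password', 'St8rlingX*'),
    (r'(?i)Retype new password', 'St8rlingX*'),
]

def answer_prompts(child, responses):
    """Reply to each expected prompt until the session ends."""
    patterns = [p for p, _ in responses] + [pexpect.EOF, pexpect.TIMEOUT]
    while True:
        idx = child.expect(patterns, timeout=30)
        if idx >= len(responses):  # EOF or TIMEOUT: stop answering
            return
        child.sendline(responses[idx][1])

# Usage (assumes a reachable host with the default credentials above):
# child = pexpect.spawn('ssh wrsroot@controller-0')
# answer_prompts(child, RESPONSES)
```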
@@ -1,4 +0,0 @@
---
hieradata_workdir: /tmp/hieradata
manifest_apply_log: /tmp/apply_manifest.log
loopback_ifname: lo
@@ -1,56 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
#   This role is to create static configuration and apply the puppet bootstrap
#   manifest.

- name: Create config workdir
  file:
    path: "{{ hieradata_workdir }}"
    state: directory
    owner: root
    group: root
    mode: 0755

- name: Generating static config data
  command: "/usr/bin/sysinv-puppet create-static-config {{ hieradata_workdir }}"
  failed_when: false
  register: static_config_result

- name: Fail if static hieradata cannot be generated
  fail:
    msg: "Failed to create puppet hiera static config."
  when: static_config_result.rc != 0

- name: Applying puppet bootstrap manifest
  command: >
    /usr/local/bin/puppet-manifest-apply.sh
    {{ hieradata_workdir }}
    {{ derived_network_params.controller_0_address }}
    controller ansible_bootstrap > {{ manifest_apply_log }}
  register: bootstrap_manifest
  environment:
    INITIAL_CONFIG_PRIMARY: "true"

- name: Fail if puppet manifest apply script returns an error
  fail:
    msg: >-
      Failed to apply bootstrap manifest. See /var/log/puppet/latest/puppet.log
      for details.
  when: bootstrap_manifest.rc != 0

- name: Ensure Puppet directory exists
  file:
    path: "{{ puppet_permdir }}"
    state: directory
    recurse: yes
    owner: root
    group: root
    mode: 0755

- name: Persist puppet working files
  command: "mv {{ hieradata_workdir }} {{ puppet_permdir }}"
@@ -1,53 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
#   - Restart Barbican
#   - Start up FM, skip auth middleware as it is not functional at this
#     early stage
#   - Start up Maintenance Agent
#   - Restart Maintenance Client to pick up the new config, which will update
#     the controller-0 status from offline to online.
#
- block:
    - name: Update barbican bind host with management floating IP
      replace:
        path: /etc/barbican/barbican.conf
        regexp: "bind_host=.*$"
        replace: "bind_host={{ controller_floating_address | ipwrap }}"

    - name: Restart barbican
      systemd:
        state: restarted
        name: openstack-barbican-api

    - name: Apply workaround for fm-api
      lineinfile:
        path: /etc/fm/api-paste.ini
        line: "pipeline=request_id api_v1"
        regex: "pipeline*"

    - name: Update bind_host config parameter in fm config file
      replace:
        path: /etc/fm/fm.conf
        regexp: "bind_host=.*$"
        replace: "bind_host={{ controller_floating_address }}"

    - name: Restart FM API and bring up FM Manager
      command: "{{ item }}"
      with_items:
        - /etc/init.d/fm-api restart
        - /etc/init.d/fminit start

    - name: Bring up Maintenance Agent
      command: /usr/lib/ocf/resource.d/platform/mtcAgent start

    - name: Restart Maintenance Client
      command: /etc/init.d/mtcClient restart

  environment:  # block environment
    OCF_ROOT: "/usr/lib/ocf"
    OCF_RESKEY_state: "active"
@ -1,256 +0,0 @@
|
||||
---
|
||||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# SUB-TASKS DESCRIPTION:
|
||||
# Bring up Helm
|
||||
# - Set up needed directories
|
||||
# - Pull Tiller and Armada images
|
||||
# - Create service account and cluster role binding
|
||||
# - Initialize Helm
|
||||
# - Restart lighttpd
|
||||
# - Generate repo index on target
|
||||
# - Add local helm repo
|
||||
# - Stop lighttpd
|
||||
# - Bind mount
|
||||
# - Generate repo index on source
|
||||
#
|
||||
|
||||
- name: Create www group
|
||||
group:
|
||||
name: www
|
||||
gid: 1877
|
||||
state: present
|
||||
|
||||
- name: Create www user in preparation for Helm bringup
|
||||
user:
|
||||
name: www
|
||||
uid: 1877
|
||||
group: www
|
||||
groups: wrs_protected
|
||||
shell: /sbin/nologin
|
||||
state: present
|
||||
|
||||
- name: Ensure /www/tmp exists
|
||||
file:
|
||||
path: /www/tmp
|
||||
state: directory
|
||||
recurse: yes
|
||||
owner: www
|
||||
group: root
|
||||
#mode: 1700
|
||||
|
||||
- name: Ensure /www/var exists
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
recurse: yes
|
||||
owner: www
|
||||
group: root
|
||||
with_items:
|
||||
- /www/var
|
||||
- /www/var/log
|
||||
|
||||
- name: Set up lighttpd.conf
|
||||
copy:
|
||||
src: "{{ lighttpd_conf_template }}"
|
||||
dest: /etc/lighttpd/lighttpd.conf
|
||||
remote_src: yes
|
||||
mode: 0640
|
||||
|
||||
# TODO(tngo): Check if enable_https should be configurable..
|
||||
# Resort to sed due to replace/lineinfile module deficiency
|
||||
- name: Update lighttpd.conf
|
||||
command: "{{ item }}"
|
||||
args:
|
||||
warn: false
|
||||
with_items:
|
||||
- "sed -i -e 's|<%= @http_port %>|'$PORT_NUM'|g' /etc/lighttpd/lighttpd.conf"
|
||||
- "sed -i '/@enable_https/,/% else/d' /etc/lighttpd/lighttpd.conf"
|
||||
- "sed -i '/@tmp_object/,/%- end/d' /etc/lighttpd/lighttpd.conf"
|
||||
- "sed -i '/<% end/d' /etc/lighttpd/lighttpd.conf"
|
||||
- "sed -i '/@tpm_object/,/%- end/d' /etc/lighttpd/lighttpd.conf"
|
||||
environment:
|
||||
PORT_NUM: 80
|
||||
|
||||
- name: Set up lighttpd-inc.conf
|
||||
copy:
|
||||
src: "{{ lighttpd_inc_conf_template }}"
|
||||
dest: /etc/lighttpd/lighttpd-inc.conf
|
||||
remote_src: yes
|
||||
mode: 0640
|
||||
|
||||
- name: Update management subnet in lighttpd-inc.conf
|
||||
replace:
|
||||
path: /etc/lighttpd/lighttpd-inc.conf
|
||||
regexp: "var.management_ip_network =.*$"
|
||||
replace: 'var.management_ip_network = "{{ management_subnet }}"'
|
||||
|
||||
- name: Update pxe subnet in lighttp-inc.conf
|
||||
replace:
|
||||
path: /etc/lighttpd/lighttpd-inc.conf
|
||||
regexp: "var.pxeboot_ip_network =.*$"
|
||||
replace: 'var.pxeboot_ip_network = "{{ pxeboot_subnet }}"'
|
||||
|
||||
- name: Update Tiller and Armada image tags
|
||||
set_fact:
|
||||
tiller_img: "{{ tiller_img | regex_replace('gcr.io', '{{ gcr_registry }}') }}"
|
||||
armada_img: "{{ armada_img | regex_replace('quay.io', '{{ quay_registry }}') }}"
|
||||
|
||||
- name: Pull Tiller and Armada images
|
||||
docker_image:
|
||||
name: "{{ item }}"
|
||||
with_items:
|
||||
- "{{ tiller_img }}"
|
||||
- "{{ armada_img }}"
|
||||
|
||||
- name: Create source and target helm bind directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: www
|
||||
group: root
|
||||
mode: 0755
|
||||
with_items:
|
||||
- "{{ source_helm_bind_dir }}"
|
||||
- "{{ target_helm_bind_dir }}"
|
||||
|
||||
- name: Create helm repository directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: www
|
||||
group: root
|
||||
mode: 0755
|
||||
with_items:
|
||||
- "{{ source_helm_bind_dir }}/{{ helm_repo_name_apps }}"
|
||||
- "{{ source_helm_bind_dir }}/{{ helm_repo_name_platform }}"
|
||||
|
||||
- name: Create service account for Tiller
|
||||
command: >
|
||||
kubectl --kubeconfig=/etc/kubernetes/admin.conf create serviceaccount
|
||||
--namespace kube-system tiller
|
||||
|
||||
- name: Create cluster role binding for Tiller service account
|
||||
command: >
|
||||
kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding
|
||||
tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
|
||||
|
||||
- name: Initialize Helm (local host)
|
||||
command: >-
|
||||
helm init --skip-refresh --service-account tiller --node-selectors
|
||||
"node-role.kubernetes.io/master"="" --tiller-image={{ tiller_img }}
|
||||
--override spec.template.spec.hostNetwork=true
|
||||
become_user: wrsroot
|
||||
environment:
|
||||
KUBECONFIG: /etc/kubernetes/admin.conf
|
||||
HOME: /home/wrsroot
|
||||
when: inventory_hostname == 'localhost'
|
||||
|
||||
# Workaround for helm init remotely. Not sure why the task cannot be executed
|
||||
# successfully as wrsroot on remote host.
|
||||
- block:
|
||||
- name: Initialize Helm (remote host)
|
||||
command: >-
|
||||
helm init --skip-refresh --service-account tiller --node-selectors
|
||||
"node-role.kubernetes.io/master"="" --tiller-image={{ tiller_img }}
|
||||
--override spec.template.spec.hostNetwork=true
|
||||
environment:
|
||||
KUBECONFIG: /etc/kubernetes/admin.conf
|
||||
HOME: /home/wrsroot
|
||||
|
||||
- name: Change helm directory ownership (remote host)
|
||||
file:
|
||||
dest: /home/wrsroot/.helm
|
||||
owner: wrsroot
|
||||
group: wrs
|
||||
mode: 0755
|
||||
recurse: yes
|
||||
when: inventory_hostname != 'localhost'
|
||||
|
||||
- name: Generate Helm repo indicies
|
||||
command: helm repo index "{{ source_helm_bind_dir }}/{{ item }}"
|
||||
become_user: www
|
||||
with_items:
|
||||
- "{{ helm_repo_name_apps }}"
|
||||
- "{{ helm_repo_name_platform }}"
|
||||
|
||||
- name: Stop lighttpd
|
||||
systemd:
|
||||
name: lighttpd
|
||||
state: stopped
|
||||
|
||||
- name: Disable lighttpd
|
||||
# Systemd module does not support disabled state. Resort to command
|
||||
command: systemctl disable lighttpd
|
||||
|
||||
- name: Bind mount on {{ target_helm_bind_dir }}
|
||||
# Due to deficiency of mount module, resort to command for now
|
||||
command: mount -o bind -t ext4 {{ source_helm_bind_dir }} {{ target_helm_bind_dir }}
|
||||
args:
|
||||
warn: false
|
||||

- name: Enable lighttpd
  command: systemctl enable lighttpd

- name: Restart lighttpd for Helm
  systemd:
    name: lighttpd
    state: restarted

- name: Add Helm repos (local host)
  command: helm repo add "{{ item }}" "http://127.0.0.1:$PORT/helm_charts/{{ item }}"
  become_user: wrsroot
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf
    HOME: /home/wrsroot
    PORT: 80
  with_items:
    - "{{ helm_repo_name_apps }}"
    - "{{ helm_repo_name_platform }}"
  when: inventory_hostname == 'localhost'

# Workaround for helm repo add remotely
- block:
  - name: Add Helm repos (remote host)
    command: helm repo add "{{ item }}" "http://127.0.0.1:$PORT/helm_charts/{{ item }}"
    environment:
      KUBECONFIG: /etc/kubernetes/admin.conf
      HOME: /home/wrsroot
      PORT: 80
    with_items:
      - "{{ helm_repo_name_apps }}"
      - "{{ helm_repo_name_platform }}"

  - name: Change helm directory ownership to pick up newly generated files (remote host)
    file:
      dest: /home/wrsroot/.helm
      owner: wrsroot
      group: wrs
      mode: 0755
      recurse: yes
  when: inventory_hostname != 'localhost'

- name: Update info of available charts locally from chart repos
  command: helm repo update
  become_user: wrsroot
  when: inventory_hostname == 'localhost'

# Workaround for helm update remotely. Not sure why the task cannot be executed
# successfully as wrsroot on the remote host.
- block:
  - name: Update info of available charts locally from chart repos (remote host)
    command: helm repo update
    environment:
      HOME: /home/wrsroot

  - name: Change helm directory ownership (remote host)
    file:
      dest: /home/wrsroot/.helm
      owner: wrsroot
      group: wrs
      mode: 0755
      recurse: yes
  when: inventory_hostname != 'localhost'
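
# For illustration only (not part of the original play): once the repos are
# added and indexed, the Helm v2 client should list them, e.g.:
#
#   helm repo list
#   helm search "{{ helm_repo_name_apps }}"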
@ -1,245 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
# Bring up Kubernetes master
# - Update iptables
# - Create daemon.json for insecure unified registry if applicable
# - Create manifest directory
# - Set up pods cgroups for minimal set of controllers
# - Enable kubelet service (with default/custom registry)
# - Run kubeadm init
# - Prepare admin.conf
# - Set k8s environment variable for new shell
# - Prepare Calico config and activate Calico networking
# - Prepare Multus config and activate Multus networking
# - Prepare SRIOV config and activate SRIOV networking
# - Prepare SRIOV device plugin config and activate SRIOV device plugin
# - Restrict coredns to master node and set anti-affinity (duplex system)
# - Restrict coredns to 1 pod (simplex system)
# - Remove taint from master node
# - Add kubelet service override
# - Register kubelet with pmond
# - Reload systemd
#

- name: Setup iptables for Kubernetes
  lineinfile:
    path: /etc/sysctl.d/k8s.conf
    line: "{{ item }}"
    create: yes
  with_items:
    - net.bridge.bridge-nf-call-ip6tables = 1
    - net.bridge.bridge-nf-call-iptables = 1
    - net.ipv4.ip_forward = 1
    - net.ipv4.conf.default.rp_filter = 0
    - net.ipv4.conf.all.rp_filter = 0
    - net.ipv6.conf.all.forwarding = 1

- block:
  - block:
    - name: Create daemon.json file for insecure registry
      copy:
        src: "{{ insecure_docker_registry_template }}"
        dest: /etc/docker/daemon.json
        remote_src: yes
        mode: 0644

    - name: Update daemon.json with registry IP
      command: "sed -i -e 's|<%= @insecure_registries %>|\"$DOCKER_REGISTRY_IP\"|g' /etc/docker/daemon.json"
      args:
        warn: false

    - name: Restart docker
      systemd:
        name: docker
        state: restarted
    when: not is_secure_registry

  environment:
    DOCKER_REGISTRY_IP: "{{ docker_registry }}"
  when: use_unified_registry
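
# For illustration only (not part of the original play): after the sed
# substitution, /etc/docker/daemon.json would read roughly like the following
# (key layout and registry address illustrative):
#
#   { "insecure-registries": ["192.168.204.2:9001"] }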

- name: Update kernel parameters for iptables
  command: sysctl --system &>/dev/null

- name: Create manifests directory required by kubelet
  file:
    path: /etc/kubernetes/manifests
    state: directory
    mode: 0700

- name: Create kubelet cgroup for minimal set
  file:
    path: "{{ cgroup_root }}/{{ item }}/{{ k8s_cgroup_name }}"
    state: directory
    recurse: yes
    owner: root
    group: root
    mode: 0700
  with_items:
    - cpuset
    - cpu
    - cpuacct
    - memory
    - systemd

- name: Get default k8s cpuset
  command: cat /sys/devices/system/cpu/online
  register: k8s_cpuset

- name: Get default k8s nodeset
  command: cat /sys/devices/system/node/online
  register: k8s_nodeset

- name: Set mems for cpuset controller
  shell: "echo {{ k8s_nodeset.stdout_lines[0] }} > {{ cgroup_root }}/cpuset/{{ k8s_cgroup_name }}/cpuset.mems || :"

- name: Set cpus for cpuset controller
  shell: "echo {{ k8s_cpuset.stdout_lines[0] }} > {{ cgroup_root }}/cpuset/{{ k8s_cgroup_name }}/cpuset.cpus || :"

- name: Create a tasks file for cpuset controller
  file:
    path: "{{ cgroup_root }}/cpuset/{{ k8s_cgroup_name }}/tasks"
    state: touch
    owner: root
    group: root
    mode: 0644

- name: Set kubelet node configuration
  set_fact:
    node_ip: "{{ controller_0_cluster_host }}"

- name: Create kubelet override config file
  template:
    src: "kubelet.conf.j2"
    dest: /etc/sysconfig/kubelet

- name: Enable kubelet
  systemd:
    name: kubelet
    enabled: yes

- name: Create Kube admin yaml
  copy:
    src: "{{ kube_admin_yaml_template }}"
    dest: /etc/kubernetes/kubeadm.yaml
    remote_src: yes

- name: Update Kube admin yaml with network info
  command: "{{ item }}"
  args:
    warn: false
  with_items:
    - "sed -i -e 's|<%= @apiserver_advertise_address %>|'$CLUSTER_IP'|g' /etc/kubernetes/kubeadm.yaml"
    - "sed -i -e 's|<%= @apiserver_loopback_address %>|'$LOOPBACK_IP'|g' /etc/kubernetes/kubeadm.yaml"
    - "sed -i -e 's|<%= @etcd_endpoint %>|'$ETCD_ENDPOINT'|g' /etc/kubernetes/kubeadm.yaml"
    - "sed -i -e 's|<%= @service_domain %>|'cluster.local'|g' /etc/kubernetes/kubeadm.yaml"
    - "sed -i -e 's|<%= @pod_network_cidr %>|'$POD_NETWORK_CIDR'|g' /etc/kubernetes/kubeadm.yaml"
    - "sed -i -e 's|<%= @service_network_cidr %>|'$SERVICE_NETWORK_CIDR'|g' /etc/kubernetes/kubeadm.yaml"
    - "sed -i -e 's|<%= @k8s_registry %>|'$K8S_REGISTRY'|g' /etc/kubernetes/kubeadm.yaml"
  environment:
    CLUSTER_IP: "{{ cluster_floating_address }}"
    LOOPBACK_IP: "{{ '127.0.0.1' if ipv6_addressing == False else '::1' }}"
    ETCD_ENDPOINT: "http://{{ cluster_floating_address | ipwrap }}:2379"
    POD_NETWORK_CIDR: "{{ cluster_pod_subnet }}"
    SERVICE_NETWORK_CIDR: "{{ cluster_service_subnet }}"
    K8S_REGISTRY: "{{ k8s_registry }}"
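
# For illustration only (not part of the original play): each sed call swaps
# one ERB placeholder for a runtime value, so a template line such as
#
#   advertiseAddress: <%= @apiserver_advertise_address %>
#
# would be rewritten to (field name and address illustrative):
#
#   advertiseAddress: 192.168.206.2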

- name: Initializing Kubernetes master
  command: kubeadm init --config=/etc/kubernetes/kubeadm.yaml

- name: Update kube admin.conf file mode and owner
  file:
    path: /etc/kubernetes/admin.conf
    mode: 0640
    group: wrs_protected

- name: Set up k8s environment variable
  copy:
    src: /usr/share/puppet/modules/platform/files/kubeconfig.sh
    dest: /etc/profile.d/kubeconfig.sh
    remote_src: yes

- name: Set Calico cluster configuration
  set_fact:
    cluster_network_ipv4: "{{ cluster_pod_subnet | ipv4 }}"
    cluster_network_ipv6: "{{ cluster_pod_subnet | ipv6 }}"

# Configure calico networking using the Kubernetes API datastore.
- name: Create Calico config file
  template:
    src: "calico-cni.yaml.j2"
    dest: /etc/kubernetes/calico.yaml

- name: Activate Calico Networking
  command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/calico.yaml"

- name: Create Multus config file
  template:
    src: "multus-cni.yaml.j2"
    dest: /etc/kubernetes/multus.yaml

- name: Activate Multus Networking
  command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/multus.yaml"

- name: Create SRIOV Networking config file
  template:
    src: "sriov-cni.yaml.j2"
    dest: /etc/kubernetes/sriov-cni.yaml

- name: Activate SRIOV Networking
  command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/sriov-cni.yaml"

- name: Create SRIOV device plugin config file
  template:
    src: "sriov-plugin.yaml.j2"
    dest: /etc/kubernetes/sriovdp-daemonset.yaml

- name: Activate SRIOV device plugin
  command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/sriovdp-daemonset.yaml"

# Restrict coredns to master node and use anti-affinity for core dns for duplex systems
- block:
  - name: Restrict coredns to master node
    command: >-
      kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p
      '{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}'

  - name: Use anti-affinity for coredns pods
    command: >-
      kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p
      '{"spec":{"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"k8s-app","operator":"In","values":["kube-dns"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}'
  when: system_mode != 'simplex'

- name: Restrict coredns to 1 pod for simplex
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system scale --replicas=1 deployment coredns
  when: system_mode == 'simplex'

- name: Remove taint from master node
  shell: "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node controller-0 node-role.kubernetes.io/master- || true"

- name: Add kubelet service override
  copy:
    src: "{{ kubelet_override_template }}"
    dest: /etc/systemd/system/kubelet.service.d/kube-stx-override.conf
    mode: preserve
    remote_src: yes

- name: Register kubelet with pmond
  copy:
    src: "{{ kubelet_pmond_template }}"
    dest: /etc/pmon.d/kubelet.conf
    mode: preserve
    remote_src: yes

- name: Reload systemd
  command: systemctl daemon-reload

- name: Mark Kubernetes config complete
  file:
    path: /etc/platform/.initial_k8s_config_complete
    state: touch
@ -1,39 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
# Load system images needed for Kubernetes and Helm bringup from archive
# directory
#

- name: Set default directory for image files copy
  set_fact:
    images_dir: /home/wrsroot
  when: (images_dir is not defined) or (images_dir is none)

- name: Copy Docker images to remote host
  copy:
    src: "{{ docker_images_archive_source }}"
    dest: "{{ images_dir }}"
  when: inventory_hostname != 'localhost'

- name: Adjust the images directory fact for local host
  set_fact:
    images_dir: "{{ docker_images_archive_source }}"
  when: inventory_hostname == 'localhost'

- name: Get list of archived files
  find:
    paths: "{{ images_dir }}"
    patterns: "*.tar"
  register: archive_find_output
  # run_once: true
  # delegate_to: localhost

- name: Load system images
  # Due to docker_image module deficiency, resort to shell
  shell: docker load < {{ images_dir }}/{{ item.path | basename }}
  with_items: "{{ archive_find_output.files }}"
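
# For illustration only (not part of the original tasks): a quick spot check
# that the archives made it into the local Docker cache:
#
#   docker image ls | head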
@ -1,177 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
# This role brings up Kubernetes and the essential flock services required
# for the initial controller unlock.
#

- block:
  - name: Add loopback interface
    # Use shell instead of the command module as source is an internal shell command
    shell: "{{ item }}"
    with_items:
      - source /etc/platform/openrc; system host-if-add controller-0 lo virtual none lo -c platform -m 1500
      - source /etc/platform/openrc; system interface-network-assign controller-0 lo mgmt
      - source /etc/platform/openrc; system interface-network-assign controller-0 lo cluster-host
      - ip addr add {{ cluster_virtual }} brd {{ cluster_broadcast }} dev lo scope host label lo:5
      - ip addr add {{ mgmt_virtual }} brd {{ management_broadcast }} dev lo scope host label lo:1
      - ip addr add {{ pxe_virtual }} dev lo scope host
      - ip addr add {{ cluster_floating_virtual }} dev lo scope host
      - ip addr add {{ mgmt_nfs_1_virtual }} dev lo scope host
      - ip addr add {{ mgmt_nfs_2_virtual }} dev lo scope host

  - name: Remove previous management floating address if management network config has changed
    command: ip addr delete {{ prev_mgmt_floating_virtual }} dev lo scope host
    when: reconfigure_endpoints and
          (mgmt_floating_virtual != prev_mgmt_floating_virtual)

  - name: Refresh local DNS (i.e. /etc/hosts)
    include: refresh_local_dns.yml

  - name: Load images from archives if configured
    include: load_images_from_archive.yml
    when: images_archive_exists

  - name: Bring up Kubernetes master
    include: bringup_kubemaster.yml

  - name: Bring up Helm
    include: bringup_helm.yml

  - name: Set up controller registry certificate and keys
    include: setup_registry_certificate_and_keys.yml

  - name: Bring up essential flock services
    include: bringup_flock_services.yml

  - name: Set dnsmasq.leases flag for unlock
    file:
      path: "{{ config_permdir }}/dnsmasq.leases"
      state: touch

  - name: Update resolv.conf file for unlock
    lineinfile:
      path: /etc/resolv.conf
      line: "nameserver {{ controller_floating_address }}"
      insertbefore: BOF

  - name: Check for controller-0 online status
    shell: source /etc/platform/openrc; system host-list | grep online
    register: online_check
    until: online_check.rc == 0
    retries: 10

  - name: Wait for {{ pods_wait_time }} seconds to ensure kube-system pods are all started
    wait_for:
      timeout: "{{ pods_wait_time }}"

  - name: Start parallel tasks to wait for Kubernetes component, Networking and Tiller pods to reach ready state
    command: kubectl --kubeconfig=/etc/kubernetes/admin.conf wait --namespace=kube-system --for=condition=Ready pods --selector {{ item }} --timeout=30s
    async: 30
    poll: 0
    with_items:
      - k8s-app=calico-node
      - k8s-app=calico-kube-controllers
      - k8s-app=kube-proxy
      - app=multus
      - app=sriov-cni
      - app=helm
      - component=kube-apiserver
      - component=kube-controller-manager
      - component=kube-scheduler
    register: wait_for_pods

  - name: Get wait tasks results
    async_status:
      jid: "{{ item.ansible_job_id }}"
    register: wait_job_result
    until: wait_job_result.finished
    # Set the retry to 10 times (60 seconds) but the async jobs above will
    # complete (success or failure) within 30 seconds
    retries: 10
    with_items: "{{ wait_for_pods.results }}"

  - name: Fail if any of the Kubernetes component, Networking and Tiller pods is not ready by this time
    fail:
      msg: "Pod {{ item._ansible_item_label._ansible_item_label }} is still not ready."
    when: item.stdout is not search(" condition met")
    with_items: "{{ wait_job_result.results }}"

  # Have to check for kube-dns pods separately, as at most one is running at
  # this point, so checking for the "Ready" condition at the kube-dns app
  # level won't work.
  - name: Fail if no kube-dns pod is running
    shell: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pods --namespace=kube-system | grep coredns | grep Running
    register: dns_pod_result
    failed_when: dns_pod_result.rc != 0

  when: (not replayed) or (restart_services)


- block:
  - name: Remove config file from previous play
    file:
      path: "{{ last_bootstrap_config_file }}"
      state: absent

  - name: Save the current system and network config for reference in subsequent replays
    lineinfile:
      # This file should be cleared upon host reboot
      path: "{{ last_bootstrap_config_file }}"
      line: "{{ item }}"
      create: yes
    with_items:
      - "prev_system_mode: {{ system_mode }}"
      - "prev_timezone: {{ timezone }}"
      - "prev_dynamic_address_allocation: {{ dynamic_address_allocation }}"
      - "prev_pxeboot_subnet: {{ pxeboot_subnet }}"
      - "prev_management_subnet: {{ management_subnet }}"
      - "prev_cluster_host_subnet: {{ cluster_host_subnet }}"
      - "prev_cluster_pod_subnet: {{ cluster_pod_subnet }}"
      - "prev_cluster_service_subnet: {{ cluster_service_subnet }}"
      - "prev_external_oam_subnet: {{ external_oam_subnet }}"
      - "prev_external_oam_gateway_address: {{ external_oam_gateway_address }}"
      - "prev_external_oam_floating_address: {{ external_oam_floating_address }}"
      - "prev_management_multicast_subnet: {{ management_multicast_subnet }}"
      - "prev_dns_servers: {{ dns_servers | join(',') }}"
      - "prev_docker_http_proxy: {{ docker_http_proxy }}"
      - "prev_docker_https_proxy: {{ docker_https_proxy }}"
      - "prev_docker_no_proxy: {{ docker_no_proxy | join(',') }}"
      - "prev_admin_username: {{ username | hash('sha1') }}"
      - "prev_admin_password: {{ password | hash('sha1') }}"
      # Store the addresses as values determined in the prepare-env stage, not
      # as merged values in the validate-config stage, as the latter requires
      # subnet validation.
      - "prev_pxeboot_start_address: {{ pxeboot_start_address }}"
      - "prev_pxeboot_end_address: {{ pxeboot_end_address }}"
      - "prev_management_start_address: {{ management_start_address }}"
      - "prev_management_end_address: {{ management_end_address }}"
      - "prev_cluster_host_start_address: {{ cluster_host_start_address }}"
      - "prev_cluster_host_end_address: {{ cluster_host_end_address }}"
      - "prev_cluster_pod_start_address: {{ cluster_pod_start_address }}"
      - "prev_cluster_pod_end_address: {{ cluster_pod_end_address }}"
      - "prev_cluster_service_start_address: {{ cluster_service_start_address }}"
      - "prev_cluster_service_end_address: {{ cluster_service_end_address }}"
      - "prev_external_oam_start_address: {{ external_oam_start_address }}"
      - "prev_external_oam_end_address: {{ external_oam_end_address }}"
      - "prev_management_multicast_start_address: {{ management_multicast_start_address }}"
      - "prev_management_multicast_end_address: {{ management_multicast_end_address }}"
      - "prev_external_oam_node_0_address: {{ external_oam_node_0_address }}"
      - "prev_external_oam_node_1_address: {{ external_oam_node_1_address }}"

  # Need to save the dictionary this way for proper comparison during replay
  - name: Save previous docker registries header
    lineinfile:
      path: "{{ last_bootstrap_config_file }}"
      line: "prev_docker_registries:"

  - name: Save previous docker registries content
    lineinfile:
      path: "{{ last_bootstrap_config_file }}"
      line: "  {{ item.key }}: {{ item.value }}"
    with_dict: "{{ docker_registries }}"

  when: save_config
@ -1,44 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASK DESCRIPTION:
# This task updates /etc/hosts for local name lookup.
#

# Check host connectivity, change password if provided

- name: Remove existing /etc/hosts
  file:
    path: /etc/hosts
    state: absent

- name: Populate /etc/hosts
  lineinfile:
    path: /etc/hosts
    line: "{{ item }}"
    create: yes
  with_items:
    - "{{ localhost_name_ip_mapping }}"
    - "{{ controller_floating_address }}\tcontroller"
    # May not need this entry
    - "{{ controller_0_cluster_host }}\tcontroller-0-infra"
    - "{{ controller_pxeboot_floating_address }}\tpxecontroller"
    - "{{ external_oam_floating_address }}\toamcontroller"
    - "{{ derived_network_params.nfs_management_address_1 }}\tcontroller-platform-nfs"
    - "{{ derived_network_params.controller_1_address }}\tcontroller-1"
    - "{{ derived_network_params.controller_0_address }}\tcontroller-0"
    # May not need this entry
    - "{{ controller_1_cluster_host }}\tcontroller-1-infra"
    - "{{ derived_network_params.nfs_management_address_2 }}\tcontroller-nfs"
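
# For illustration only (not part of the original tasks): with typical
# management addresses (illustrative), the resulting /etc/hosts entries look
# like:
#
#   192.168.204.2   controller
#   192.168.204.3   controller-0
#   192.168.204.4   controller-1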

- name: Save hosts file to permanent location
  copy:
    src: /etc/hosts
    dest: "{{ config_permdir }}"
    remote_src: yes

- name: Update name service caching server
  command: nscd -i hosts
@ -1,68 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
# Set up docker registry certificate and keys required for the unlock
#

- name: Generate cnf file from template
  copy:
    src: "{{ cert_cnf_template }}"
    dest: "{{ cert_cnf_file }}"
    remote_src: yes

- name: Update cnf file with network info
  command: "sed -i -e 's|<%= @docker_registry_ip %>|'$DOCKER_REGISTRY_IP'|g' {{ cert_cnf_file }}"
  args:
    warn: false
  environment:
    DOCKER_REGISTRY_IP: "{{ controller_floating_address }}"

- name: Generate certificate and key files
  command: >-
    openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout {{ registry_cert_key }}
    -out {{ registry_cert_crt }} -config {{ cert_cnf_file }}

- name: Generate pkcs1 key file
  command: openssl rsa -in {{ registry_cert_key }} -out {{ registry_cert_pkcs1_key }}
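
# For illustration only (not part of the original tasks): the generated
# artifacts can be sanity-checked on the host with standard openssl commands:
#
#   openssl x509 -in /etc/ssl/private/registry-cert.crt -noout -subject -dates
#   openssl rsa -in /etc/ssl/private/registry-cert-pkcs1.key -check -noout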

- name: Remove extfile used in certificate generation
  file:
    path: "{{ cert_cnf_file }}"
    state: absent

- name: Set certificate file and key permissions to root read-only
  file:
    path: "{{ item }}"
    mode: 0400
  with_items:
    - "{{ registry_cert_key }}"
    - "{{ registry_cert_crt }}"
    - "{{ registry_cert_pkcs1_key }}"

- name: Copy certificate and keys to shared filesystem for mate
  copy:
    src: "{{ item }}"
    dest: "{{ config_permdir }}"
    remote_src: yes
    mode: preserve
  with_items:
    - "{{ registry_cert_key }}"
    - "{{ registry_cert_crt }}"
    - "{{ registry_cert_pkcs1_key }}"

- name: Create docker certificate directory
  file:
    path: "{{ docker_cert_dir }}/registry.local:9001"
    state: directory
    recurse: yes
    mode: 0700

- name: Copy certificate file to docker certificate directory
  copy:
    src: "{{ registry_cert_crt }}"
    dest: "{{ docker_cert_dir }}/registry.local:9001"
    remote_src: yes
@ -1,817 +0,0 @@
---
# Calico Version v3.6
# Based off:
# https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/
# hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
#
# This is the calico configuration file for systems with less than 50 nodes.
#
# Notes when upversioning calico:
#
# Refer to configuration instructions here:
# https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/
# calico
#
# It is important to test in a multi-controller environment (i.e. AIO-DX) that
# the pods can be pinged by their endpoint, i.e. a pod running on controller-1
# can be pinged from controller-0, and vice versa.
#
# An additional test (run on controller-0) queries the calico daemon
# health and status:
#
# curl -O -L https://github.com/projectcalico/calicoctl/releases/download/
# v3.6.2/calicoctl
# chmod +x calicoctl
# sudo mv calicoctl /usr/local/bin
# export DATASTORE_TYPE=kubernetes
# sudo calicoctl node status
#
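# For illustration only (not part of the original notes): a quick cross-host
# check of the pod-network test above; the label selector is real, the pod IP
# is whatever the first command reports for the pod on the other controller.
#
# kubectl --kubeconfig=/etc/kubernetes/admin.conf get pods -o wide \
#     -n kube-system -l k8s-app=kube-dns
# ping <pod-ip-on-the-other-controller>
#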
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the Calico backend to use.
  calico_backend: "bird"

  # Configure the MTU to use
  veth_mtu: "1440"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
            "type": "calico-ipam",
            "assign_ipv4": "{{ "true" if cluster_network_ipv4 else "false" }}",
            "assign_ipv6": "{{ "true" if cluster_network_ipv6 else "false" }}"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }

---
# Source: calico/templates/kdd-crds.yaml
# Create all the CustomResourceDefinitions needed for
# Calico policy and networking mode.

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: felixconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamblocks.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMBlock
    plural: ipamblocks
    singular: ipamblock

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: blockaffinities.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BlockAffinity
    plural: blockaffinities
    singular: blockaffinity

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamhandles.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMHandle
    plural: ipamhandles
    singular: ipamhandle

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamconfigs.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMConfig
    plural: ipamconfigs
    singular: ipamconfig

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgppeers.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPPeer
    plural: bgppeers
    singular: bgppeer

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPConfiguration
    plural: bgpconfigurations
    singular: bgpconfiguration

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy
---
# Source: calico/templates/rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
rules:
  # Nodes are watched to monitor for deletions.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - watch
      - list
      - get
  # Pods are queried to check for existence.
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
  # IPAM resources are manipulated when nodes are deleted.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
    verbs:
      - list
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  # Needs access to update clusterinformations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - clusterinformations
    verbs:
      - get
      - create
      - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
  - kind: ServiceAccount
    name: calico-kube-controllers
    namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
      # Used to discover Typhas.
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - clusterinformations
      - hostendpoints
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
  # These permissions are required for Calico CNI to perform IPAM allocations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ipamconfigs
    verbs:
      - get
  # Block affinities must also be watchable by confd for route aggregation.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
    verbs:
      - watch
  # The Calico IPAM migration needs to get daemonsets. These permissions can be
  # removed if not upgrading from an installation using host-local IPAM.
  - apiGroups: ["apps"]
    resources:
      - daemonsets
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
  - kind: ServiceAccount
    name: calico-node
    namespace: kube-system
---

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      initContainers:
        # This container performs upgrade from host-local IPAM to calico-ipam.
        # It can be deleted if this is a fresh installation, or if you have already
        # upgraded to use calico-ipam.
        - name: upgrade-ipam
          image: "{{ quay_registry }}/calico/cni:v3.6.2"
          command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
          volumeMounts:
            - mountPath: /var/lib/cni/networks
              name: host-local-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: "{{ quay_registry }}/calico/cni:v3.6.2"
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      containers:
        # Runs node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: "{{ quay_registry }}/calico/node:v3.6.2"
          env:
            # Configure inbound failsafe rules
            - name: FELIX_FAILSAFEINBOUNDHOSTPORTS
              value: "tcp:22, udp:68, tcp:179"
            # Configure output failsafe rules
            - name: FELIX_FAILSAFEOUTBOUNDHOSTPORTS
              value: "udp:53, udp:67, tcp:179"
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            {% if cluster_network_ipv4 -%}
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            - name: IP_AUTODETECTION_METHOD
              value: "can-reach={{ cluster_floating_address }}"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "{{ cluster_pod_subnet }}"
            - name: CALICO_IPV4POOL_NAT_OUTGOING
              value: "true"
            {% else -%}
            # Disable IPv4
            - name: IP
              value: "none"
            {% endif -%}
            {% if cluster_network_ipv6 -%}
            - name: IP6
              value: "autodetect"
            - name: IP6_AUTODETECTION_METHOD
              value: "can-reach={{ cluster_floating_address }}"
            # The default IPv6 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV6POOL_CIDR
              value: "{{ cluster_pod_subnet }}"
            - name: CALICO_IPV6POOL_NAT_OUTGOING
              value: "true"
            {% else -%}
            # Disable IPv6
            - name: IP6
              value: "none"
            {% endif -%}
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            {% if cluster_network_ipv6 -%}
            # Enable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "true"
            - name: CALICO_ROUTER_ID
              value: "hash"
            {% else -%}
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            {% endif -%}
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
                - -bird-ready
                - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
      volumes:
        # Used by node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest deploys the Calico node controller.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  # The controller can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      containers:
        - name: calico-kube-controllers
          image: "{{ quay_registry }}/calico/kube-controllers:v3.6.2"
          env:
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: node
            - name: DATASTORE_TYPE
              value: kubernetes
          readinessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml

---
# Source: calico/templates/calico-typha.yaml

---
# Source: calico/templates/configure-canal.yaml
@ -1,2 +0,0 @@
# Overrides config file for kubelet
KUBELET_EXTRA_ARGS=--node-ip={{ node_ip }}
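# For illustration only (not part of the original template): with node_ip set
# to 192.168.206.3 (illustrative), the rendered file reads
#   KUBELET_EXTRA_ARGS=--node-ip=192.168.206.3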
@ -1,176 +0,0 @@
# Multus Version v3.2
# Based on:
# https://github.com/intel/multus-cni/blob/release-v3/images/multus-daemonset.yml
#
# The following modifications have been made:
#
# - The multus CNI configuration file has been explicitly specified to ensure
#   it has a lower lexicographic order than the calico CNI configuration file.
#
# - The configMap has been modified to work with Calico rather than Flannel
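#
# For illustration only (not part of the original manifest): kubelet loads the
# CNI config file that sorts first in /etc/cni/net.d, so installing the multus
# config as 05-multus.conf makes it sort ahead of calico's 10-calico.conflist.
# A listing on a node would look roughly like:
#
#   $ ls /etc/cni/net.d
#   05-multus.conf  10-calico.conflist  calico-kubeconfig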
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: network-attachment-definitions.k8s.cni.cncf.io
spec:
  group: k8s.cni.cncf.io
  version: v1
  scope: Namespaced
  names:
    plural: network-attachment-definitions
    singular: network-attachment-definition
    kind: NetworkAttachmentDefinition
    shortNames:
      - net-attach-def
  validation:
    openAPIV3Schema:
      properties:
        spec:
          properties:
            config:
              type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: multus
rules:
  - apiGroups: ["k8s.cni.cncf.io"]
    resources:
      - '*'
    verbs:
      - '*'
  - apiGroups:
      - ""
    resources:
      - pods
      - pods/status
    verbs:
      - get
      - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: multus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: multus
subjects:
  - kind: ServiceAccount
    name: multus
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: multus
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: multus-cni-config
  namespace: kube-system
  labels:
    tier: node
    app: multus
data:
  cni-conf.json: |
    {
      "name": "multus-cni-network",
      "type": "multus",
      "delegates": [
        {
          "cniVersion": "0.3.0",
          "name": "k8s-pod-network",
          "type": "calico",
          "masterplugin": true,
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": 1500,
          "ipam": {
            "type": "calico-ipam",
            "assign_ipv4": "{{ "true" if cluster_network_ipv4 else "false" }}",
            "assign_ipv6": "{{ "true" if cluster_network_ipv6 else "false" }}"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
          }
        }
      ],
      "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-multus-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: multus
spec:
  template:
    metadata:
      labels:
        tier: node
        app: multus
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: multus
      containers:
        - name: kube-multus
          image: "{{ docker_registry }}/nfvpe/multus:v3.2"
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          command:
            - /bin/bash
            - -cex
            - |
              #!/bin/bash
              sed "s|__KUBERNETES_NODE_NAME__|${KUBERNETES_NODE_NAME}|g" /tmp/multus-conf/05-multus.conf > /usr/src/multus-cni/images/05-multus.conf
              /entrypoint.sh --multus-conf-file=/usr/src/multus-cni/images/05-multus.conf
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: true
          volumeMounts:
            - name: cni
              mountPath: /host/etc/cni/net.d
            - name: cnibin
              mountPath: /host/opt/cni/bin
            - name: multus-cfg
              mountPath: /tmp/multus-conf
      volumes:
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: cnibin
          hostPath:
            path: /opt/cni/bin
        - name: multus-cfg
          configMap:
            name: multus-cni-config
            items:
              - key: cni-conf.json
                path: 05-multus.conf
@ -1,45 +0,0 @@
# SRIOV-CNI Release v1
# Based on:
# https://github.com/intel/sriov-cni/blob/master/images/sriov-cni-daemonset.yaml
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-sriov-cni-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: sriov-cni
spec:
  template:
    metadata:
      labels:
        tier: node
        app: sriov-cni
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      containers:
        - name: kube-sriov-cni
          image: "{{ docker_registry }}/starlingx/k8s-cni-sriov:master-centos-stable-latest"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          volumeMounts:
            - name: cnibin
              mountPath: /host/opt/cni/bin
      volumes:
        - name: cnibin
          hostPath:
            path: /opt/cni/bin
@ -1,68 +0,0 @@
# SRIOV device CNI plugin
# Based on:
# https://github.com/intel/sriov-cni/blob/master/images/sriov-cni-daemonset.yaml
#
# The following modifications have been made:
#
# - A nodeSelector of 'sriovdp' has been added to ensure the sriov device plugin
#   pods only run on appropriately labelled nodes.
# - The config hostPath is explicitly set to 'File'
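#
# For illustration only (not part of the original manifest): a node is opted
# in to the device plugin by applying the matching label, e.g.:
#
#   kubectl label node controller-0 sriovdp=enabled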
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: sriov-device-plugin
  namespace: kube-system

---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-sriov-device-plugin-amd64
  namespace: kube-system
  labels:
    tier: node
    app: sriovdp
spec:
  template:
    metadata:
      labels:
        tier: node
        app: sriovdp
    spec:
      nodeSelector:
        beta.kubernetes.io/arch: amd64
        sriovdp: enabled
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      serviceAccountName: sriov-device-plugin
      containers:
        - name: kube-sriovdp
          image: "{{ docker_registry }}/starlingx/k8s-plugins-sriov-network-device:master-centos-stable-latest"
          args:
            - --log-level=10
          securityContext:
            privileged: false
          volumeMounts:
            - name: devicesock
              mountPath: /var/lib/kubelet/device-plugins/
              readOnly: false
            - name: sysfs
              mountPath: /sys
              readOnly: true
            - name: config
              mountPath: /etc/pcidp/config.json
              readOnly: true
      volumes:
        - name: devicesock
          hostPath:
            path: /var/lib/kubelet/device-plugins/
        - name: sysfs
          hostPath:
            path: /sys
        - name: config
          hostPath:
            path: /etc/pcidp/config.json
            type: File
@ -1,21 +0,0 @@
---
tiller_img: gcr.io/kubernetes-helm/tiller:v2.13.1
armada_img: quay.io/airshipit/armada:af8a9ffd0873c2fbc915794e235dbd357f2adab1
source_helm_bind_dir: /opt/cgcs/helm_charts
target_helm_bind_dir: /www/pages/helm_charts
helm_repo_name_apps: starlingx
helm_repo_name_platform: stx-platform
kube_admin_yaml_template: /usr/share/puppet/modules/platform/templates/kubeadm.yaml.erb
kubelet_override_template: /usr/share/puppet/modules/platform/templates/kube-stx-override.conf.erb
kubelet_pmond_template: /usr/share/puppet/modules/platform/templates/kubelet-pmond-conf.erb
lighttpd_conf_template: /usr/share/puppet/modules/openstack/templates/lighttpd.conf.erb
lighttpd_inc_conf_template: /usr/share/puppet/modules/openstack/templates/lighttpd-inc.conf.erb
cert_cnf_template: /usr/share/puppet/modules/platform/templates/registry-cert-extfile.erb
insecure_docker_registry_template: /usr/share/puppet/modules/platform/templates/insecuredockerregistry.conf.erb
cert_cnf_file: /etc/ssl/private/registry-cert-extfile.cnf
registry_cert_key: /etc/ssl/private/registry-cert.key
registry_cert_crt: /etc/ssl/private/registry-cert.crt
registry_cert_pkcs1_key: /etc/ssl/private/registry-cert-pkcs1.key
docker_cert_dir: /etc/docker/certs.d
cgroup_root: /sys/fs/cgroup
k8s_cgroup_name: k8s-infra
@ -1,793 +0,0 @@
#!/usr/bin/python

#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# OpenStack Keystone and Sysinv interactions
#

import os
import pyudev
import re
import subprocess
import sys
import time

# The following imports are to make use of the OpenStack cgtsclient and some
# constants in controllerconfig. When it is time to remove/deprecate these
# packages, classes OpenStack, Token and referenced constants need to be moved
# to this standalone script.
from controllerconfig.common import constants
from controllerconfig import ConfigFail
from controllerconfig import openstack
from controllerconfig import sysinv_api as sysinv

from netaddr import IPNetwork
from sysinv.common import constants as sysinv_constants

try:
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser


COMBINED_LOAD = 'All-in-one'
RECONFIGURE_SYSTEM = False
RECONFIGURE_NETWORK = False
RECONFIGURE_SERVICE = False
INITIAL_POPULATION = True
CONF = ConfigParser()


def wait_system_config(client):
    for _ in range(constants.SYSTEM_CONFIG_TIMEOUT):
        try:
            systems = client.sysinv.isystem.list()
            if systems:
                # only one system (default)
                return systems[0]
        except Exception:
            pass
        time.sleep(1)
    else:
        raise ConfigFail('Timeout waiting for default system '
                         'configuration')


def populate_system_config(client):
    if not INITIAL_POPULATION and not RECONFIGURE_SYSTEM:
        return
    # Wait for pre-populated system
    system = wait_system_config(client)

    if INITIAL_POPULATION:
        print("Populating system config...")
    else:
        print("Updating system config...")
    # Update system attributes
    capabilities = {'region_config': False,
                    'vswitch_type': 'none',
                    'shared_services': '[]',
                    'sdn_enabled': False,
                    'https_enabled': False}

    values = {
        'system_mode': CONF.get('BOOTSTRAP_CONFIG', 'SYSTEM_MODE'),
        'capabilities': capabilities,
        'timezone': CONF.get('BOOTSTRAP_CONFIG', 'TIMEZONE'),
        'region_name': 'RegionOne',
        'service_project_name': 'services'
    }

    if INITIAL_POPULATION:
        values.update(
            {'system_type': CONF.get('BOOTSTRAP_CONFIG', 'SYSTEM_TYPE')}
        )

    patch = sysinv.dict_to_patch(values)
    client.sysinv.isystem.update(system.uuid, patch)


def populate_load_config(client):
    if not INITIAL_POPULATION:
        return
    print("Populating load config...")
    patch = {'software_version': CONF.get('BOOTSTRAP_CONFIG', 'SW_VERSION'),
             'compatible_version': "N/A",
             'required_patches': "N/A"}
    client.sysinv.load.create(**patch)


def delete_network_and_addrpool(client, network_name):
    networks = client.sysinv.network.list()
    network_uuid = addrpool_uuid = None
    for network in networks:
        if network.name == network_name:
            network_uuid = network.uuid
            addrpool_uuid = network.pool_uuid
    if network_uuid:
        print("Deleting network and address pool for network %s..." %
              network_name)
        host = client.sysinv.ihost.get('controller-0')
        host_addresses = client.sysinv.address.list_by_host(host.uuid)
        for addr in host_addresses:
            print("Deleting address %s" % addr.uuid)
            client.sysinv.address.delete(addr.uuid)
        client.sysinv.network.delete(network_uuid)
        client.sysinv.address_pool.delete(addrpool_uuid)


def populate_mgmt_network(client):
    management_subnet = IPNetwork(
        CONF.get('BOOTSTRAP_CONFIG', 'MANAGEMENT_SUBNET'))
    start_address = CONF.get('BOOTSTRAP_CONFIG',
                             'MANAGEMENT_START_ADDRESS')
    end_address = CONF.get('BOOTSTRAP_CONFIG',
                           'MANAGEMENT_END_ADDRESS')
    dynamic_allocation = CONF.getboolean(
        'BOOTSTRAP_CONFIG', 'DYNAMIC_ADDRESS_ALLOCATION')

    if RECONFIGURE_NETWORK:
        delete_network_and_addrpool(client, 'mgmt')
        print("Updating management network...")
    else:
        print("Populating management network...")

    # create the address pool
    values = {
        'name': 'management',
        'network': str(management_subnet.network),
        'prefix': management_subnet.prefixlen,
        'ranges': [(start_address, end_address)],
    }
    pool = client.sysinv.address_pool.create(**values)

    # create the network for the pool
    values = {
        'type': sysinv_constants.NETWORK_TYPE_MGMT,
        'name': sysinv_constants.NETWORK_TYPE_MGMT,
        'dynamic': dynamic_allocation,
        'pool_uuid': pool.uuid,
    }

    client.sysinv.network.create(**values)


def populate_pxeboot_network(client):
    pxeboot_subnet = IPNetwork(CONF.get('BOOTSTRAP_CONFIG', 'PXEBOOT_SUBNET'))
    start_address = CONF.get('BOOTSTRAP_CONFIG',
                             'PXEBOOT_START_ADDRESS')
    end_address = CONF.get('BOOTSTRAP_CONFIG',
                           'PXEBOOT_END_ADDRESS')

    if RECONFIGURE_NETWORK:
        delete_network_and_addrpool(client, 'pxeboot')
        print("Updating pxeboot network...")
    else:
        print("Populating pxeboot network...")

    # create the address pool
    values = {
        'name': 'pxeboot',
        'network': str(pxeboot_subnet.network),
        'prefix': pxeboot_subnet.prefixlen,
        'ranges': [(start_address, end_address)],
    }
    pool = client.sysinv.address_pool.create(**values)

    # create the network for the pool
    values = {
        'type': sysinv_constants.NETWORK_TYPE_PXEBOOT,
        'name': sysinv_constants.NETWORK_TYPE_PXEBOOT,
        'dynamic': True,
        'pool_uuid': pool.uuid,
    }
    client.sysinv.network.create(**values)


def populate_infra_network(client):
    return


def populate_oam_network(client):
    external_oam_subnet = IPNetwork(CONF.get(
        'BOOTSTRAP_CONFIG', 'EXTERNAL_OAM_SUBNET'))
    start_address = CONF.get('BOOTSTRAP_CONFIG',
                             'EXTERNAL_OAM_START_ADDRESS')
    end_address = CONF.get('BOOTSTRAP_CONFIG',
                           'EXTERNAL_OAM_END_ADDRESS')

    if RECONFIGURE_NETWORK:
        delete_network_and_addrpool(client, 'oam')
        print("Updating oam network...")
    else:
        print("Populating oam network...")

    # create the address pool
    values = {
        'name': 'oam',
        'network': str(external_oam_subnet.network),
        'prefix': external_oam_subnet.prefixlen,
        'ranges': [(start_address, end_address)],
        'floating_address': CONF.get(
            'BOOTSTRAP_CONFIG', 'EXTERNAL_OAM_FLOATING_ADDRESS'),
    }

    system_mode = CONF.get('BOOTSTRAP_CONFIG', 'SYSTEM_MODE')
    if system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX:
        values.update({
            'controller0_address': CONF.get(
                'BOOTSTRAP_CONFIG', 'EXTERNAL_OAM_0_ADDRESS'),
            'controller1_address': CONF.get(
                'BOOTSTRAP_CONFIG', 'EXTERNAL_OAM_1_ADDRESS'),
        })
    values.update({
        'gateway_address': CONF.get(
            'BOOTSTRAP_CONFIG', 'EXTERNAL_OAM_GATEWAY_ADDRESS'),
    })
    pool = client.sysinv.address_pool.create(**values)

    # create the network for the pool
    values = {
        'type': sysinv_constants.NETWORK_TYPE_OAM,
        'name': sysinv_constants.NETWORK_TYPE_OAM,
        'dynamic': False,
        'pool_uuid': pool.uuid,
    }

    client.sysinv.network.create(**values)


def populate_multicast_network(client):
    management_multicast_subnet = IPNetwork(CONF.get(
        'BOOTSTRAP_CONFIG', 'MANAGEMENT_MULTICAST_SUBNET'))
    start_address = CONF.get('BOOTSTRAP_CONFIG',
                             'MANAGEMENT_MULTICAST_START_ADDRESS')
    end_address = CONF.get('BOOTSTRAP_CONFIG',
                           'MANAGEMENT_MULTICAST_END_ADDRESS')

    if RECONFIGURE_NETWORK:
        delete_network_and_addrpool(client, 'multicast')
        print("Updating multicast network...")
    else:
        print("Populating multicast network...")

    # create the address pool
    values = {
        'name': 'multicast-subnet',
        'network': str(management_multicast_subnet.network),
        'prefix': management_multicast_subnet.prefixlen,
        'ranges': [(start_address, end_address)],
    }
    pool = client.sysinv.address_pool.create(**values)

    # create the network for the pool
    values = {
        'type': sysinv_constants.NETWORK_TYPE_MULTICAST,
        'name': sysinv_constants.NETWORK_TYPE_MULTICAST,
        'dynamic': False,
        'pool_uuid': pool.uuid,
    }
    client.sysinv.network.create(**values)


def populate_cluster_host_network(client):
    cluster_host_subnet = IPNetwork(CONF.get(
        'BOOTSTRAP_CONFIG', 'CLUSTER_HOST_SUBNET'))
    start_address = CONF.get('BOOTSTRAP_CONFIG',
                             'CLUSTER_HOST_START_ADDRESS')
    end_address = CONF.get('BOOTSTRAP_CONFIG',
                           'CLUSTER_HOST_END_ADDRESS')

    if RECONFIGURE_NETWORK:
        delete_network_and_addrpool(client, 'cluster-host')
        print("Updating cluster host network...")
    else:
        print("Populating cluster host network...")

    # create the address pool
    values = {
        'name': 'cluster-host-subnet',
        'network': str(cluster_host_subnet.network),
        'prefix': cluster_host_subnet.prefixlen,
        'ranges': [(start_address, end_address)],
    }
    pool = client.sysinv.address_pool.create(**values)

    # create the network for the pool
    values = {
        'type': sysinv_constants.NETWORK_TYPE_CLUSTER_HOST,
        'name': sysinv_constants.NETWORK_TYPE_CLUSTER_HOST,
        'dynamic': True,
        'pool_uuid': pool.uuid,
    }
    client.sysinv.network.create(**values)


def populate_cluster_pod_network(client):
    cluster_pod_subnet = IPNetwork(CONF.get(
        'BOOTSTRAP_CONFIG', 'CLUSTER_POD_SUBNET'))
    start_address = CONF.get('BOOTSTRAP_CONFIG',
                             'CLUSTER_POD_START_ADDRESS')
    end_address = CONF.get('BOOTSTRAP_CONFIG',
                           'CLUSTER_POD_END_ADDRESS')

    if RECONFIGURE_NETWORK:
        delete_network_and_addrpool(client, 'cluster-pod')
        print("Updating cluster pod network...")
    else:
        print("Populating cluster pod network...")

    # create the address pool
    values = {
        'name': 'cluster-pod-subnet',
        'network': str(cluster_pod_subnet.network),
        'prefix': cluster_pod_subnet.prefixlen,
        'ranges': [(start_address, end_address)],
    }
    pool = client.sysinv.address_pool.create(**values)

    # create the network for the pool
    values = {
        'type': sysinv_constants.NETWORK_TYPE_CLUSTER_POD,
        'name': sysinv_constants.NETWORK_TYPE_CLUSTER_POD,
        'dynamic': False,
        'pool_uuid': pool.uuid,
    }
    client.sysinv.network.create(**values)


def populate_cluster_service_network(client):
    cluster_service_subnet = IPNetwork(CONF.get(
        'BOOTSTRAP_CONFIG', 'CLUSTER_SERVICE_SUBNET'))
    start_address = CONF.get('BOOTSTRAP_CONFIG',
                             'CLUSTER_SERVICE_START_ADDRESS')
    end_address = CONF.get('BOOTSTRAP_CONFIG',
                           'CLUSTER_SERVICE_END_ADDRESS')

    if RECONFIGURE_NETWORK:
        delete_network_and_addrpool(client, 'cluster-service')
        print("Updating cluster service network...")
    else:
        print("Populating cluster service network...")

    # create the address pool
    values = {
        'name': 'cluster-service-subnet',
        'network': str(cluster_service_subnet.network),
        'prefix': cluster_service_subnet.prefixlen,
        'ranges': [(start_address, end_address)],
    }
    pool = client.sysinv.address_pool.create(**values)

    # create the network for the pool
    values = {
        'type': sysinv_constants.NETWORK_TYPE_CLUSTER_SERVICE,
        'name': sysinv_constants.NETWORK_TYPE_CLUSTER_SERVICE,
        'dynamic': False,
        'pool_uuid': pool.uuid,
    }
    client.sysinv.network.create(**values)


def populate_network_config(client):
    if not INITIAL_POPULATION and not RECONFIGURE_NETWORK:
        return
    populate_mgmt_network(client)
    populate_pxeboot_network(client)
    populate_infra_network(client)
    populate_oam_network(client)
    populate_multicast_network(client)
    populate_cluster_host_network(client)
    populate_cluster_pod_network(client)
    populate_cluster_service_network(client)
    print("Network config completed.")


def populate_dns_config(client):
    if not INITIAL_POPULATION and not RECONFIGURE_SYSTEM:
        return

    if INITIAL_POPULATION:
        print("Populating DNS config...")
    else:
        print("Updating DNS config...")

    nameservers = CONF.get('BOOTSTRAP_CONFIG', 'NAMESERVERS')

    dns_list = client.sysinv.idns.list()
    dns_record = dns_list[0]
    values = {
        'nameservers': nameservers.rstrip(','),
        'action': 'apply'
    }
    patch = sysinv.dict_to_patch(values)
    client.sysinv.idns.update(dns_record.uuid, patch)


def populate_docker_config(client):
    if not INITIAL_POPULATION and not RECONFIGURE_SERVICE:
        return

    if INITIAL_POPULATION:
        print("Populating docker config...")
    else:
        print("Updating docker config...")

    http_proxy = CONF.get('BOOTSTRAP_CONFIG', 'DOCKER_HTTP_PROXY')
    https_proxy = CONF.get('BOOTSTRAP_CONFIG', 'DOCKER_HTTPS_PROXY')
    no_proxy = CONF.get('BOOTSTRAP_CONFIG', 'DOCKER_NO_PROXY')

    if http_proxy != 'undef' or https_proxy != 'undef':
        parameters = {}
        if http_proxy != 'undef':
            parameters['http_proxy'] = http_proxy
        if https_proxy != 'undef':
            parameters['https_proxy'] = https_proxy

        parameters['no_proxy'] = no_proxy
        values = {
            'service': sysinv_constants.SERVICE_TYPE_DOCKER,
            'section': sysinv_constants.SERVICE_PARAM_SECTION_DOCKER_PROXY,
            'personality': None,
            'resource': None,
            'parameters': parameters
        }
        if RECONFIGURE_SERVICE:
            parameters = client.sysinv.service_parameter.list()
            for parameter in parameters:
                if (parameter.name == 'http_proxy' or
                        parameter.name == 'https_proxy' or
                        parameter.name == 'no_proxy'):
                    client.sysinv.service_parameter.delete(parameter.uuid)
        client.sysinv.service_parameter.create(**values)
        print("Docker proxy config completed.")

    use_default_registries = CONF.getboolean(
        'BOOTSTRAP_CONFIG', 'USE_DEFAULT_REGISTRIES')

    if not use_default_registries:
        secure_registry = CONF.getboolean('BOOTSTRAP_CONFIG',
                                          'IS_SECURE_REGISTRY')
        parameters = {}

        # TODO(tngo): The following 4 service parameters will be removed when
        # we switch to the long term solution using a single "registries"
        # service parameter that is extensible.
        parameters['k8s'] = CONF.get('BOOTSTRAP_CONFIG', 'K8S_REGISTRY')
        parameters['gcr'] = CONF.get('BOOTSTRAP_CONFIG', 'GCR_REGISTRY')
        parameters['quay'] = CONF.get('BOOTSTRAP_CONFIG', 'QUAY_REGISTRY')
        parameters['docker'] = CONF.get('BOOTSTRAP_CONFIG', 'DOCKER_REGISTRY')

        if not secure_registry:
            parameters['insecure_registry'] = "True"

        values = {
            'service': sysinv_constants.SERVICE_TYPE_DOCKER,
            'section': sysinv_constants.SERVICE_PARAM_SECTION_DOCKER_REGISTRY,
            'personality': None,
            'resource': None,
            'parameters': parameters
        }
        if RECONFIGURE_SERVICE:
            parameters = client.sysinv.service_parameter.list()
            for parameter in parameters:
                if (parameter.name == 'k8s' or
                        parameter.name == 'gcr' or
                        parameter.name == 'quay' or
                        parameter.name == 'docker' or
                        parameter.name == 'insecure_registry'):
                    client.sysinv.service_parameter.delete(
                        parameter.uuid)
        client.sysinv.service_parameter.create(**values)
        print("Docker registry config completed.")


def get_management_mac_address():
    ifname = CONF.get('BOOTSTRAP_CONFIG', 'MANAGEMENT_INTERFACE')

    try:
        filename = '/sys/class/net/%s/address' % ifname
        with open(filename, 'r') as f:
            return f.readline().rstrip()
    except Exception:
        raise ConfigFail("Failed to obtain mac address of %s" % ifname)


def get_rootfs_node():
    """Cloned from sysinv"""
    cmdline_file = '/proc/cmdline'
    device = None

    with open(cmdline_file, 'r') as f:
        for line in f:
            for param in line.split():
                params = param.split("=", 1)
                if params[0] == "root":
                    if "UUID=" in params[1]:
                        key, uuid = params[1].split("=")
                        symlink = "/dev/disk/by-uuid/%s" % uuid
                        device = os.path.basename(os.readlink(symlink))
                    else:
                        device = os.path.basename(params[1])

    if device is not None:
        if sysinv_constants.DEVICE_NAME_NVME in device:
            re_line = re.compile(r'^(nvme[0-9]*n[0-9]*)')
        else:
            re_line = re.compile(r'^(\D*)')
        match = re_line.search(device)
        if match:
            return os.path.join("/dev", match.group(1))

    return


def find_boot_device():
    """Determine boot device"""
    boot_device = None

    context = pyudev.Context()

    # Get the boot partition
    # Unfortunately, it seems we can only get it from the logfile.
    # We'll parse the device used from a line like the following:
    # BIOSBoot.create: device: /dev/sda1 ; status: False ; type: biosboot ;
    # or
    # EFIFS.create: device: /dev/sda1 ; status: False ; type: efi ;
    #
    logfile = '/var/log/anaconda/storage.log'

    re_line = re.compile(r'(BIOSBoot|EFIFS).create: device: ([^\s;]*)')
    boot_partition = None
    with open(logfile, 'r') as f:
        for line in f:
            match = re_line.search(line)
            if match:
                boot_partition = match.group(2)
                break
    if boot_partition is None:
        raise ConfigFail("Failed to determine the boot partition")

    # Find the boot partition and get its parent
    for device in context.list_devices(DEVTYPE='partition'):
        if device.device_node == boot_partition:
            boot_device = device.find_parent('block').device_node
            break

    if boot_device is None:
        raise ConfigFail("Failed to determine the boot device")

    return boot_device


def device_node_to_device_path(dev_node):
    device_path = None
    cmd = ["find", "-L", "/dev/disk/by-path/", "-samefile", dev_node]

    try:
        out = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        print("Could not retrieve device information: %s" % e)
        return device_path

    device_path = out.rstrip()
    return device_path


def get_device_from_function(get_disk_function):
    device_node = get_disk_function()
    device_path = device_node_to_device_path(device_node)
    device = device_path if device_path else os.path.basename(device_node)

    return device


def get_console_info():
    """Determine console info"""
    cmdline_file = '/proc/cmdline'

    re_line = re.compile(r'^.*\s+console=([^\s]*)')

    with open(cmdline_file, 'r') as f:
        for line in f:
            match = re_line.search(line)
            if match:
                console_info = match.group(1)
                return console_info
    return ''


def get_tboot_info():
    """Determine whether we were booted with a tboot value"""
    cmdline_file = '/proc/cmdline'

    # tboot=true, tboot=false, or no tboot parameter expected
    re_line = re.compile(r'^.*\s+tboot=([^\s]*)')

    with open(cmdline_file, 'r') as f:
        for line in f:
            match = re_line.search(line)
            if match:
                tboot = match.group(1)
                return tboot
    return ''


def get_orig_install_mode():
    """Determine original install mode, text vs graphical"""
    # Post-install, the only way to determine the original install mode
    # will be to check the anaconda install log for the parameters passed
    logfile = '/var/log/anaconda/anaconda.log'

    search_str = 'Display mode = t'
    try:
        subprocess.check_call(['grep', '-q', search_str, logfile])
        return 'text'
    except subprocess.CalledProcessError:
        return 'graphical'


def populate_controller_config(client):
    if not INITIAL_POPULATION:
        return

    mgmt_mac = get_management_mac_address()
    print("Management mac = %s" % mgmt_mac)
    rootfs_device = get_device_from_function(get_rootfs_node)
    print("Root fs device = %s" % rootfs_device)
    boot_device = get_device_from_function(find_boot_device)
    print("Boot device = %s" % boot_device)
    console = get_console_info()
    print("Console = %s" % console)
    tboot = get_tboot_info()
    print("Tboot = %s" % tboot)
    install_output = get_orig_install_mode()
    print("Install output = %s" % install_output)

    provision_state = sysinv.HOST_PROVISIONED
    system_type = CONF.get('BOOTSTRAP_CONFIG', 'SYSTEM_TYPE')
    if system_type == COMBINED_LOAD:
        provision_state = sysinv.HOST_PROVISIONING

    values = {
        'personality': sysinv.HOST_PERSONALITY_CONTROLLER,
        'hostname': CONF.get('BOOTSTRAP_CONFIG', 'CONTROLLER_HOSTNAME'),
        'mgmt_ip': CONF.get('BOOTSTRAP_CONFIG', 'CONTROLLER_0_ADDRESS'),
        'mgmt_mac': mgmt_mac,
        'administrative': sysinv.HOST_ADMIN_STATE_LOCKED,
        'operational': sysinv.HOST_OPERATIONAL_STATE_DISABLED,
        'availability': sysinv.HOST_AVAIL_STATE_OFFLINE,
        'invprovision': provision_state,
        'rootfs_device': rootfs_device,
        'boot_device': boot_device,
        'console': console,
        'tboot': tboot,
        'install_output': install_output,
    }
    print("Host values = %s" % values)
    controller = client.sysinv.ihost.create(**values)
    return controller


def wait_disk_config(client, host):
    count = 0
    disks = None
    for _ in range(constants.SYSTEM_CONFIG_TIMEOUT // 10):
        try:
            disks = client.sysinv.idisk.list(host.uuid)
            if disks and count == len(disks):
                return disks
            count = len(disks)
        except Exception:
            pass
        if disks:
            time.sleep(1)  # We don't need to wait that long
        else:
            time.sleep(10)
    else:
        raise ConfigFail('Timeout waiting for controller disk '
                         'configuration')


def wait_pv_config(client, host):
    count = 0
    pvs = None
    for _ in range(constants.SYSTEM_CONFIG_TIMEOUT // 10):
        try:
            pvs = client.sysinv.ipv.list(host.uuid)
            if pvs and count == len(pvs):
                return pvs
            count = len(pvs)
        except Exception:
            pass
        if pvs:
            time.sleep(1)  # We don't need to wait that long
        else:
            time.sleep(10)
    else:
        raise ConfigFail('Timeout waiting for controller PV '
                         'configuration')


def inventory_config_complete_wait(client, controller):
    # Wait for sysinv-agent to populate disks and PVs
    if not INITIAL_POPULATION:
        return

    wait_disk_config(client, controller)
    wait_pv_config(client, controller)


def populate_default_storage_backend(client, controller):
    if not INITIAL_POPULATION:
        return

    print("Populating ceph-mon config for controller-0...")
    values = {'ihost_uuid': controller.uuid}
    client.sysinv.ceph_mon.create(**values)

    print("Populating ceph storage backend config...")
    values = {'confirmed': True}
    client.sysinv.storage_ceph.create(**values)


def handle_invalid_input():
    raise Exception("Invalid input!\nUsage: <bootstrap-config-file> "
                    "[--system] [--network] [--service]")


if __name__ == '__main__':

    argc = len(sys.argv)
    if argc < 2 or argc > 5:
        print("Failed")
        handle_invalid_input()

    arg = 2
    while arg < argc:
        if sys.argv[arg] == "--system":
            RECONFIGURE_SYSTEM = True
        elif sys.argv[arg] == "--network":
            RECONFIGURE_NETWORK = True
        elif sys.argv[arg] == "--service":
            RECONFIGURE_SERVICE = True
        else:
            handle_invalid_input()
        arg += 1

    INITIAL_POPULATION = not (RECONFIGURE_SYSTEM or RECONFIGURE_NETWORK or
                              RECONFIGURE_SERVICE)

    config_file = sys.argv[1]
    if not os.path.exists(config_file):
        raise Exception("Config file is not found!")

    CONF.read(config_file)

    # Puppet manifest might be applied as part of initial host
    # config, set INITIAL_CONFIG_PRIMARY variable just in case.
    os.environ["INITIAL_CONFIG_PRIMARY"] = "true"

    try:
        with openstack.OpenStack() as client:
            populate_system_config(client)
            populate_load_config(client)
            populate_network_config(client)
            populate_dns_config(client)
            populate_docker_config(client)
            controller = populate_controller_config(client)
            inventory_config_complete_wait(client, controller)
            populate_default_storage_backend(client, controller)
            os.remove(config_file)
            if INITIAL_POPULATION:
                print("Successfully updated the initial system config.")
            else:
                print("Successfully provisioned the initial system config.")
    except Exception:
        # Print the marker string for Ansible and re-raise the exception
        if INITIAL_POPULATION:
            print("Failed to update the initial system config.")
        else:
            print("Failed to provision the initial system config.")
        raise
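The script above takes all of its input from a single [BOOTSTRAP_CONFIG] section read through ConfigParser. A minimal sketch of that input shape, using Python 3's configparser for brevity; the section and option names are taken from the CONF.get() calls in the script, while the values here are hypothetical examples:

# Sketch only: exercise the same ConfigParser access pattern the
# script uses, against a hypothetical bootstrap config.
from configparser import ConfigParser

SAMPLE = """\
[BOOTSTRAP_CONFIG]
SYSTEM_MODE = simplex
SYSTEM_TYPE = All-in-one
TIMEZONE = UTC
SW_VERSION = 19.01
MANAGEMENT_SUBNET = 192.168.204.0/28
MANAGEMENT_START_ADDRESS = 192.168.204.2
MANAGEMENT_END_ADDRESS = 192.168.204.14
DYNAMIC_ADDRESS_ALLOCATION = True
"""

conf = ConfigParser()
conf.read_string(SAMPLE)
print(conf.get('BOOTSTRAP_CONFIG', 'SYSTEM_MODE'))                   # simplex
print(conf.getboolean('BOOTSTRAP_CONFIG', 'DYNAMIC_ADDRESS_ALLOCATION'))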
@ -1,240 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
#   This role is to persist the bootstrap configurations on filesystem and
#   system inventory database.
#

# Keyring config
- block:
  - name: Delete the previous python_keyring directory if exists
    file:
      path: "{{ keyring_permdir + '/' + keyring_workdir | basename }}"
      state: absent

  - name: Persist keyring data
    command: "mv {{ keyring_workdir }} {{ keyring_permdir }}"
  when: save_password

- name: Ensure replicated config parent directory exists
  file:
    path: "{{ config_permdir }}"
    state: directory
    recurse: yes
    owner: root
    group: root
    mode: 0755

- name: Get list of new config files
  find:
    paths: "{{ config_workdir }}"
    file_type: any
  register: config_find

- name: Remove existing config files from permanent location
  file:
    path: "{{ config_permdir }}/{{ item.path | basename }}"
    state: absent
  with_items: "{{ config_find.files }}"

- name: Move new config files to permanent location
  # Can't use command module due to wildcard
  shell: mv {{ config_workdir }}/* {{ config_permdir }}

- name: Delete working config directory
  file:
    path: "{{ config_workdir }}"
    state: absent

# Postgres, PXE, Branding, Grub config tasks and filesystem resizing are
# moved to a separate file as they don't need to be executed again on replay.
- include: one_time_config_tasks.yml
  when: not reconfigured

- block:
  - name: Set input parameters to populate config script
    set_fact:
      script_input: "{{ config_permdir + '/' + bootstrap_config_file|basename }}"

  - name: Update input parameters with reconfigure system flag
    set_fact:
      script_input: "{{ script_input + ' --system' }}"
    when: system_config_update

  - name: Update input parameters with reconfigure network flag
    set_fact:
      script_input: "{{ script_input + ' --network' }}"
    when: network_config_update

  - name: Update input parameters with reconfigure service flag
    set_fact:
      script_input: "{{ script_input + ' --service' }}"
    when: docker_config_update

  - name: Update input parameters if config from previous play is missing
    set_fact:
      script_input: "{{ script_input + ' --system --network --service' }}"
    when: reconfigured and not last_config_file.stat.exists

  - debug: var=script_input

  - name: Remove the endpoint reconfig flag before reconfiguring the service endpoints
    file:
      path: /etc/platform/.service_endpoint_reconfigured
      state: absent
    when: reconfigure_endpoints

  - name: Shutting down services for reconfiguration
    include: shutdown_services.yml
    when: restart_services

  - name: Saving config in sysinv database
    script: populate_initial_config.py {{ script_input }}
    register: populate_result
    failed_when: false

  - debug: var=populate_result

  - name: Fail if populate config script throws an exception
    fail:
      msg: "Failed to provision initial system configuration."
    when: populate_result.rc != 0

  - block:
    - name: Add management floating address if this is the initial play
      command: ip addr add {{ mgmt_floating_virtual }} dev lo scope host
      when: not replayed

    # If this is the initial play, or a replay with management and/or oam
    # network config change, we must wait for the keystone endpoint runtime
    # manifest to complete and restart sysinv agent and api.
    - name: Wait for service endpoints reconfiguration to complete
      wait_for:
        path: /etc/platform/.service_endpoint_reconfigured
        state: present
        timeout: 360
        msg: Timeout waiting for service endpoints reconfiguration to complete

    - name: Update sysinv API bind host with new management floating IP
      replace:
        path: /etc/sysinv/sysinv.conf
        regexp: "sysinv_api_bind_ip=.*$"
        replace: "sysinv_api_bind_ip={{ controller_floating_address }}"

    - name: Restart sysinv-agent and sysinv-api to pick up sysinv.conf update
      command: "{{ item }}"
      with_items:
        - /etc/init.d/sysinv-agent restart
        - /usr/lib/ocf/resource.d/platform/sysinv-api reload
      environment:
        OCF_ROOT: "/usr/lib/ocf"

    when: not replayed or reconfigure_endpoints

  - block:
    - name: Ensure docker config directory exists
      file:
        path: /etc/systemd/system/docker.service.d
        state: directory
        owner: root
        group: root
        mode: 0755

    - name: Ensure docker proxy config exists
      copy:
        content: ""
        dest: "{{ docker_proxy_conf }}"
        force: no
        owner: root
        group: root
        mode: 0644
        remote_src: yes

    - name: Write header to docker proxy conf file
      lineinfile:
        path: "{{ docker_proxy_conf }}"
        line: "[Service]"

    - name: Add http proxy URL to docker proxy conf file
      lineinfile:
        path: "{{ docker_proxy_conf }}"
        line: "Environment='HTTP_PROXY={{ docker_http_proxy }}'"
      when: docker_http_proxy != 'undef'

    - name: Add https proxy URL to docker proxy conf file
      lineinfile:
        path: "{{ docker_proxy_conf }}"
        line: "Environment='HTTPS_PROXY={{ docker_https_proxy }}'"
      when: docker_https_proxy != 'undef'

    - name: Add no proxy address list to docker proxy config file
      lineinfile:
        path: "{{ docker_proxy_conf }}"
        line: "Environment='NO_PROXY={{ docker_no_proxy_combined | join(',') }}'"

    - name: Restart Docker
      systemd:
        state: restarted
        daemon_reload: yes
        name: docker

    when: use_docker_proxy

  when: save_config

# PXE boot files
- name: Set pxeboot files source if address allocation is dynamic
  set_fact:
    pxe_default: pxelinux.cfg.files/default
    pxe_grub_cfg: pxelinux.cfg.files/grub.cfg
  when: dynamic_address_allocation

- name: Set pxeboot files source if address allocation is static
  set_fact:
    pxe_default: pxelinux.cfg.files/default.static
    pxe_grub_cfg: pxelinux.cfg.files/grub.cfg.static
  when: not dynamic_address_allocation

- name: Set pxeboot files symlinks
  file:
    src: "/pxeboot/{{ item.src }}"
    dest: "/pxeboot/{{ item.dest }}"
    state: link
    force: yes
  with_items:
    - { src: '{{ pxe_default }}', dest: 'pxelinux.cfg/default' }
    - { src: '{{ pxe_grub_cfg }}', dest: 'pxelinux.cfg/grub.cfg' }

- name: Update the management_interface in platform.conf
  lineinfile:
    path: /etc/platform/platform.conf
    regexp: "management_interface"
    line: "management_interface=lo"

- name: Add new entries to platform.conf
  lineinfile:
    path: /etc/platform/platform.conf
    line: "{{ item }}"
  with_items:
    - region_config=no
    - sw_version={{ software_version }}
    - vswitch_type=none

- name: Update resolv.conf with list of dns servers
  lineinfile:
    path: /etc/resolv.conf
    line: "nameserver {{ item }}"
  with_items: "{{ dns_servers }}"

- name: Remove localhost address from resolv.conf
  lineinfile:
    path: /etc/resolv.conf
    regex: "nameserver ::1"
    state: absent

- name: Invalidate name service caching server
  command: nscd -i hosts
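The docker proxy tasks above assemble a systemd drop-in file one line at a time. A minimal sketch of the resulting content, mirroring the same conditional logic; the proxy URLs and no-proxy list are hypothetical examples:

# Sketch only: build the http-proxy.conf content the way the
# lineinfile tasks above do, with hypothetical proxy values.
docker_http_proxy = 'http://proxy.example.com:3128'
docker_https_proxy = 'http://proxy.example.com:3128'
docker_no_proxy_combined = ['localhost', '127.0.0.1', '192.168.204.2']

lines = ['[Service]']
if docker_http_proxy != 'undef':
    lines.append("Environment='HTTP_PROXY=%s'" % docker_http_proxy)
if docker_https_proxy != 'undef':
    lines.append("Environment='HTTPS_PROXY=%s'" % docker_https_proxy)
lines.append("Environment='NO_PROXY=%s'" % ','.join(docker_no_proxy_combined))

print('\n'.join(lines))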
@ -1,127 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
#   - Save Postgres config to replicated filesystem for mate
#   - Save branding config to replicated filesystem
#   - Set up symlink for PXE boot
#   - Add default security feature to kernel command line parameters
#   - Resize some filesystems
#

- name: Set Postgres, PXE, branding config directory fact
  set_fact:
    postgres_config_dir: "{{ config_permdir + '/postgresql' }}"
    pxe_config_dir: "{{ config_permdir + '/pxelinux.cfg' }}"
    branding_config_dir: "{{ config_permdir + '/branding' }}"

- debug:
    msg: >-
      postgres_config_dir: {{ postgres_config_dir }}
      pxe_config_dir: {{ pxe_config_dir }}
      branding_config_dir: {{ branding_config_dir }}

- name: Ensure Postgres, PXE config directories exist
  file:
    path: "{{ item }}"
    state: directory
    recurse: yes
    owner: root
    group: root
    mode: 0755
  with_items:
    - "{{ postgres_config_dir }}"
    - "{{ pxe_config_dir }}"

- name: Get list of Postgres conf files
  find:
    paths: /etc/postgresql
    patterns: '*.conf'
  register: postgres_result

- name: Copy postgres conf files for mate
  copy:
    src: "{{ item.path }}"
    dest: "{{ postgres_config_dir }}"
    mode: preserve
    owner: postgres
    group: postgres
    remote_src: yes
  # with_fileglob can only be used for local lookup
  # with_fileglob:
  #   - /etc/postgresql/*
  with_items:
    - "{{ postgres_result.files }}"

- name: Create a symlink to PXE config files
  file:
    src: "{{ pxe_config_dir }}"
    dest: /pxeboot/pxelinux.cfg
    state: link

- name: Check if copying of branding files for mate is required
  stat:
    path: /opt/branding
  register: branding_result

- block:
  - name: Ensure branding config directory exists
    file:
      path: "{{ branding_config_dir }}"
      state: directory
      owner: root
      group: root
      mode: 0755

  - name: Check if branding tar files exist (there should be only one)
    find:
      paths: /opt/branding
      patterns: '*.tgz'
    register: bt_result

  - name: Copy branding tar files
    copy:
      src: "{{ item.path }}"
      dest: "{{ branding_config_dir }}"
      remote_src: yes
      mode: preserve
    with_items:
      - "{{ bt_result.files }}"

  when: branding_result.stat.exists and branding_result.stat.isdir

- name: Get grub default kernel
  command: grubby --default-kernel
  register: grub_kernel_output

- name: Add default security feature to kernel parameters
  command: "{{ item }}"
  with_items:
    - "grubby --update-kernel={{ grub_kernel_output.stdout_lines[0] }} --args='{{ default_security_feature }}'"
    - "grubby --efi --update-kernel={{ grub_kernel_output.stdout_lines[0] }} --args='{{ default_security_feature }}'"

- name: Resize filesystems (default)
  command: "{{ item }}"
  with_items:
    - lvextend -L20G /dev/cgts-vg/pgsql-lv
    - lvextend -L10G /dev/cgts-vg/cgcs-lv
    - lvextend -L16G /dev/cgts-vg/dockerdistribution-lv
    - lvextend -L40G /dev/cgts-vg/backup-lv
    - drbdadm -- --assume-peer-has-space resize all
    - resize2fs /dev/drbd0
    - resize2fs /dev/drbd3
    - resize2fs /dev/drbd8

- name: Further resize if root disk size is larger than 240G
  command: "{{ item }}"
  with_items:
    - lvextend -L40G /dev/cgts-vg/pgsql-lv
    - lvextend -L20G /dev/cgts-vg/cgcs-lv
    - lvextend -L50G /dev/cgts-vg/backup-lv
    - drbdadm -- --assume-peer-has-space resize all
    - resize2fs /dev/drbd0
    - resize2fs /dev/drbd3
  when: root_disk_size|int > minimum_root_disk_size
@ -1,88 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
#   Shut down flock services, helm, kubernetes and revert configurations
#   against loopback interface upon network/docker config changes.
#

- block:  # Shut down essential flock services
  - name: Shutdown Maintenance services
    command: /usr/lib/ocf/resource.d/platform/mtcAgent stop

  - name: Shutdown FM services
    command: "{{ item }}"
    with_items:
      - /etc/init.d/fminit stop
      - /etc/init.d/fm-api stop

  environment:
    OCF_ROOT: "/usr/lib/ocf"
    OCF_RESKEY_state: "active"


- block:  # Revert changes done by kubeadm init, clear data cache
  - name: Shut down and remove Kubernetes components
    command: kubeadm reset -f

  - name: Clear etcd data cache
    shell: /bin/rm -rf /opt/etcd/{{ software_version }}/controller.etcd/*
    args:
      warn: false

  - name: Restart etcd
    systemd:
      name: etcd
      state: restarted

# Revert configuration to loopback interface
- block:
  - name: Set facts derived from previous network configurations
    set_fact:
      prev_management_subnet_prefix: "{{ prev_management_subnet | ipaddr('prefix') }}"
      prev_controller_floating_address: "{{ (prev_management_subnet | ipaddr(2)).split('/')[0] if prev_management_start_address == 'derived' else prev_management_start_address }}"
      prev_cluster_floating_address: "{{ (prev_cluster_host_subnet | ipaddr(2)).split('/')[0] if prev_cluster_host_start_address == 'derived' else prev_cluster_host_start_address }}"
      prev_cluster_subnet_prefix: "{{ prev_cluster_host_subnet | ipaddr('prefix') }}"
      prev_controller_pxeboot_floating_address: "{{ (prev_pxeboot_subnet | ipaddr(2)).split('/')[0] if prev_pxeboot_start_address == 'derived' else prev_pxeboot_start_address }}"
      prev_pxe_subnet_prefix: "{{ prev_pxeboot_subnet | ipaddr('prefix') }}"

  - name: Set facts derived from previous floating addresses
    set_fact:
      prev_controller_0_address: "{{ prev_controller_floating_address|ipmath(1) }}"
      prev_controller_0_cluster_host: "{{ prev_cluster_floating_address|ipmath(1) }}"

  - name: Set facts for the removal of addresses assigned to loopback interface
    set_fact:
      prev_mgmt_nfs_1_virtual: "{{ prev_controller_floating_address|ipmath(3) }}/{{ prev_management_subnet_prefix }}"
      prev_mgmt_nfs_2_virtual: "{{ prev_controller_floating_address|ipmath(4) }}/{{ prev_management_subnet_prefix }}"
      prev_mgmt_floating_virtual: "{{ prev_controller_floating_address }}/{{ prev_management_subnet_prefix }}"
      prev_cluster_floating_virtual: "{{ prev_cluster_floating_address }}/{{ prev_cluster_subnet_prefix }}"
      prev_pxe_virtual: "{{ prev_controller_pxeboot_floating_address }}/{{ prev_pxe_subnet_prefix }}"
      prev_mgmt_virtual: "{{ prev_controller_0_address }}/{{ prev_management_subnet_prefix }}"
      prev_cluster_virtual: "{{ prev_controller_0_cluster_host }}/{{ prev_cluster_subnet_prefix }}"

  # Remove previous addresses associated with lo interface except the previous mgmt floating address
  # as we still need sysinv-api to be reachable at the previous address until the service endpoints
  # are reconfigured.
  - name: Remove loopback interface in sysinv db and associated addresses
    shell: "{{ item }}"
    with_items:
      - source /etc/platform/openrc; system host-if-delete controller-0 lo
      - "ip addr delete {{ prev_mgmt_nfs_2_virtual }} dev lo scope host"
      - "ip addr delete {{ prev_mgmt_nfs_1_virtual }} dev lo scope host"
      - "ip addr delete {{ prev_cluster_floating_virtual }} dev lo scope host"
      - "ip addr delete {{ prev_pxe_virtual }} dev lo scope host"
      - "ip addr delete {{ prev_mgmt_virtual }} brd {{ management_broadcast }} dev lo:1 scope host"
      - "ip addr delete {{ prev_cluster_virtual }} brd {{ cluster_broadcast }} dev lo:5 scope host"

  - block:
    # Enable the new management floating address so that sysinv-api is reachable at this IP when
    # service endpoints have been reconfigured and sysinv-api restarted.
    - name: Add the new management address for service endpoints reconfiguration
      command: ip addr add {{ mgmt_floating_virtual }} dev lo scope host
      when: mgmt_floating_virtual != prev_mgmt_floating_virtual

    when: reconfigure_endpoints
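The ipaddr/ipmath filters above encode a fixed offset scheme: when an address is 'derived', the floating address is the subnet base plus 2, controller-0 is floating plus 1, and the two NFS addresses are floating plus 3 and plus 4. A minimal sketch of the same arithmetic using netaddr directly; the subnet is a hypothetical example:

# Sketch only: reproduce the derived-address math with netaddr.
from netaddr import IPAddress, IPNetwork

prev_management_subnet = IPNetwork('192.168.204.0/24')
floating = IPAddress(prev_management_subnet.first + 2)   # ipaddr(2)
controller_0 = floating + 1                              # ipmath(1)
nfs_1, nfs_2 = floating + 3, floating + 4                # ipmath(3), ipmath(4)

prefix = prev_management_subnet.prefixlen
print('%s/%s' % (floating, prefix))       # 192.168.204.2/24
print('%s/%s' % (controller_0, prefix))   # 192.168.204.3/24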
@ -1,5 +0,0 @@
---
keyring_workdir: /tmp/python_keyring
docker_proxy_conf: /etc/systemd/system/docker.service.d/http-proxy.conf
minimum_root_disk_size: 240
default_security_feature: "nopti nospectre_v2"
@ -1,96 +0,0 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import os
import pyudev
import re
import subprocess
import sys

DEVICE_NAME_NVME = 'nvme'


def get_rootfs_node():
    """Cloned from sysinv"""
    cmdline_file = '/proc/cmdline'
    device = None

    with open(cmdline_file, 'r') as f:
        for line in f:
            for param in line.split():
                params = param.split("=", 1)
                if params[0] == "root":
                    if "UUID=" in params[1]:
                        key, uuid = params[1].split("=")
                        symlink = "/dev/disk/by-uuid/%s" % uuid
                        device = os.path.basename(os.readlink(symlink))
                    else:
                        device = os.path.basename(params[1])

    if device is not None:
        if DEVICE_NAME_NVME in device:
            re_line = re.compile(r'^(nvme[0-9]*n[0-9]*)')
        else:
            re_line = re.compile(r'^(\D*)')
        match = re_line.search(device)
        if match:
            return os.path.join("/dev", match.group(1))

    return


def parse_fdisk(device_node):
    """Cloned/modified from sysinv"""
    # Run command
    fdisk_command = ('fdisk -l %s 2>/dev/null | grep "Disk %s:"' %
                     (device_node, device_node))
    fdisk_process = subprocess.Popen(fdisk_command, stdout=subprocess.PIPE,
                                     shell=True)
    fdisk_output = fdisk_process.stdout.read()

    # Parse output
    second_half = fdisk_output.split(',')[1]
    size_bytes = second_half.split()[0].strip()

    # Convert bytes to GiB (1 GiB = 1024*1024*1024 bytes)
    int_size = int(size_bytes)
    size_gib = int_size / 1073741824

    return int(size_gib)


def get_root_disk_size():
    """Get size of the root disk"""
    context = pyudev.Context()
    rootfs_node = get_rootfs_node()
    print(rootfs_node)
    size_gib = 0

    for device in context.list_devices(DEVTYPE='disk'):
        # major 259 is used for NVMe devices (/dev/nvmeXn1)
        major = device['MAJOR']
        if (major == '8' or major == '3' or major == '253' or
                major == '259'):
            devname = device['DEVNAME']
            if devname == rootfs_node:
                try:
                    size_gib = parse_fdisk(devname)
                except Exception:
                    break
                break
    return size_gib


if __name__ == '__main__':

    if len(sys.argv) < 2:
        raise Exception("Invalid input!")

    rds = get_root_disk_size()
    print(rds)
    if rds < int(sys.argv[1]):
        raise Exception("Failed validation!")
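The script above is invoked with the minimum size in GiB as its only argument (for example, the minimum_root_disk_size of 240 from the role's vars file) and fails if the root disk is smaller. A quick check of the GiB conversion used in parse_fdisk(); the sample fdisk line below is a hypothetical example of the format the command pipeline greps for:

# Sketch only: verify the bytes-to-GiB arithmetic on a sample line.
sample = 'Disk /dev/sda: 300 GiB, 322122547200 bytes, 629145600 sectors'
size_bytes = sample.split(',')[1].split()[0].strip()
print(int(size_bytes) // (1024 * 1024 * 1024))  # 300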
@ -1,4 +0,0 @@
- name: 'Fail if cgts-vg group is not found'
  fail: msg='Volume groups not configured.'
  when: vg_result.rc != 0
  listen: 'volume group check'
@ -1,490 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
#   This role is to check the target host environment before proceeding to
#   the next step.
#

# Check host connectivity, change password if provided
- block:
  - name: Set SSH port
    set_fact:
      ansible_port: "{{ ansible_port | default(22) }}"

  - name: Update SSH known hosts
    lineinfile:
      path: ~/.ssh/known_hosts
      state: absent
      regexp: '^{{ ansible_host }}|^\[{{ ansible_host }}\]:{{ ansible_port }}'
    delegate_to: localhost

  - name: Check connectivity
    local_action: command ping -c 1 {{ ansible_host }}
    failed_when: false
    register: ping_result

  - name: Fail if host is unreachable
    fail: msg='Host {{ ansible_host }} is unreachable!'
    with_items:
      - "{{ ping_result.stdout_lines|list }}"
    when: ping_result.rc != 0 and item is search('100% packet loss')

  - block:
    - debug:
        msg: "Changing the initial password..."
    - name: Change initial password
      expect:
        echo: yes
        command: "ssh -p {{ ansible_port }} {{ ansible_ssh_user }}@{{ ansible_host }}"
        responses: "{{ password_change_responses }}"
      failed_when: false
      delegate_to: localhost

    rescue:
      # Initial password has been changed and the user forgot to exclude
      # password_change option in the command line for the replay.
      - debug:
          msg: "Password has already been changed"

    when: change_password

  when: inventory_hostname != 'localhost'

# Check for one of the unmistakably StarlingX packages
- name: "Look for unmistakably {{ image_brand }} package"
  command: rpm -q controllerconfig
  args:
    warn: false
  failed_when: false
  register: controllerconfig_installed

- name: Fail if the become password is incorrect
  fail:
    msg: >-
      The provided sudo password does not match the {{ ansible_ssh_user }} password!
      If the {{ ansible_ssh_user }} password differs from the StarlingX default ansible_become_pass
      (St8rlingX*), please overwrite the default ansible_become_pass either in the host/site
      secret/override file or at the command line using the playbook --extra-vars option.
  when: controllerconfig_installed.module_stderr is defined and
        controllerconfig_installed.module_stderr is search(' incorrect password attempts')

- name: Fail if host is not running the right image
  fail: msg='Host {{ ansible_host }} does not have the right image!'
  when: controllerconfig_installed.rc > 0

# Bail if the host has been unlocked
- name: Check initial config flag
  stat:
    path: /etc/platform/.initial_config_complete
  register: initial_config_complete

- block:
  - name: Set skip_play flag for host
    set_fact:
      skip_play: true

  - name: Skip remaining tasks if host is already unlocked
    debug: msg="Host {{ ansible_host }} has been unlocked. There's nothing to play!"

  - name: Stop playing if this is the only target host
    meta: end_play
    when: play_hosts | length == 1

  when: initial_config_complete.stat.exists


# Proceed only if skip_play flag is not turned on
- block:
  # The following parameters should exist in default.yml. If any of them is
  # not available, the file is invalid.
  - name: Fail if any of the mandatory configurations are not defined
    fail:
      msg: "Mandatory configuration parameter {{ item }} is not defined."
    when: item is not defined
    with_items:
      - system_mode
      - timezone
      - pxeboot_subnet
      - management_subnet
      - cluster_host_subnet
      - cluster_pod_subnet
      - cluster_service_subnet
      - external_oam_subnet
      - external_oam_gateway_address
      - external_oam_floating_address
      - management_multicast_subnet
      - dynamic_address_allocation
      - dns_servers
      - docker_registries
      - admin_username
      - admin_password
      - override_files_dir

  - name: Set initial address facts if not defined. They will be updated later
    set_fact:
      pxeboot_start_address: "{{ pxeboot_start_address | default('derived') }}"
      pxeboot_end_address: "{{ pxeboot_end_address | default('derived') }}"
      management_start_address: "{{ management_start_address | default('derived') }}"
      management_end_address: "{{ management_end_address | default('derived') }}"
      cluster_host_start_address: "{{ cluster_host_start_address | default('derived') }}"
      cluster_host_end_address: "{{ cluster_host_end_address | default('derived') }}"
      cluster_pod_start_address: "{{ cluster_pod_start_address | default('derived') }}"
      cluster_pod_end_address: "{{ cluster_pod_end_address | default('derived') }}"
      cluster_service_start_address: "{{ cluster_service_start_address | default('derived') }}"
      cluster_service_end_address: "{{ cluster_service_end_address | default('derived') }}"
      external_oam_start_address: "{{ external_oam_start_address | default('derived') }}"
      external_oam_end_address: "{{ external_oam_end_address | default('derived') }}"
      management_multicast_start_address: "{{ management_multicast_start_address | default('derived') }}"
      management_multicast_end_address: "{{ management_multicast_end_address | default('derived') }}"
      external_oam_node_0_address: "{{ external_oam_node_0_address | default('derived') }}"
      external_oam_node_1_address: "{{ external_oam_node_1_address | default('derived') }}"

  - name: Set default registries dictionary
    set_fact:
      default_docker_registries:
        k8s.gcr.io: k8s.gcr.io
        gcr.io: gcr.io
        quay.io: quay.io
        docker.io: docker.io

  - name: Merge user and default registries dictionaries
    set_fact:
      docker_registries: "{{ default_docker_registries | combine(docker_registries) }}"

  - name: Initialize some flags to be used in subsequent roles/tasks
    set_fact:
      reconfigured: false
      system_config_update: false
      network_config_update: false
      docker_config_update: false
      save_password: true
      save_config: true
      use_docker_proxy: false
      use_unified_registry: false
      restart_services: false
      reconfigure_endpoints: false

  - name: Set initial facts
    set_fact:
      system_params:
        'system_mode': "{{ system_mode }}"
        'timezone': "{{ timezone }}"
      root_disk_size: "{{ standard_root_disk_size }}"
      root_disk_idx: 0
      localhost_name_ip_mapping: "127.0.0.1\tlocalhost\tlocalhost.localdomain localhost4 localhost4.localdomain4"
      network_params:
        'pxeboot_subnet': "{{ pxeboot_subnet }}"
        'management_subnet': "{{ management_subnet }}"
        'cluster_host_subnet': "{{ cluster_host_subnet }}"
        'cluster_pod_subnet': "{{ cluster_pod_subnet }}"
        'cluster_service_subnet': "{{ cluster_service_subnet }}"
        'external_oam_subnet': "{{ external_oam_subnet }}"
        'external_oam_gateway_address': "{{ external_oam_gateway_address }}"
        'external_oam_floating_address': "{{ external_oam_floating_address }}"
        'management_multicast_subnet': "{{ management_multicast_subnet }}"
      # Set this placeholder here to workaround an Ansible quirk
      derived_network_params:
        place_holder: place_holder
      ansible_remote_tmp: "{{ ansible_remote_tmp | default(lookup('ini', 'remote_tmp section=defaults file={{ playbook_dir }}/ansible.cfg')) }}"
      pods_wait_time: "{{ pods_wait_time | default(30) }}"

  - name: Turn on use_docker_proxy flag
    set_fact:
      use_docker_proxy: true
    when: (docker_http_proxy is defined and docker_http_proxy is not none) or
          (docker_https_proxy is defined and docker_https_proxy is not none)

  - name: Set default values for platform registries
    set_fact:
      default_k8s_registry: k8s.gcr.io
      default_gcr_registry: gcr.io
      default_quay_registry: quay.io
      default_docker_registry: docker.io

  - name: Set default values for docker proxies if not defined
    set_fact:
      docker_http_proxy: "{{ docker_http_proxy | default('undef') }}"
      docker_https_proxy: "{{ docker_https_proxy | default('undef') }}"
      docker_no_proxy: "{{ docker_no_proxy | default([]) }}"

  - name: Retrieve software version number
    # lookup module does not work with /etc/build.info as it does not have ini
    # format. Resort to shell source.
    shell: source /etc/build.info; echo $SW_VERSION
    register: sw_version_result

  - name: Fail if software version is not defined
    fail:
      msg: "SW_VERSION is missing in /etc/build.info"
    when: sw_version_result.stdout_lines|length == 0

  - name: Retrieve system type
    shell: source /etc/platform/platform.conf; echo $system_type
    register: system_type_result

  - name: Fail if system type is not defined
    fail:
      msg: "system_type is missing in /etc/platform/platform.conf"
    when: system_type_result.stdout_lines|length == 0

  - name: Set software version, system type config path facts
    set_fact:
      software_version: "{{ sw_version_result.stdout_lines[0] }}"
      system_type: "{{ system_type_result.stdout_lines[0] }}"

  - name: Set config path facts
    set_fact:
      keyring_permdir: "{{ platform_path + '/.keyring/' + software_version }}"
      config_permdir: "{{ platform_path + '/config/' + software_version }}"
      puppet_permdir: "{{ platform_path + '/puppet/' + software_version }}"

  # Give the bootstrap config output file on the host a generic name so the
  # same file is referenced if the host is bootstrapped locally and remotely
  # in whatever order.
  - name: Set bootstrap output file
    set_fact:
      last_bootstrap_config_file: "{{ config_permdir }}/last_bootstrap_config.yml"

  - name: Check Docker status
    command: systemctl status docker
    failed_when: false
    register: docker

  - name: Look for openrc file
    stat:
      path: /etc/platform/openrc
    register: openrc_file

  - name: Turn on replayed flag
    set_fact:
      replayed: true
    when: openrc_file.stat.exists and docker.rc == 0

  - block:
    - name: Check if the controller-0 host has been successfully provisioned
      shell: source /etc/platform/openrc; system host-list|grep controller-0
      failed_when: false
      register: host_check

    - block:  # system has been configured
      - name: Set flag to indicate that this host has been previously configured
        set_fact:
          reconfigured: true

      - name: Find previous config file for this host
        stat:
          path: "{{ last_bootstrap_config_file }}"
        register: last_config_file

      - block:
        - name: Set last config file to import (local)
          set_fact:
            last_config: "{{ last_bootstrap_config_file }}"
          when: inventory_hostname == 'localhost'

        # Currently Ansible include_vars only works with local file
        - block:
          # Give a host specific name in case the playbook is used to bootstrap
          # multiple remote hosts simultaneously
          - name: Set last config file to import (remote)
            set_fact:
              last_config: "/tmp/{{ (last_bootstrap_config_file | basename | splitext)[0] }}_{{ inventory_hostname }}.yml"

          - name: Fetch previous config file from this host
            fetch:
              src: "{{ last_bootstrap_config_file }}"
              dest: "{{ last_config }}"
              flat: yes
          when: inventory_hostname != 'localhost'

        - name: Read in last config values
          include_vars:
            file: "{{ last_config }}"

        - name: Turn on system attributes reconfiguration flag
          set_fact:
            system_config_update: true
          when: (prev_system_mode != system_mode) or
                (prev_timezone != timezone) or
                (prev_dns_servers.split(',') | sort != dns_servers | sort)

        - name: Turn on docker reconfiguration flag if docker config is changed
          set_fact:
            docker_config_update: true
          when: (prev_docker_registries != docker_registries) or
                ((use_docker_proxy) and
                 (prev_docker_http_proxy != docker_http_proxy or
                  prev_docker_https_proxy != docker_https_proxy or
                  prev_docker_no_proxy != docker_no_proxy))

        - name: Turn on service endpoints reconfiguration flag if management and/or oam network config is changed
          set_fact:
            reconfigure_endpoints: true
          when: (prev_management_subnet != management_subnet) or
                (prev_management_start_address != management_start_address) or
                (prev_external_oam_subnet != external_oam_subnet) or
                (prev_external_oam_gateway_address != external_oam_gateway_address) or
                (prev_external_oam_floating_address != external_oam_floating_address) or
                (prev_external_oam_start_address != external_oam_start_address) or
                (prev_external_oam_end_address != external_oam_end_address) or
(prev_external_oam_node_0_address != external_oam_node_0_address) or
|
||||
(prev_external_oam_node_1_address != external_oam_node_1_address)
|
||||
|
||||
- name: Turn on network reconfiguration flag if any of the network related config is changed
|
||||
set_fact:
|
||||
network_config_update: true
|
||||
when: reconfigure_endpoints or
|
||||
(prev_dynamic_address_allocation != dynamic_address_allocation) or
|
||||
(prev_management_end_address != management_end_address) or
|
||||
(prev_pxeboot_subnet != pxeboot_subnet) or
|
||||
(prev_pxeboot_start_address != pxeboot_start_address) or
|
||||
(prev_pxeboot_end_address != pxeboot_end_address) or
|
||||
(prev_management_multicast_subnet != management_multicast_subnet) or
|
||||
(prev_management_multicast_start_address != management_multicast_start_address) or
|
||||
(prev_management_multicast_end_address != management_multicast_end_address) or
|
||||
(prev_cluster_host_subnet != cluster_host_subnet) or
|
||||
(prev_cluster_host_start_address != cluster_host_start_address) or
|
||||
(prev_cluster_host_end_address != cluster_host_end_address) or
|
||||
(prev_cluster_pod_subnet != cluster_pod_subnet) or
|
||||
(prev_cluster_pod_start_address != cluster_pod_start_address) or
|
||||
(prev_cluster_pod_end_address != cluster_pod_end_address) or
|
||||
(prev_cluster_service_subnet != cluster_service_subnet) or
|
||||
(prev_cluster_service_start_address != cluster_service_start_address) or
|
||||
(prev_cluster_service_end_address != cluster_service_end_address)
|
||||
|
||||
- name: Turn on restart services flag if management/oam/cluster network or docker config is changed
|
||||
set_fact:
|
||||
restart_services: true
|
||||
pods_wait_time: "{{ pods_wait_time|int + 30 }}"
|
||||
when: reconfigure_endpoints or
|
||||
docker_config_update or
|
||||
(prev_cluster_host_subnet != cluster_host_subnet) or
|
||||
(prev_cluster_pod_subnet != cluster_pod_subnet) or
|
||||
(prev_cluster_service_subnet != cluster_service_subnet)
|
||||
|
||||
# Re-evaluate the condition to generate the python keyring
|
||||
- name: Turn off save_password flag if admin password has not changed
|
||||
set_fact:
|
||||
save_password: false
|
||||
username: "{{ prev_admin_username }}"
|
||||
password: "{{ prev_admin_password }}"
|
||||
# TODO(tngo): there seems to be a puppet/sysinv limitation that prevents password
|
||||
# reconfiguration to work without an extra boot. Temporarily disable
|
||||
# it for replay for now.
|
||||
when: prev_admin_password == admin_password|hash('sha1')
|
||||
or replayed
|
||||
|
||||
# Re-evaluate condition to persist config data to sysinv database
|
||||
- name: Turn off save_config flag if system, network, and docker configurations have not changed
|
||||
set_fact:
|
||||
save_config: false
|
||||
when: not system_config_update and
|
||||
not network_config_update and
|
||||
not docker_config_update
|
||||
|
||||
- block:
|
||||
- debug:
|
||||
msg: "Configurations are unchanged. There's nothing to play!"
|
||||
|
||||
- name: Stop playing if this is the only target host
|
||||
meta: end_play
|
||||
when: play_hosts|length == 1
|
||||
|
||||
- name: Turn on skip_play flag
|
||||
set_fact:
|
||||
skip_play: true
|
||||
when: not save_password and not save_config
|
||||
|
||||
when: last_config_file.stat.exists
|
||||
when: host_check.rc == 0
|
||||
when: replayed # bootstrap manifest has been applied
|
||||
|
||||
- name: Check volume groups
|
||||
command: vgdisplay cgts-vg
|
||||
register: vg_result
|
||||
failed_when: false
|
||||
|
||||
- name: Fail if volume groups are not configured
|
||||
fail: msg='Volume groups not configured.'
|
||||
when: vg_result.rc != 0
|
||||
|
||||
- name: Check size of root disk
|
||||
script: check_root_disk_size.py {{ standard_root_disk_size }}
|
||||
register: disk_size_check_result
|
||||
failed_when: false
|
||||
|
||||
# Workaround an Ansible quirk
|
||||
- name: Update root disk index for remote play
|
||||
set_fact:
|
||||
root_disk_idx: "{{ root_disk_idx + 1 }}"
|
||||
when: ansible_connection != "local"
|
||||
|
||||
- name: Set root disk and root disk size facts
|
||||
set_fact:
|
||||
root_disk: "{{ disk_size_check_result.stdout_lines[root_disk_idx|int] }}"
|
||||
root_disk_size: "{{ disk_size_check_result.stdout_lines[root_disk_idx|int + 1] }}"
|
||||
|
||||
- debug:
|
||||
msg: >-
|
||||
[WARNING]: Root disk {{ root_disk }} size is {{ root_disk_size }}GB which is
|
||||
less than the standard size of {{ standard_root_disk_size }}GB. Please consult
|
||||
the Software Installation Guide for details.
|
||||
when: disk_size_check_result.rc != 0
|
||||
|
||||
- name: Look for branding tar file
|
||||
find:
|
||||
paths: /opt/branding
|
||||
patterns: '*.tgz'
|
||||
register: find_tar_result
|
||||
|
||||
- name: Fail if there are more than one branding tar files
|
||||
fail:
|
||||
msg: >-
|
||||
Only one branding tarball is permitted in /opt/branding. Refer to
|
||||
the branding section of the documentation.
|
||||
when: find_tar_result.matched > 1
|
||||
|
||||
- name: Look for other branding files
|
||||
find:
|
||||
paths: /opt/branding
|
||||
excludes: '*.tgz,applied'
|
||||
register: find_result
|
||||
|
||||
- name: Fail if the branding filename is not valid
|
||||
fail:
|
||||
msg: >
|
||||
{{ find_result.files[0].path }} is not a valid branding
|
||||
filename. Refer to the branding section of the documentation.
|
||||
when: find_result.matched > 0
|
||||
|
||||
- name: Mark environment as Ansible bootstrap
|
||||
file:
|
||||
path: /var/run/.ansible_bootstrap
|
||||
state: touch
|
||||
|
||||
# Set up the remote tmp dir beforehand to get rid of the annoying warning
|
||||
# when pipelining is turned on for better performance.
|
||||
- name: Set up Ansible remote tmp dir
|
||||
file:
|
||||
path: "{{ ansible_remote_tmp }}"
|
||||
state: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0755
|
||||
|
||||
- debug:
|
||||
msg: >-
|
||||
system_config_update flag: {{ system_config_update }},
|
||||
network_config_update flag: {{ network_config_update }},
|
||||
docker_config_update flag: {{ docker_config_update }},
|
||||
restart_services flag: {{ restart_services }},
|
||||
endpoints_reconfiguration_flag: {{ reconfigure_endpoints }},
|
||||
save_password flag: {{ save_password }},
|
||||
save_config flag: {{ save_config }},
|
||||
skip_play flag: {{ skip_play }}
|
||||
|
||||
when: not skip_play
|
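The replay path above turns entirely on comparing prev_* values, loaded from the saved last_bootstrap_config.yml via include_vars, against the current overrides. A minimal standalone sketch of the same pattern, with the file path and variable names assumed for illustration:

    - name: Load values saved by a previous bootstrap run  # illustrative sketch only
      include_vars:
        file: /opt/platform/config/19.01/last_bootstrap_config.yml  # assumed versioned path

    - name: Flag a timezone change between runs
      set_fact:
        system_config_update: true
      when: prev_timezone != timezone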
@@ -1,5 +0,0 @@
---
image_brand: StarlingX
platform_path: /opt/platform
puppet_path: /opt/platform/puppet
standard_root_disk_size: 500
@@ -1,99 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
#   This role is to validate and store admin credentials using python keyring.
#

# Set admin username and password
- block:
  - debug:
      msg: "Use encrypted admin username and password."
  - set_fact:
      username: "{{ vault_admin_username }}"
      password: "{{ vault_admin_password }}"
      use_vault_credentials: true
  when: (vault_admin_password is defined) and (vault_admin_username is defined)

- block:
  - name: Print warning if admin credentials are not stored in vault
    debug:
      msg: >-
        [WARNING: Default admin username and password (unencrypted) are
        used. Consider storing both of these variables in Ansible vault.]
  - name: Set admin username and password facts
    set_fact:
      username: "{{ admin_username }}"
      password: "{{ admin_password }}"
  when: not use_vault_credentials

# Validate the password against the configured rules
- name: Look for password rules file
  stat:
    path: "{{ password_rules_file }}"
  register: password_rules

- name: Fail if password rules file is missing
  fail: msg="Password rules file {{ password_rules_file }} is missing."
  when: not password_rules.stat.exists

- name: Get password rules
  shell: grep -w password_regex {{ password_rules_file }} | awk '{print $3}'
  register: pattern_result

- name: Get password rules description
  shell: >
    grep -w password_regex_description {{ password_rules_file }} |
    cut -d'=' -f2
  register: description_result

- name: Set password regex facts
  set_fact:
    password_regex: "{{ pattern_result.stdout }}"
    password_regex_desc: "{{ 'ADMIN_PASSWORD: ' + description_result.stdout }}"

- name: Fail if password regex cannot be found
  fail: msg="Required option password_regex not found in {{ password_rules_file }}."
  when: pattern_result.stdout == ""

- name: Set password regex description fact
  set_fact:
    password_regex_desc: "ADMIN_PASSWORD: Password does not meet complexity criteria."
  when: description_result.stdout == ""

- name: Validate admin password
  # Have to use a small python script here; the Ansible regex_search filter
  # does not accept the keystone regex pattern.
  vars:
    script_content: |
      import re
      prx = "{{ password_regex }}"
      prx = prx.strip('"')
      if not re.match(prx, "{{ password }}"):
          raise Exception()
  shell: "{{ script_content }}"
  args:
    executable: /usr/bin/python
  failed_when: false
  register: password_validation_result

- name: Fail if provided admin password does not meet required complexity
  fail:
    msg: "{{ password_regex_desc }}"
  when: password_validation_result.rc != 0

- name: Store admin password
  vars:
    script_content: |
      import keyring
      import os
      os.environ['XDG_DATA_HOME'] = '/tmp'
      keyring.set_password("CGCS", "{{ username }}", "{{ password }}")
      del os.environ['XDG_DATA_HOME']
  shell: "{{ script_content }}"
  args:
    executable: /usr/bin/python
  no_log: true
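For completeness, the stored credential can be read back with the same keyring API the task above uses for writing. A minimal read-back sketch (not part of the role), assuming the same CGCS service name and /tmp data home:

    - name: Retrieve admin password from keyring  # illustrative sketch only
      vars:
        script_content: |
          import keyring
          import os
          # Point keyring at the same backing store the role wrote to
          os.environ['XDG_DATA_HOME'] = '/tmp'
          print(keyring.get_password("CGCS", "{{ username }}"))
          del os.environ['XDG_DATA_HOME']
      shell: "{{ script_content }}"
      args:
        executable: /usr/bin/python
      no_log: true  # keep the recovered secret out of the task output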
@@ -1,3 +0,0 @@
---
use_vault_credentials: false
password_rules_file: /etc/keystone/password-rules.conf
@@ -1,2 +0,0 @@
---
allow_duplicates: false
@@ -1,459 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
#   This role is to validate and save host (non secure) config.
#

- debug:
    msg:
      - System mode is {{ system_mode }}
      - Timezone is {{ timezone }}
      - DNS servers are {{ dns_servers }}
      - PXE boot subnet is {{ pxeboot_subnet }}
      - Management subnet is {{ management_subnet }}
      - Cluster host subnet is {{ cluster_host_subnet }}
      - Cluster pod subnet is {{ cluster_pod_subnet }}
      - Cluster service subnet is {{ cluster_service_subnet }}
      - OAM subnet is {{ external_oam_subnet }}
      - OAM gateway is {{ external_oam_gateway_address }}
      - OAM floating ip is {{ external_oam_floating_address }}
      - Dynamic address allocation is {{ dynamic_address_allocation }}
      - Docker registries are {{ docker_registries }}
      - Docker HTTP proxy is {{ docker_http_proxy }}
      - Docker HTTPS proxy is {{ docker_https_proxy }}
      - Docker no proxy list is {{ docker_no_proxy }}

# System parameters config validation
- block:
  - name: Set system mode fact
    set_fact:
      system_mode: "{{ system_mode|lower }}"

  - block:
    - debug:
        msg: "System type is Standard, system mode will be set to duplex."
    - name: Set system mode to duplex for Standard system
      set_fact:
        system_mode: duplex
    when: system_type == 'Standard'

  - name: Validate system mode if system type is All-in-one
    fail:
      msg: "Invalid system mode. Valid values are: simplex, duplex or duplex-direct."
    when: >
      (system_mode != 'simplex' and
       system_mode != 'duplex' and
       system_mode != 'duplex-direct') and
      (system_type == 'All-in-one')

  - name: Check registered timezones
    stat:
      path: "{{ '/usr/share/zoneinfo/' + timezone }}"
    register: timezone_file

  - name: Fail if provided timezone is unknown
    fail: msg="The provided timezone {{ timezone }} is invalid."
    when: not timezone_file.stat.exists

  - name: Fail if the number of dns servers provided is not between 1 and 3
    fail:
      msg: "The number of DNS servers must be between 1 and 3."
    when: (dns_servers | length == 0) or (dns_servers | length > 3)


# DNS servers config validation
- name: Check format of DNS server IP(s)
  debug:
    msg: "DNS Server: {{ item }}"
  failed_when: item | ipaddr == False
  with_items: "{{ dns_servers }}"
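# NOTE (illustration, not part of the original file): the ipaddr filter used
# above is backed by the python netaddr library. It returns the (normalized)
# input when it is a valid address or subnet and False when it is not, which
# is why failed_when compares the result against False, e.g.:
#   '8.8.8.8'   | ipaddr  ->  '8.8.8.8'
#   'not-an-ip' | ipaddr  ->  False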
# Networks config validation
- block:
  - name: Validate provided subnets (both IPv4 & IPv6 notations)
    debug:
      msg: "{{ item.key }}: {{ item.value }}"
    failed_when: item.value|ipaddr == False
    with_dict: "{{ network_params }}"

  - set_fact:
      ipv4_addressing: "{{ network_params.management_subnet|ipv4 }}"
      ipv6_addressing: "{{ network_params.management_subnet|ipv6 }}"

  - name: Validate all network subnets are IPv4
    debug:
      msg: "All infrastructure and cluster subnets must be the same IP version"
    failed_when: item|ipv4 == False
    with_items:
      - "{{ network_params.management_subnet }}"
      - "{{ network_params.cluster_host_subnet }}"
      - "{{ network_params.cluster_pod_subnet }}"
      - "{{ network_params.cluster_service_subnet }}"
      - "{{ network_params.external_oam_subnet }}"
      - "{{ network_params.management_multicast_subnet }}"
    when: ipv4_addressing != False

  - name: Validate all network subnets are IPv6
    debug:
      msg: "All infrastructure and cluster subnets must be the same IP version"
    failed_when: item|ipv6 == False
    with_items:
      - "{{ network_params.management_subnet }}"
      - "{{ network_params.cluster_host_subnet }}"
      - "{{ network_params.cluster_pod_subnet }}"
      - "{{ network_params.cluster_service_subnet }}"
      - "{{ network_params.external_oam_subnet }}"
      - "{{ network_params.management_multicast_subnet }}"
    when: ipv6_addressing != False

  - name: Validate pxeboot subnet is IPv4
    debug:
      msg: "pxeboot_subnet must always be IPv4"
    failed_when: network_params.pxeboot_subnet|ipv4 == False

  - name: Fail if cluster pod/service subnet size is too small (minimum size = 65536)
    fail:
      msg: "Subnet size is too small, must have minimum {{ min_pod_service_num_addresses }} addresses."
    when: item|ipaddr('size') < min_pod_service_num_addresses
    with_items:
      - "{{ network_params.cluster_pod_subnet }}"
      - "{{ network_params.cluster_service_subnet }}"

  - name: Fail if pxeboot/management/multicast subnet size is too small (minimum size = 16)
    fail:
      msg: "Subnet size is too small, must have minimum {{ min_16_addresses }} addresses."
    when: item|ipaddr('size') < min_16_addresses
    with_items:
      - "{{ network_params.pxeboot_subnet }}"
      - "{{ network_params.management_subnet }}"
      - "{{ network_params.management_multicast_subnet }}"

  - name: Fail if the size of the remaining subnets is too small (minimum size = 8)
    fail:
      msg: "Subnet size is too small, must have minimum {{ min_8_addresses }} addresses."
    when: item|ipaddr('size') < min_8_addresses
    with_items:
      - "{{ network_params.cluster_host_subnet }}"
      - "{{ network_params.external_oam_subnet }}"

  - name: Generate warning if subnet prefix is not typical for Standard systems
    debug:
      msg: "WARNING: Subnet prefix of less than /24 is not typical. This will affect scaling of the system!"
    when: item|ipaddr('prefix')|int < typical_subnet_prefix and system_type == 'Standard'
    with_items:
      - "{{ network_params.pxeboot_subnet }}"
      - "{{ network_params.management_subnet }}"
      - "{{ network_params.cluster_host_subnet }}"
      - "{{ network_params.external_oam_subnet }}"
      - "{{ network_params.management_multicast_subnet }}"

  - block:
    - name: Fail if IPv6 prefix length is too short
      fail:
        msg: "IPv6 minimum prefix length is {{ minimum_ipv6_prefix_length }}"
      when: network_params.management_subnet|ipaddr('prefix')|int < minimum_ipv6_prefix_length
    when: ipv6_addressing != False

  - name: Fail if address allocation is misconfigured
    fail:
      msg: "dynamic_address_allocation is misconfigured. Valid value is either 'True' or 'False'."
    when: not dynamic_address_allocation | type_debug == 'bool'
# The provided subnets have passed validation, set the default addresses
# based on the subnet values
- name: Set default start and end addresses based on provided subnets
  set_fact:
    default_pxeboot_start_address: "{{ (pxeboot_subnet | ipaddr(2)).split('/')[0] }}"
    default_pxeboot_end_address: "{{ (pxeboot_subnet | ipaddr(-2)).split('/')[0] }}"
    default_management_start_address: "{{ (management_subnet | ipaddr(2)).split('/')[0] }}"
    default_management_end_address: "{{ (management_subnet | ipaddr(-2)).split('/')[0] }}"
    default_cluster_host_start_address: "{{ (cluster_host_subnet | ipaddr(2)).split('/')[0] }}"
    default_cluster_host_end_address: "{{ (cluster_host_subnet | ipaddr(-2)).split('/')[0] }}"
    default_cluster_pod_start_address: "{{ (cluster_pod_subnet | ipaddr(1)).split('/')[0] }}"
    default_cluster_pod_end_address: "{{ (cluster_pod_subnet | ipaddr(-2)).split('/')[0] }}"
    default_cluster_service_start_address: "{{ (cluster_service_subnet | ipaddr(1)).split('/')[0] }}"
    default_cluster_service_end_address: "{{ (cluster_service_subnet | ipaddr(-2)).split('/')[0] }}"
    default_external_oam_start_address: "{{ (external_oam_subnet | ipaddr(1)).split('/')[0] }}"
    default_external_oam_end_address: "{{ (external_oam_subnet | ipaddr(-2)).split('/')[0] }}"
    default_management_multicast_start_address: "{{ (management_multicast_subnet | ipaddr(1)).split('/')[0] }}"
    default_management_multicast_end_address: "{{ (management_multicast_subnet | ipaddr(-2)).split('/')[0] }}"
    default_external_oam_node_0_address: "{{ external_oam_floating_address | ipmath(1) }}"
    default_external_oam_node_1_address: "{{ external_oam_floating_address | ipmath(2) }}"
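# NOTE (illustration, not part of the original file): a worked example of the
# derivation above, assuming management_subnet is 192.168.204.0/24:
#   management_subnet | ipaddr(2)   ->  '192.168.204.2/24'
#   management_subnet | ipaddr(-2)  ->  '192.168.204.254/24'
# so after .split('/')[0] the defaults become 192.168.204.2 (start) and
# 192.168.204.254 (end). Similarly, ipmath(1) on a floating address such as
# 10.10.10.2 yields the next host address, 10.10.10.3.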
- name: Build address pairs for validation, merging default and user provided values
  set_fact:
    address_pairs:
      pxeboot:
        start: "{{ pxeboot_start_address if pxeboot_start_address != 'derived' else default_pxeboot_start_address }}"
        end: "{{ pxeboot_end_address if pxeboot_end_address != 'derived' else default_pxeboot_end_address }}"
        subnet: "{{ network_params.pxeboot_subnet }}"
        use_default: "{{ true if pxeboot_start_address == 'derived' and pxeboot_end_address == 'derived' else false }}"
      management:
        start: "{{ management_start_address if management_start_address != 'derived' else default_management_start_address }}"
        end: "{{ management_end_address if management_end_address != 'derived' else default_management_end_address }}"
        subnet: "{{ network_params.management_subnet }}"
        use_default: "{{ true if management_start_address == 'derived' and management_end_address == 'derived' else false }}"
      cluster_host:
        start: "{{ cluster_host_start_address if cluster_host_start_address != 'derived' else default_cluster_host_start_address }}"
        end: "{{ cluster_host_end_address if cluster_host_end_address != 'derived' else default_cluster_host_end_address }}"
        subnet: "{{ network_params.cluster_host_subnet }}"
        use_default: "{{ true if cluster_host_start_address == 'derived' and cluster_host_end_address == 'derived' else false }}"
      cluster_pod:
        start: "{{ cluster_pod_start_address if cluster_pod_start_address != 'derived' else default_cluster_pod_start_address }}"
        end: "{{ cluster_pod_end_address if cluster_pod_end_address != 'derived' else default_cluster_pod_end_address }}"
        subnet: "{{ network_params.cluster_pod_subnet }}"
        use_default: "{{ true if cluster_pod_start_address == 'derived' and cluster_pod_end_address == 'derived' else false }}"
      cluster_service:
        start: "{{ cluster_service_start_address if cluster_service_start_address != 'derived' else default_cluster_service_start_address }}"
        end: "{{ cluster_service_end_address if cluster_service_end_address != 'derived' else default_cluster_service_end_address }}"
        subnet: "{{ network_params.cluster_service_subnet }}"
        use_default: "{{ true if cluster_service_start_address == 'derived' and cluster_service_end_address == 'derived' else false }}"
      oam:
        start: "{{ external_oam_start_address if external_oam_start_address != 'derived' else default_external_oam_start_address }}"
        end: "{{ external_oam_end_address if external_oam_end_address != 'derived' else default_external_oam_end_address }}"
        subnet: "{{ network_params.external_oam_subnet }}"
        use_default: "{{ true if external_oam_start_address == 'derived' and external_oam_end_address == 'derived' else false }}"
      multicast:
        start: "{{ management_multicast_start_address if management_multicast_start_address != 'derived' else default_management_multicast_start_address }}"
        end: "{{ management_multicast_end_address if management_multicast_end_address != 'derived' else default_management_multicast_end_address }}"
        subnet: "{{ network_params.management_multicast_subnet }}"
        use_default: "{{ true if management_multicast_start_address == 'derived' and management_multicast_end_address == 'derived' else false }}"
      oam_node:
        start: "{{ external_oam_node_0_address if external_oam_node_0_address != 'derived' else default_external_oam_node_0_address }}"
        end: "{{ external_oam_node_1_address if external_oam_node_1_address != 'derived' else default_external_oam_node_1_address }}"
        subnet: "{{ network_params.external_oam_subnet }}"
        use_default: "{{ true if external_oam_node_0_address == 'derived' and external_oam_node_1_address == 'derived' else false }}"

- include: validate_address_range.yml
  with_dict: "{{ address_pairs }}"
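# NOTE (illustration, not part of the original file): with_dict hands each
# address_pairs entry to validate_address_range.yml as item.key/item.value,
# so one iteration (values assumed) sees:
#   item.key:   management
#   item.value: {start: 192.168.204.2, end: 192.168.204.254,
#                subnet: 192.168.204.0/24, use_default: true}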
- name: Set floating addresses based on subnets or start addresses
  set_fact:
    # Not sure why the ipaddr('address') and ipsubnet filters did not extract
    # the IP from CIDR input. Resort to string split for now.
    controller_floating_address: "{{ (management_subnet | ipaddr(2)).split('/')[0] if management_start_address == 'derived' else management_start_address }}"
    controller_pxeboot_floating_address: "{{ (pxeboot_subnet | ipaddr(2)).split('/')[0] if pxeboot_start_address == 'derived' else pxeboot_start_address }}"
    cluster_floating_address: "{{ (cluster_host_subnet | ipaddr(2)).split('/')[0] if cluster_host_start_address == 'derived' else cluster_host_start_address }}"

- name: Set derived facts for subsequent tasks/roles
  set_fact:
    derived_network_params:
      'management_interface': lo
      'management_interface_name': lo
      'controller_0_address': "{{ controller_floating_address|ipmath(1) }}"
      'controller_1_address': "{{ controller_floating_address|ipmath(2) }}"
      'nfs_management_address_1': "{{ controller_floating_address|ipmath(3) }}"
      'nfs_management_address_2': "{{ controller_floating_address|ipmath(4) }}"
      'controller_pxeboot_address_0': "{{ controller_pxeboot_floating_address|ipmath(1) }}"
      'controller_pxeboot_address_1': "{{ controller_pxeboot_floating_address|ipmath(2) }}"

    # Make common facts available to other roles
    config_workdir: "{{ config_workdir }}"
    dns_servers: "{{ dns_servers }}"

    # Derived network parameters that don't apply to bootstrap_config but are
    # required for subsequent roles
    management_subnet_prefix: "{{ management_subnet | ipaddr('prefix') }}"
    management_broadcast: "{{ management_subnet | ipaddr('broadcast') }}"
    pxe_subnet_prefix: "{{ pxeboot_subnet | ipaddr('prefix') }}"
    cluster_subnet_prefix: "{{ cluster_host_subnet | ipaddr('prefix') }}"
    cluster_broadcast: "{{ cluster_host_subnet | ipaddr('broadcast') }}"
    controller_0_cluster_host: "{{ cluster_floating_address|ipmath(1) }}"
    controller_1_cluster_host: "{{ cluster_floating_address|ipmath(2) }}"

- name: Set facts for IP address provisioning against loopback interface
  set_fact:
    mgmt_virtual: "{{ derived_network_params.controller_0_address }}/{{ management_subnet_prefix }}"
    cluster_virtual: "{{ controller_0_cluster_host }}/{{ cluster_subnet_prefix }}"
    pxe_virtual: "{{ controller_pxeboot_floating_address }}/{{ pxe_subnet_prefix }}"
    cluster_floating_virtual: "{{ cluster_floating_address }}/{{ cluster_subnet_prefix }}"
    mgmt_floating_virtual: "{{ controller_floating_address }}/{{ management_subnet_prefix }}"
    mgmt_nfs_1_virtual: "{{ derived_network_params.nfs_management_address_1 }}/{{ management_subnet_prefix }}"
    mgmt_nfs_2_virtual: "{{ derived_network_params.nfs_management_address_2 }}/{{ management_subnet_prefix }}"
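# NOTE (illustration, not part of the original file): continuing the
# 192.168.204.0/24 management example, the loopback provisioning facts above
# work out to mgmt_floating_virtual = 192.168.204.2/24 and, since
# controller_0_address is the floating address | ipmath(1),
# mgmt_virtual = 192.168.204.3/24; both get plumbed on interface lo.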
# Docker config validation
- block:
  - set_fact:
      use_default_registries: true
      k8s_registry: "{{ docker_registries[default_k8s_registry] if docker_registries[default_k8s_registry] is not none else default_k8s_registry }}"
      gcr_registry: "{{ docker_registries[default_gcr_registry] if docker_registries[default_gcr_registry] is not none else default_gcr_registry }}"
      quay_registry: "{{ docker_registries[default_quay_registry] if docker_registries[default_quay_registry] is not none else default_quay_registry }}"
      docker_registry: "{{ docker_registries[default_docker_registry] if docker_registries[default_docker_registry] is not none else default_docker_registry }}"
      default_no_proxy:
        - localhost
        - 127.0.0.1
        - registry.local
        - "{{ controller_floating_address }}"
        - "{{ derived_network_params.controller_0_address }}"
        - "{{ external_oam_floating_address }}"
        - "{{ address_pairs['oam_node']['start'] }}"
      non_sx_proxy_addons:
        - "{{ derived_network_params.controller_1_address }}"
        - "{{ address_pairs['oam_node']['end'] }}"
      docker_no_proxy_combined: []

  - block:
    - name: Set default no-proxy address list (non simplex)
      set_fact:
        default_no_proxy: "{{ default_no_proxy + non_sx_proxy_addons }}"
    when: system_mode != 'simplex'

  - block:
    - name: Validate http proxy urls
      include: validate_url.yml input_url={{ item }}
      with_items:
        - "{{ docker_http_proxy }}"
        - "{{ docker_https_proxy }}"

    - block:
      - name: Validate no proxy addresses
        include: validate_address.yml input_address={{ item }}
        with_items: "{{ docker_no_proxy }}"
      when: docker_no_proxy|length > 0

    - name: Add user defined no-proxy address list to default
      set_fact:
        docker_no_proxy_combined: "{{ default_no_proxy | union(docker_no_proxy) | unique }}"

    when: use_docker_proxy

  - block:
    - name: Fail if secure registry flag is misconfigured
      fail:
        msg: "is_secure_registry is misconfigured. Valid value is either 'True' or 'False'."
      when: (is_secure_registry is defined) and
            (not is_secure_registry | type_debug == 'bool')

    - name: Default the unified registry to secure if not specified
      set_fact:
        is_secure_registry: True
      when: is_secure_registry is not defined

    - name: Turn on use_unified_registry flag
      set_fact:
        use_unified_registry: true
        k8s_registry: "{{ docker_registries['unified'] }}"
        gcr_registry: "{{ docker_registries['unified'] }}"
        quay_registry: "{{ docker_registries['unified'] }}"
        docker_registry: "{{ docker_registries['unified'] }}"

    when: docker_registries['unified'] is defined and docker_registries['unified'] is not none

  - name: Update use_default_registries flag
    set_fact:
      use_default_registries: false
    when: use_unified_registry or
          docker_registries|length != 4 or
          k8s_registry != default_k8s_registry or
          gcr_registry != default_gcr_registry or
          quay_registry != default_quay_registry or
          docker_registry != default_docker_registry

  - block:
    - include: validate_address.yml input_address={{ item.value }}
      with_dict: "{{ docker_registries }}"
    when: not use_default_registries
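# NOTE (illustration, not part of the original file): a unified registry is
# selected by supplying a single override (mirror hostname assumed), e.g.:
#   docker_registries:
#     unified: mirror.example.com:5000
# which points k8s_registry, gcr_registry, quay_registry and docker_registry
# at the same mirror and turns on the use_unified_registry flag above.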
# Docker images archive source validation
- block:
  - set_fact:
      images_archive_exists: false

  - block:
    - name: Check if images archive location exists
      stat:
        path: "{{ docker_images_archive_source }}"
      register: archive_source

    - block:
      - name: Get list of archived files
        find:
          paths: "{{ docker_images_archive_source }}"
          patterns: "*.tar"
        register: archive_find_output

      - name: Turn on images archive flag
        set_fact:
          images_archive_exists: true
        when: archive_find_output.matched > 0

      when: archive_source.stat.exists
    delegate_to: localhost
  when: (docker_images_archive_source is defined) and
        (docker_images_archive_source is not none)
# bootstrap_config ini file generation
- block:
  - name: Create config workdir
    file:
      path: "{{ config_workdir }}"
      state: directory
      owner: root
      group: root
      mode: 0755

  - name: Generate config ini file for python sysinv db population script
    lineinfile:
      path: "{{ bootstrap_config_file }}"
      line: "{{ item }}"
      create: yes
    with_items:
      - "[BOOTSTRAP_CONFIG]"
      - "CONTROLLER_HOSTNAME=controller-0"
      - "SYSTEM_TYPE={{ system_type }}"
      - "SYSTEM_MODE={{ system_mode }}"
      - "TIMEZONE={{ timezone }}"
      - "SW_VERSION={{ software_version }}"
      - "NAMESERVERS={{ dns_servers | join(',') }}"
      - "PXEBOOT_SUBNET={{ pxeboot_subnet }}"
      - "PXEBOOT_START_ADDRESS={{ address_pairs['pxeboot']['start'] }}"
      - "PXEBOOT_END_ADDRESS={{ address_pairs['pxeboot']['end'] }}"
      - "MANAGEMENT_SUBNET={{ management_subnet }}"
      - "MANAGEMENT_START_ADDRESS={{ address_pairs['management']['start'] }}"
      - "MANAGEMENT_END_ADDRESS={{ address_pairs['management']['end'] }}"
      - "DYNAMIC_ADDRESS_ALLOCATION={{ dynamic_address_allocation }}"
      - "MANAGEMENT_INTERFACE=lo"
      - "CONTROLLER_0_ADDRESS={{ derived_network_params.controller_0_address }}"
      - "CLUSTER_HOST_SUBNET={{ cluster_host_subnet }}"
      - "CLUSTER_HOST_START_ADDRESS={{ address_pairs['cluster_host']['start'] }}"
      - "CLUSTER_HOST_END_ADDRESS={{ address_pairs['cluster_host']['end'] }}"
      - "CLUSTER_POD_SUBNET={{ cluster_pod_subnet }}"
      - "CLUSTER_POD_START_ADDRESS={{ address_pairs['cluster_pod']['start'] }}"
      - "CLUSTER_POD_END_ADDRESS={{ address_pairs['cluster_pod']['end'] }}"
      - "CLUSTER_SERVICE_SUBNET={{ cluster_service_subnet }}"
      - "CLUSTER_SERVICE_START_ADDRESS={{ address_pairs['cluster_service']['start'] }}"
      - "CLUSTER_SERVICE_END_ADDRESS={{ address_pairs['cluster_service']['end'] }}"
      - "EXTERNAL_OAM_SUBNET={{ external_oam_subnet }}"
      - "EXTERNAL_OAM_START_ADDRESS={{ address_pairs['oam']['start'] }}"
      - "EXTERNAL_OAM_END_ADDRESS={{ address_pairs['oam']['end'] }}"
      - "EXTERNAL_OAM_GATEWAY_ADDRESS={{ external_oam_gateway_address }}"
      - "EXTERNAL_OAM_FLOATING_ADDRESS={{ external_oam_floating_address }}"
      - "EXTERNAL_OAM_0_ADDRESS={{ address_pairs['oam_node']['start'] }}"
      - "EXTERNAL_OAM_1_ADDRESS={{ address_pairs['oam_node']['end'] }}"
      - "MANAGEMENT_MULTICAST_SUBNET={{ management_multicast_subnet }}"
      - "MANAGEMENT_MULTICAST_START_ADDRESS={{ address_pairs['multicast']['start'] }}"
      - "MANAGEMENT_MULTICAST_END_ADDRESS={{ address_pairs['multicast']['end'] }}"
      - "DOCKER_HTTP_PROXY={{ docker_http_proxy }}"
      - "DOCKER_HTTPS_PROXY={{ docker_https_proxy }}"
      - "DOCKER_NO_PROXY={{ docker_no_proxy_combined | join(',') }}"
      - "K8S_REGISTRY={{ k8s_registry }}"
      - "GCR_REGISTRY={{ gcr_registry }}"
      - "QUAY_REGISTRY={{ quay_registry }}"
      - "DOCKER_REGISTRY={{ docker_registry }}"
      - "USE_DEFAULT_REGISTRIES={{ use_default_registries }}"
      - "IS_SECURE_REGISTRY={{ is_secure_registry | default(True) }}"
      - "RECONFIGURE_ENDPOINTS={{ reconfigure_endpoints }}"

  - name: Write simplex flag
    file:
      path: /etc/platform/simplex
      state: touch

  when: save_config
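With mostly default overrides, the lineinfile loop above yields a flat ini file consumed by the python sysinv db population script; an abridged sample (values assumed for illustration):

    [BOOTSTRAP_CONFIG]
    CONTROLLER_HOSTNAME=controller-0
    SYSTEM_TYPE=All-in-one
    SYSTEM_MODE=simplex
    TIMEZONE=UTC
    MANAGEMENT_SUBNET=192.168.204.0/24
    MANAGEMENT_START_ADDRESS=192.168.204.2
    MANAGEMENT_END_ADDRESS=192.168.204.254
    DOCKER_NO_PROXY=localhost,127.0.0.1,registry.local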
@@ -1,43 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
#   Validate the format of a docker registry/no-proxy address
#

- name: Check if the supplied address is a valid domain name or ipv4 address
  vars:
    script_content: |
      # Use this utility to be consistent with the current config_controller,
      # though the underlying regex used is not flexible.
      from controllerconfig.utils import is_valid_domain_or_ip
      if not is_valid_domain_or_ip("{{ input_address }}"):
          raise Exception("Invalid domain name!")
  shell: "{{ script_content }}"
  args:
    executable: /usr/bin/python
  failed_when: false
  register: domain_name_ipv4_check

# The domain name check above covers domain names as well as IPv4 addresses
# with or without a port. If it fails, check whether the address is in ipv6
# format.
- block:
  - name: Check if the supplied address is of ipv6 with port format
    set_fact:
      ipv6_with_port: true
    when: input_address is search("\[") and input_address is search("\]")

  - name: Fail if the supplied address is not a valid ipv6
    fail:
      msg: "{{ input_address }} is an invalid address."
    when: (not ipv6_with_port | default(false)) and (input_address|ipv6 == false)

  - name: Fail if the supplied address is not a valid ipv6 with port
    fail:
      msg: "{{ input_address }} is an invalid address."
    when: (ipv6_with_port | default(false)) and
          ((input_address.split('[')[1]).split(']')[0]|ipv6 == false)
  when: domain_name_ipv4_check.rc != 0
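Callers include this sub-task once per address, as the validate-config role above does; a usage sketch (addresses assumed for illustration):

    - include: validate_address.yml input_address={{ item }}
      with_items:
        - registry.local
        - 10.10.10.100
        - "[fd00::100]:9001"  # bracketed ipv6-with-port takes the second branch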
@@ -1,66 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
#   Validate the addresses in the provided range as well as the range size
#

- set_fact:
    network: "{{ item.key }}"
    start_addr: "{{ item.value['start'] }}"
    end_addr: "{{ item.value['end'] }}"
    subnet: "{{ item.value['subnet'] }}"
    use_default: "{{ item.value['use_default'] }}"

- block:
  - name: Validate {{ network }} start and end address format
    debug:
      msg: "{{ network }}: {{ start_addr }} {{ end_addr }}"
    failed_when: (start_addr | ipaddr == False) or (end_addr | ipaddr == False)

  - block:
    - name: Validate {{ network }} start and end range
      vars:
        script_content: |
          from netaddr import IPAddress
          from netaddr import IPNetwork
          from netaddr import IPRange

          start = IPAddress("{{ start_addr }}")
          end = IPAddress("{{ end_addr }}")
          subnet = IPNetwork("{{ subnet }}")

          if not start < end:
              raise Exception("Failed validation, {{ network }} start address must be less than end address.")

          if start not in subnet or end not in subnet:
              raise Exception("Failed validation, {{ network }} start or end address must be within its subnet range.")

          range = IPRange("{{ start_addr }}", "{{ end_addr }}")
          if (("{{ network }}" == 'cluster_pod' or "{{ network }}" == 'cluster_service') and
                  range.size < {{ min_pod_service_num_addresses|int }}):
              raise Exception("Failed validation, {{ network }} address range must contain at least %d addresses." %
                              int("{{ min_pod_service_num_addresses }}"))
          elif (("{{ network }}" == 'pxeboot' or "{{ network }}" == 'multicast' or "{{ network }}" == 'management') and
                  range.size < {{ min_16_addresses|int }}):
              raise Exception("Failed validation, {{ network }} address range must contain at least %d addresses." %
                              int("{{ min_16_addresses }}"))
          elif range.size < {{ min_8_addresses|int }}:
              raise Exception("Failed validation, {{ network }} address range must contain at least %d addresses." %
                              int("{{ min_8_addresses }}"))
      shell: "{{ script_content }}"
      args:
        executable: /usr/bin/python
      failed_when: false
      register: range_check_result

    - name: Fail if address range did not meet required criteria
      fail:
        msg: "{{ range_check_result.stderr_lines[-1] }}"
      when: range_check_result.rc != 0

    when: network != 'oam_node'
  when: not use_default
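As a worked check of the size rules above (numbers assumed for illustration): a management range of 192.168.204.2 through 192.168.204.50 gives IPRange(...).size == 49, which clears the 16-address minimum, whereas 192.168.204.2 through 192.168.204.9 has size 8, which would fail the management/pxeboot/multicast check yet still satisfy the generic 8-address floor.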
@@ -1,30 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
#   Validate docker proxy url format
#

- block:
  - name: Check if the supplied proxy is a valid URL
    vars:
      script_content: |
        # Use this utility to be consistent with the current config_controller
        # and sysinv
        from controllerconfig.utils import is_valid_url
        if not is_valid_url("{{ input_url }}"):
            raise Exception("Invalid url format!")
    shell: "{{ script_content }}"
    args:
      executable: /usr/bin/python
    failed_when: false
    register: proxy_url_check

  - name: Fail if proxy has the wrong format
    fail:
      msg: "{{ input_url }} is an invalid URL."
    when: proxy_url_check.rc != 0
  when: input_url != 'undef'
@@ -1,12 +0,0 @@
---
config_workdir: /tmp/config
bootstrap_config_file: /tmp/config/bootstrap_config
typical_subnet_prefix: 24
min_8_addresses: 8
min_16_addresses: 16
min_pod_service_num_addresses: 65536
minimum_ipv6_prefix_length: 64

private_pxeboot_subnet: 169.254.202.0/24
pxecontroller_floating_hostname: pxecontroller
use_entire_pxeboot_subnet: True