Adding exec role to support installing/updating packages
The role below can be used to install or upgrade packages and binaries using predefined shell scripts built into the hostconfig-operator. The script name has to be specified in the CR so that it can be used to perform the required configuration on the nodes. The scripts can be customized to perform security upgrades or any other configuration required on the host. Change-Id: I4400d2f278749082805bb1131bf2c3bc7f95fadb
This commit is contained in:
parent
acf1abd11e
commit
1bea379007
15
Dockerfile
15
Dockerfile
|
@ -6,12 +6,6 @@ COPY airship-host-config/requirements.yml ${HOME}/requirements.yml
|
|||
RUN ansible-galaxy collection install -r ${HOME}/requirements.yml \
|
||||
&& chmod -R ug+rwx ${HOME}/.ansible
|
||||
|
||||
# Configuration for ansible
|
||||
COPY airship-host-config/build/ansible.cfg /etc/ansible/ansible.cfg
|
||||
|
||||
# CRD entrypoint definition YAML file
|
||||
COPY airship-host-config/watches.yaml ${HOME}/watches.yaml
|
||||
|
||||
# Installing ssh clients - used to connect to kubernetes nodes
|
||||
USER root
|
||||
RUN dnf install openssh-clients -y
|
||||
|
@ -19,6 +13,12 @@ RUN rpm -ivh https://archives.fedoraproject.org/pub/archive/epel/6/x86_64/epel-r
|
|||
&& dnf -y install sshpass
|
||||
USER ansible-operator
|
||||
|
||||
# Configuration for ansible
|
||||
COPY airship-host-config/build/ansible.cfg /etc/ansible/ansible.cfg
|
||||
|
||||
# CRD entrypoint definition YAML file
|
||||
COPY airship-host-config/watches.yaml ${HOME}/watches.yaml
|
||||
|
||||
# Copying the configuration roles
|
||||
COPY airship-host-config/roles/ ${HOME}/roles/
|
||||
|
||||
|
@ -36,5 +36,8 @@ COPY airship-host-config/plugins/ ${HOME}/plugins/
|
|||
# https://github.com/ansible/ansible-runner/blob/stable/1.3.x/ansible_runner/runner_config.py#L178
|
||||
COPY airship-host-config/plugins/callback/hostconfig_k8_cr_status.py /usr/local/lib/python3.6/site-packages/ansible/plugins/callback/
|
||||
|
||||
# Copying scripts folder used by exec configuration
|
||||
COPY airship-host-config/scripts/ ${HOME}/scripts/
|
||||
|
||||
# Initializing ssh folder
|
||||
RUN mkdir ${HOME}/.ssh
|
||||
|
|
|
@ -6,19 +6,19 @@ COPY requirements.yml ${HOME}/requirements.yml
|
|||
RUN ansible-galaxy collection install -r ${HOME}/requirements.yml \
|
||||
&& chmod -R ug+rwx ${HOME}/.ansible
|
||||
|
||||
# Installing ssh clients - used to connect to kubernetes nodes
|
||||
USER root
|
||||
RUN dnf install openssh-clients -y
|
||||
RUN rpm -ivh https://archives.fedoraproject.org/pub/archive/epel/6/x86_64/epel-release-6-8.noarch.rpm \
|
||||
&& dnf -y install sshpass
|
||||
USER ansible-operator
|
||||
|
||||
# Configuration for ansible
|
||||
COPY build/ansible.cfg /etc/ansible/ansible.cfg
|
||||
|
||||
# CRD entrypoint definition YAML file
|
||||
COPY watches.yaml ${HOME}/watches.yaml
|
||||
|
||||
# Installing ssh clients - used to connect to kubernetes nodes
|
||||
USER root
|
||||
RUN usermod --password rhEpSyEyZ9rxc root
|
||||
RUN dnf install openssh-clients -y
|
||||
RUN rpm -ivh https://archives.fedoraproject.org/pub/archive/epel/6/x86_64/epel-release-6-8.noarch.rpm && yum --enablerepo=epel -y install sshpass
|
||||
USER ansible-operator
|
||||
|
||||
# Copying the configuration roles
|
||||
COPY roles/ ${HOME}/roles/
|
||||
|
||||
|
@ -36,5 +36,8 @@ COPY plugins/ ${HOME}/plugins/
|
|||
# https://github.com/ansible/ansible-runner/blob/stable/1.3.x/ansible_runner/runner_config.py#L178
|
||||
COPY plugins/callback/hostconfig_k8_cr_status.py /usr/local/lib/python3.6/site-packages/ansible/plugins/callback/
|
||||
|
||||
# Copying scripts folder used by exec configuration
|
||||
COPY scripts/ ${HOME}/scripts/
|
||||
|
||||
# Initializing ssh folder
|
||||
RUN mkdir ${HOME}/.ssh
|
||||
|
|
|
@ -74,6 +74,20 @@ spec:
|
|||
type: object
|
||||
description: "The configuration details that needs to be performed on the targeted kubernetes nodes."
|
||||
properties:
|
||||
exec:
|
||||
description: "An array of script configuration that would be executed on the target nodes"
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
args:
|
||||
type: string
|
||||
environment:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
ulimit:
|
||||
description: "An array of ulimit configuration to be performed on the target nodes."
|
||||
type: array
|
||||
|
|
|
@ -138,25 +138,28 @@ class CallbackModule(CallbackBase):
|
|||
]
|
||||
for res in task_result['results']:
|
||||
stat = dict()
|
||||
stat[task_name] = dict()
|
||||
for key in check_keys_res:
|
||||
if key in res.keys() and res[key]:
|
||||
stat[key] = res[key]
|
||||
stat[task_name][key] = res[key]
|
||||
if 'failed' in res.keys() and res['failed']:
|
||||
stat['status'] = "Failed"
|
||||
stat[task_name]['status'] = "Failed"
|
||||
elif 'unreachable' in res.keys() and res['unreachable']:
|
||||
stat['status'] = "Unreachable"
|
||||
stat[task_name]['status'] = "Unreachable"
|
||||
else:
|
||||
stat['status'] = "Successful"
|
||||
stat[task_name]['status'] = "Successful"
|
||||
if "vars" in result._task_fields.keys() and \
|
||||
"cr_status_vars" in \
|
||||
result._task_fields["vars"].keys():
|
||||
for var in result._task_fields["vars"]["cr_status_vars"]:
|
||||
if var in res.keys():
|
||||
stat[var] = res[var]
|
||||
stat[task_name][var] = res[var]
|
||||
if "ansible_facts" in res.keys() and \
|
||||
var in res["ansible_facts"].keys():
|
||||
stat[var] = res["ansible_facts"][var]
|
||||
status[task_name]['results'].append(stat)
|
||||
stat[task_name][var] = res["ansible_facts"][var]
|
||||
stat_list = list()
|
||||
stat_list.append(stat)
|
||||
status[task_name]['results'].append(stat_list)
|
||||
if failed:
|
||||
status[task_name]['status'] = "Failed"
|
||||
elif unreachable:
|
||||
|
@ -171,7 +174,26 @@ class CallbackModule(CallbackBase):
|
|||
if "ansible_facts" in task_result.keys() and \
|
||||
var in task_result["ansible_facts"].keys():
|
||||
status[var] = task_result["ansible_facts"][var]
|
||||
self.host_config_status[k8_hostname].update(status)
|
||||
if result._task.get_first_parent_include() and \
|
||||
result._task.get_first_parent_include().name in \
|
||||
self.host_config_status[k8_hostname].keys():
|
||||
parent_task = result._task.get_first_parent_include().name
|
||||
for i in range(len(self.host_config_status[k8_hostname][parent_task]['results'])):
|
||||
res = self.host_config_status[k8_hostname][parent_task]['results'][i]
|
||||
if len(res) == 0:
|
||||
self.host_config_status[k8_hostname][parent_task]['results'][i].append(status)
|
||||
break
|
||||
task_names = list()
|
||||
for j in range(len(res)):
|
||||
task_names.append(list(res[j].keys())[0])
|
||||
if task_name not in task_names:
|
||||
self.host_config_status[k8_hostname][parent_task]['results'][i].append(status)
|
||||
break
|
||||
if status[task_name]['status'] != "Successful":
|
||||
self.host_config_status[k8_hostname][parent_task]['status'] = \
|
||||
status[task_name]['status']
|
||||
else:
|
||||
self.host_config_status[k8_hostname].update(status)
|
||||
self._display.display(str(status))
|
||||
return
|
||||
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
# Exec Role
|
||||
|
||||
This role can be used to perform configuration on the nodes using scripts.
|
||||
The CR takes scriptname, script arguments and environment variables as
|
||||
possible options to perform execution of the specified script on the node.
|
||||
|
||||
The script that is used to perform the configuration has to be present in
|
||||
the hostconfig-operator before using the CR.
|
||||
|
||||
Sample CR object:
|
||||
|
||||
```
|
||||
apiVersion: hostconfig.airshipit.org/v1alpha1
|
||||
kind: HostConfig
|
||||
metadata:
|
||||
name: example-exec
|
||||
spec:
|
||||
host_groups:
|
||||
- name: "kubernetes.io/hostname"
|
||||
values:
|
||||
- "hostconfig-worker"
|
||||
config:
|
||||
exec:
|
||||
- name: example.sh
|
||||
```
|
||||
|
||||
## Adding scripts to hostconfig-operator
|
||||
|
||||
To add custom scripts to the hostconfig-operator, which can later be used
as part of a CR to perform configuration, follow the steps below:
|
||||
|
||||
1. Add the script file to the [scripts](../../scripts) directory, the script
|
||||
has to be executable.
|
||||
2. Build the hostconfig-operator image, `make images`
|
||||
3. Use this image to deploy the hostconfig-operator, make any changes
|
||||
if necessary to the [operator.yaml](../../deploy/operator.yaml)
|
||||
4. Once you have the deployment ready, use the appropriate CR, so that
|
||||
the scripts can be executed. Example [CR example_exec.yaml](../../../demo_examples/example_exec.yaml)
|
|
@ -0,0 +1,14 @@
|
|||
# Copies the named script from the operator image to the target node,
# runs it with the CR-supplied args/environment, and always removes the
# copied script afterwards (even on failure), via the block/always pair.
- block:
    - name: copy script
      # Scripts are baked into the operator image under /opt/ansible/scripts;
      # the CR's `name` selects which one to run.
      copy:
        src: "/opt/ansible/scripts/{{ exec_item.name }}"
        dest: "~/{{ exec_item.name }}"
        mode: '0755'
    - name: exec script command
      command: "~/{{ exec_item.name }} {{ exec_item.args | default('') }}"
      # The `environment` keyword expects a mapping; defaulting to an empty
      # string (as before) is not a valid environment value, so default to {}.
      environment: "{{ exec_item.environment | default({}) }}"
  always:
    # Clean up the copied script regardless of whether execution succeeded.
    - name: delete the file
      file:
        state: absent
        path: "~/{{ exec_item.name }}"
|
|
@ -0,0 +1,5 @@
|
|||
# Entry point for the exec role: runs exec.yml once per entry in the
# CR's `config.exec` list, exposing each entry as `exec_item`.
- name: loop over each exec
  include_tasks: exec.yml
  # `loop` is the modern replacement for `with_items`; defaulting to an
  # empty list makes a CR without `exec` entries a no-op instead of an
  # undefined-variable failure.
  loop: "{{ config.exec | default([]) }}"
  loop_control:
    loop_var: exec_item
|
|
@ -0,0 +1,4 @@
|
|||
#!/bin/bash
# Demo script for the hostconfig exec role. Prints its own name, the
# arguments passed via the CR's `args` field, and the environment
# variables supplied via the CR's `environment` map.
#
# Fix: the shipped example CR (example_exec.yaml) sets `test` and `env1`,
# but this script referenced `$env_1`, so the second variable never
# printed. Align the name with the CR. Expansions are quoted to avoid
# word splitting/globbing.
echo "script name: ${BASH_SOURCE[0]}"
echo "args: $*"
echo "env: $test $env1"
|
|
@ -0,0 +1,19 @@
|
|||
# In this example we are executing sample script
# on kubernetes host using exec config.

apiVersion: hostconfig.airshipit.org/v1alpha1
kind: HostConfig
metadata:
  name: example-exec
spec:
  # Select target nodes by kubernetes label: here, the node whose
  # kubernetes.io/hostname label equals "hostconfig-worker".
  host_groups:
    - name: "kubernetes.io/hostname"
      values:
        - "hostconfig-worker"
  config:
    # exec: list of scripts (shipped inside the operator image) to run
    # on the selected nodes; `name` is required, `args` and `environment`
    # are optional.
    exec:
      - name: example.sh
        # Passed verbatim as command-line arguments to the script.
        args: "test1 test2"
        # Exported as environment variables for the script's process.
        environment:
          test: "testing"
          env1: "hello"
|
|
@ -44,7 +44,7 @@ check_status(){
|
|||
hosts=( "${@:2:$1}" ); shift "$(( $1 + 1 ))"
|
||||
pre_host_date=""
|
||||
for j in "${!hosts[@]}"; do
|
||||
kubectl_stdout=$(kubectl get hostconfig $hostconfig -o "jsonpath={.status.hostConfigStatus.${hosts[j]}.Execute\ shell\ command\ on\ nodes.results[0].stdout}" | head -1)
|
||||
kubectl_stdout=$(kubectl get hostconfig $hostconfig -o "jsonpath={.status.hostConfigStatus.${hosts[j]}.Execute\ shell\ command\ on\ nodes.results[0][0].Execute\ shell\ command\ on\ nodes.stdout}" | head -1)
|
||||
echo $kubectl_stdout
|
||||
host_date=$(date --date="$kubectl_stdout" +"%s")
|
||||
if [ ! -z "$pre_host_date" ]; then
|
||||
|
|
Loading…
Reference in New Issue