Resolves all of the current linting issues

This PR goes through all of the files and looks for lint violations
and updates all of them.

Updated per feedback

Change-Id: I5bc4ee6bcb19f367415425fdf2b1d77a2a9ab4ba
Closes-Bug: 1398601
This commit is contained in:
Kevin Carter 2014-12-02 18:30:47 -06:00 committed by Andy McCrae
parent 38d775ec26
commit 6389c643ee
23 changed files with 232 additions and 208 deletions

View File

@ -16,17 +16,17 @@
# Example usage:
# ansible-playbook -i inventory/dynamic_inventory.py -e "host_group=infra1,container_name=horizon_container" setup/archive-container.yml
# This will create a new archive of an existing container and then retreve
# the container storing the archive on the local system. Once the archive
# This will create a new archive of an existing container and then retrieve
# the container storing the archive on the local system. Once the archive
# has been retrieved the archive is removed from the source system.
- hosts: "{{ host_group|default('hosts') }}"
user: root
tasks:
# Set facts on containers
- name: Get info on a given container
lxc: >
command=info
name={{ container_name }}
lxc:
command: "info"
name: "{{ container_name }}"
- name: Print information on all containers
debug: var=lxc_facts

View File

@ -22,138 +22,137 @@
tasks:
# Create container directory
- name: Create container directory
file: >
path="{{ lxcpath }}/{{ name }}"
state=directory
group="root"
owner="root"
recurse=true
file:
path: "{{ lxcpath }}/{{ name }}"
state: "directory"
group: "root"
owner: "root"
recurse: "true"
# If check for the lxc VG
- name: Check for lxc volume group
shell: >
(which vgs > /dev/null && vgs | grep -o "{{ vg_name }}") || false
shell: "(which vgs > /dev/null && vgs | grep -o {{ vg_name }}) || false"
register: vg_result
ignore_errors: True
# If lxc vg create new lv
- name: Create new LV
lvol: >
vg="{{ vg_name }}"
lv="{{ name }}"
size="{{ lv_size }}"
lvol:
vg: "{{ vg_name }}"
lv: "{{ name }}"
size: "{{ lv_size }}"
when: vg_result.rc == 0
# If lxc vg format new lv
- name: Format the new LV
filesystem: >
fstype="{{ fstype }}"
dev="/dev/{{ vg_name }}/{{ name }}"
filesystem:
fstype: "{{ fstype }}"
dev: "/dev/{{ vg_name }}/{{ name }}"
when: vg_result.rc == 0
# If lxc vg mount new lv at $container/rootfs
- name: Mount Container LV
mount: >
name="{{ lxcpath }}/{{ name }}/rootfs"
src="/dev/{{ vg_name }}/{{ name }}"
fstype="{{ fstype }}"
state=mounted
mount:
name: "{{ lxcpath }}/{{ name }}/rootfs"
src: "/dev/{{ vg_name }}/{{ name }}"
fstype: "{{ fstype }}"
state: "mounted"
when: vg_result.rc == 0
# upload new archive to host
- name: Upload Archive to host
synchronize: >
src="{{ local_store_path }}/{{ archive_name }}"
dest="{{ remote_store_path }}/{{ archive_name }}"
archive=yes
mode=push
synchronize:
src: "{{ local_store_path }}/{{ archive_name }}"
dest: "{{ remote_store_path }}/{{ archive_name }}"
archive: "yes"
mode: "push"
# Unarchive container
- name: Unarchive a container
unarchive: >
src="{{ remote_store_path }}/{{ archive_name }}"
dest="{{ lxcpath }}/{{ name }}"
unarchive:
src: "{{ remote_store_path }}/{{ archive_name }}"
dest: "{{ lxcpath }}/{{ name }}"
register: result
# If lxc vg unmount new lv
- name: Unmount Container LV
mount: >
name="{{ lxcpath }}/{{ name }}/rootfs"
src="/dev/{{ vg_name }}/{{ name }}"
fstype="{{ fstype }}"
state=unmounted
mount:
name: "{{ lxcpath }}/{{ name }}/rootfs"
src: "/dev/{{ vg_name }}/{{ name }}"
fstype: "{{ fstype }}"
state: "unmounted"
when: vg_result.rc == 0
# Delete archive directory
- name: Cleanup archive
file: >
path="{{ remote_store_path }}/{{ archive_name }}"
state=absent
when: result|changed
file:
path: "{{ remote_store_path }}/{{ archive_name }}"
state: "absent"
when: result | changed
# Ensure config is without old cruft
- name: Ensure clean config
lineinfile: >
dest="{{ lxcpath }}/{{ name }}/config"
regexp="{{ item.regexp }}"
state=absent
backup=yes
lineinfile:
dest: "{{ lxcpath }}/{{ name }}/config"
regexp: "{{ item.regexp }}"
state: "absent"
backup: "yes"
with_items:
- { regexp: "^lxc.network.hwaddr" }
- { regexp: "^lxc.mount.entry" }
# If not lxc vg set the rootfs
- name: Set rootfs to localfs
lineinfile: >
dest="{{ lxcpath }}/{{ name }}/config"
regexp="^lxc.rootfs"
line="lxc.rootfs = {{ lxcpath }}/{{ name }}/rootfs"
state=present
lineinfile:
dest: "{{ lxcpath }}/{{ name }}/config"
regexp: "^lxc.rootfs"
line: "lxc.rootfs = {{ lxcpath }}/{{ name }}/rootfs"
state: "present"
when: vg_result.rc != 0
# If lxc vg set the rootfs
- name: Set rootfs to lvm
lineinfile: >
dest="{{ lxcpath }}/{{ name }}/config"
regexp="^lxc.rootfs"
line="lxc.rootfs = /dev/{{ vg_name }}/{{ name }}"
state=present
lineinfile:
dest: "{{ lxcpath }}/{{ name }}/config"
regexp: "^lxc.rootfs"
line: "lxc.rootfs = /dev/{{ vg_name }}/{{ name }}"
state: "present"
when: vg_result.rc == 0
# Ensure the configuration is complete
- name: Ensure config updated
lineinfile: >
dest="{{ lxcpath }}/{{ name }}/config"
regexp="^lxc.utsname"
line="lxc.utsname = {{ name }}"
state=present
lineinfile:
dest: "{{ lxcpath }}/{{ name }}/config"
regexp: "^lxc.utsname"
line: "lxc.utsname = {{ name }}"
state: "present"
# Ensure the mount point is correct
- name: Ensure mount point updated updated
lineinfile: >
dest="{{ lxcpath }}/{{ name }}/config"
regexp="^lxc.mount"
line="lxc.mount = /var/lib/lxc/{{ name }}/fstab"
state=present
lineinfile:
dest: "{{ lxcpath }}/{{ name }}/config"
regexp: "^lxc.mount"
line: "lxc.mount = /var/lib/lxc/{{ name }}/fstab"
state: "present"
# Start the new container
- name: Start new Container
lxc: >
command=start
name="{{ name }}"
lxc:
command: "start"
name: "{{ name }}"
# If address is set update it in the network script
- name: Update networking
lxc: >
command=attach
name="{{ name }}"
container_command="sed -i 's/address.*/address\ {{ address }}/g' /etc/network/interfaces"
lxc:
command: "attach"
name: "{{ name }}"
container_command: "sed -i 's/address.*/address\ {{ address }}/g' /etc/network/interfaces"
when: address is defined
# Restart the new container
- name: Restart new container
lxc: >
command=restart
name="{{ name }}"
lxc:
command: "restart"
name: "{{ name }}"
vars:
local_store_path: /tmp
remote_store_path: /tmp

View File

@ -14,38 +14,37 @@
# limitations under the License.
- name: Check for local swift creds file
shell: >
[ -f "{{ creds_file }}" ]
shell: "[ -f '{{ creds_file }}' ]"
register: swift_creds
delegate_to: localhost
ignore_errors: True
- name: Copy swift credentials to host
copy: >
src="{{ creds_file }}"
dest="{{ creds_file }}"
owner=root
group=root
mode=0600
copy:
src: "{{ creds_file }}"
dest: "{{ creds_file }}"
owner: "root"
group: "root"
mode: "0600"
register: copy_swift
when: swift_creds|success
- name: Create container
swift: >
command=create
container="{{ swift_container }}"
config_file="{{ creds_file }}"
section="{{ section }}"
swift:
command: "create"
container: "{{ swift_container }}"
config_file: "{{ creds_file }}"
section: "{{ section }}"
register: container_create
when: copy_swift|success
ignore_errors: True
- name: Upload object
swift: >
command=upload
src="{{ src }}"
container="{{ swift_container }}"
object="{{ swift_object }}"
config_file="{{ creds_file }}"
section="{{ section }}"
swift:
command: "upload"
src: "{{ src }}"
container: "{{ swift_container }}"
object: "{{ swift_object }}"
config_file: "{{ creds_file }}"
section: "{{ section }}"
when: container_create|success

View File

@ -14,9 +14,9 @@
# limitations under the License.
- name: get container info
lxc: >
command=info
name="{{ container_name }}"
lxc:
command: "info"
name: "{{ container_name }}"
- name: store_original_ip
set_fact:
@ -24,39 +24,39 @@
when: "lxc_facts[container_name]['state'] == 'running'"
- name: Clone Container
lxc: >
command=clone
orig="{{ container_name }}"
new="{{ new_name }}"
backingstore="{{ bdev }}"
fssize="{{ fssize }}"
snapshot="{{ snapshot }}"
state="{{ state }}"
lxc:
command: "clone"
orig: "{{ container_name }}"
new: "{{ new_name }}"
backingstore: "{{ bdev }}"
fssize: "{{ fssize }}"
snapshot: "{{ snapshot }}"
state: "{{ state }}"
- name: Ensure clean config
lineinfile: >
dest="{{ lxcpath }}/{{ new_name }}/config"
regexp="{{ item }}"
state=absent
backup=yes
lineinfile:
dest: "{{ lxcpath }}/{{ new_name }}/config"
regexp: "{{ item }}"
state: "absent"
backup: "yes"
with_items:
- "^lxc.network.hwaddr"
- "^lxc.mount.entry"
- name: restart new container
lxc: >
command=start
name="{{ new_name }}"
lxc:
command: "start"
name: "{{ new_name }}"
- name: Update networking
lxc: >
command=attach
name="{{ new_name }}"
container_command="sed -i 's/{{ orig_ip }}/{{ address }}/g' /etc/network/interfaces"
lxc:
command: "attach"
name: "{{ new_name }}"
container_command: "sed -i 's/{{ orig_ip }}/{{ address }}/g' /etc/network/interfaces"
when: orig_ip is defined
register: result
- name: restart new container
lxc: >
command=restart
name="{{ new_name }}"
lxc:
command: "restart"
name: "{{ new_name }}"

View File

@ -14,18 +14,18 @@
# limitations under the License.
- name: create the system group
group: >
name={{ system_group }}
state=present
system=yes
group:
name: "{{ system_group }}"
state: "present"
system: "yes"
when: system_group is defined
- name: create system user
user: >
name={{ system_user }}
shell=/bin/false
group={{ system_group }}
home=/var/lib/{{ system_user }}
system=yes
createhome=yes
user:
name: "{{ system_user }}"
shell: "/bin/false"
group: "{{ system_group }}"
home: "/var/lib/{{ system_user }}"
system: "yes"
createhome: "yes"
when: system_group is defined and system_user is defined

View File

@ -16,7 +16,7 @@
# destroy an LXC container.
- name: Destroy Containers
lxc: >
name={{ hostvars[item]['container_name'] }}
command=destroy
lxc:
name: "{{ hostvars[item]['container_name'] }}"
command: "destroy"
with_items: container_groups

View File

@ -15,7 +15,7 @@
- name: Adding new system tuning
sysctl:
name: "{{ item.key }}"
name: "{{ item.key }}"
value: "{{ item.value }}"
sysctl_set: "{{ item.set|default('yes') }}"
state: "{{ item.state|default('present') }}"

View File

@ -20,7 +20,7 @@
group: "root"
owner: "root"
recurse: "true"
with_items:
with_items:
- /etc/mysql/conf.d
- name: Drop mariadb configs

View File

@ -53,6 +53,6 @@
line: "ServerName {{ container_name }}"
- name: Ensure Apache is running
service:
service:
name: "apache2"
state: "restarted"

View File

@ -13,15 +13,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Ensure ssh directory
file:
path: "{{ ansible_env.HOME }}/.ssh"
state: "directory"
group: "{{ ansible_user_id }}"
owner: "{{ ansible_user_id }}"
mode: "0755"
- name: Update SSH keys
shell: wget {{ ssh_key_url }} -O /root/.ssh/remotekeys
get_url:
url: "{{ ssh_key_url }}"
dest: "{{ ansible_env.HOME }}/.ssh/remotekeys"
mode: "0640"
when: ssh_key_url is defined
- name: Ensure all keys in authorized_keys
shell: |
while read key; do
if [[ ! "$(grep "${key}" /root/.ssh/authorized_keys)" ]];then
echo "$key" | tee -a /root/.ssh/authorized_keys
while read key; do
if [[ ! "$(grep "$key" {{ ansible_env.HOME }}/.ssh/authorized_keys)" ]];then
echo "$key" | tee -a {{ ansible_env.HOME }}/.ssh/authorized_keys
fi
done < /root/.ssh/remotekeys
when: ssh_key_url is defined

View File

@ -15,53 +15,53 @@
# Create an admin tenant
- name: Ensure Additional Tenants
keystone: >
command=ensure_tenant
login_tenant_name="{{ auth_admin_tenant }}"
login_user="{{ auth_admin_username }}"
login_password="{{ auth_admin_password }}"
endpoint="{{ auth_admin_uri }}"
tenant_name="{{ item.tenant }}"
description="{{ item.description }}"
keystone:
command: "ensure_tenant"
login_tenant_name: "{{ auth_admin_tenant }}"
login_user: "{{ auth_admin_username }}"
login_password: "{{ auth_admin_password }}"
endpoint: "{{ auth_admin_uri }}"
tenant_name: "{{ item.tenant }}"
description: "{{ item.description }}"
with_items: additional_keystone_users
when: additional_keystone_users is defined
# Create an admin user
- name: Ensure Additional Users
keystone: >
command=ensure_user
login_tenant_name="{{ auth_admin_tenant }}"
login_user="{{ auth_admin_username }}"
login_password="{{ auth_admin_password }}"
endpoint="{{ auth_admin_uri }}"
user_name="{{ item.user }}"
tenant_name="{{ item.tenant }}"
password="{{ item.password }}"
keystone:
command: "ensure_user"
login_tenant_name: "{{ auth_admin_tenant }}"
login_user: "{{ auth_admin_username }}"
login_password: "{{ auth_admin_password }}"
endpoint: "{{ auth_admin_uri }}"
user_name: "{{ item.user }}"
tenant_name: "{{ item.tenant }}"
password: "{{ item.password }}"
with_items: additional_keystone_users
when: additional_keystone_users is defined
# Create an admin role
- name: Ensure Admin role
keystone: >
command=ensure_role
login_tenant_name="{{ auth_admin_tenant }}"
login_user="{{ auth_admin_username }}"
login_password="{{ auth_admin_password }}"
endpoint="{{ auth_admin_uri }}"
role_name="{{ item.role }}"
keystone:
command: "ensure_role"
login_tenant_name: "{{ auth_admin_tenant }}"
login_user: "{{ auth_admin_username }}"
login_password: "{{ auth_admin_password }}"
endpoint: "{{ auth_admin_uri }}"
role_name: "{{ item.role }}"
with_items: additional_keystone_users
when: additional_keystone_users is defined
# Add a role to the user
- name: Ensure User has Role
keystone: >
command=ensure_user_role
login_tenant_name="{{ auth_admin_tenant }}"
login_user="{{ auth_admin_username }}"
login_password="{{ auth_admin_password }}"
endpoint="{{ auth_admin_uri }}"
user_name="{{ item.user }}"
tenant_name="{{ item.tenant }}"
role_name="{{ item.role }}"
keystone:
command: ensure_user_role
login_tenant_name: "{{ auth_admin_tenant }}"
login_user: "{{ auth_admin_username }}"
login_password: "{{ auth_admin_password }}"
endpoint: "{{ auth_admin_uri }}"
user_name: "{{ item.user }}"
tenant_name: "{{ item.tenant }}"
role_name: "{{ item.role }}"
with_items: additional_keystone_users
when: additional_keystone_users is defined

View File

@ -108,7 +108,7 @@
path: "/etc/apache2/sites-enabled/000-default.conf"
- name: Create kibana http_auth user
htpasswd:
htpasswd:
path: "/etc/apache2/users"
name: "kibana"
password: "{{ kibana_password }}"

View File

@ -27,7 +27,7 @@
shell: |
. /root/openrc
SERVICE_ID="$(keystone service-list | grep -w 'compute' | awk '{print $2}')"
keystone endpoint-list | grep -w "${SERVICE_ID}" | awk '{print $10}'
keystone endpoint-list | grep -w "$SERVICE_ID" | awk '{print $10}'
register: novaendpoint
changed_when: false
failed_when: novaendpoint.rc != 0

View File

@ -34,8 +34,8 @@
- name: Perform a Neutron DB Stamp
command: >
neutron-db-manage --config-file {{ neutron_config }}
--config-file {{ neutron_plugin }}
neutron-db-manage --config-file {{ neutron_config }}
--config-file {{ neutron_plugin }}
stamp {{ neutron_revision }}
when: neutron_dbmanage.rc != 0
sudo: yes

View File

@ -23,18 +23,18 @@
notify: Restart libvirt-bin
- name: Set libvirt-bin upstart script
copy: >
src=libvirt-bin.conf
dest=/etc/init/libvirt-bin.conf
owner="root"
group="root"
mode=0644
copy:
src: "libvirt-bin.conf"
dest: "/etc/init/libvirt-bin.conf"
owner: "root"
group: "root"
mode: "0644"
notify: Restart libvirt-bin
- name: Add nova user to libvirtd group
user: >
name="{{ system_user }}"
groups=libvirtd
append=yes
user:
name: "{{ system_user }}"
groups: "libvirtd"
append: "yes"
- include: virsh_net_remove.yml

View File

@ -30,5 +30,5 @@
version: "{{ git_install_branch }}"
register: git_fallback_clone
when: git_clone|failed
until: git_fallback_clone|success
until: git_fallback_clone|success
retries: 5

View File

@ -19,8 +19,8 @@
# TODO(kevin) This should go away sooner than later
- name: Laydown the example files
shell: |
for i in *; do
if [ ! -f "/etc/{{ service_name }}/$i" ]; then
for i in *; do
if [ ! -f "/etc/{{ service_name }}/$i" ]; then
cp -R $i /etc/{{ service_name }}/
fi
done

View File

@ -15,8 +15,8 @@
- name: Restart rabbitmq-server
service:
name: rabbitmq-server
enabled: yes
state: restarted
name: "rabbitmq-server"
enabled: "yes"
state: "restarted"
tags:
- rabbit_restart

View File

@ -14,7 +14,7 @@
# limitations under the License.
# If cluster name is our own hostname, we assume we're not properly clustered
# TODO(someone): implement a more robust way of checking
# TODO(someone): implement a more robust way of checking
# if node is clustered or not
- name: Check cluster status
shell: |

View File

@ -1,13 +1,27 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Ensure rabbitmq user
rabbitmq_user:
rabbitmq_user:
user: "{{ rabbit_userid }}"
password: "{{ rabbit_password }}"
vhost: "/"
configure_priv: ".*"
read_priv: ".*"
write_priv: ".*"
state: present
state: "present"
tags:
- users
- rabbit_config

View File

@ -14,21 +14,22 @@
# limitations under the License.
- name: Ensude motd is not printed in ssh
lineinfile:
lineinfile:
dest: /etc/ssh/sshd_config
regexp: "^PrintMotd"
line: "PrintMotd yes"
state: present
state: "present"
notify: SSH restart
tags:
- set_motd
- name: Setup support motd
template:
owner: root
group: root
mode: 0755
src: 20-openstack.j2
owner: "root"
group: "root"
mode: "0755"
src: "20-openstack.j2"
dest: "/etc/update-motd.d/20-openstack"
tags:
- set_motd

View File

@ -14,7 +14,7 @@
# limitations under the License.
- name: stop rsyslog
service:
service:
name: "rsyslog"
state: "stopped"
pattern: "rsyslog"

View File

@ -22,7 +22,7 @@
group=root
with_items: program_names
notify: Restart service
- name: Ensure init scripts are loaded
shell: >
initctl list | grep -w "{{ item }}"