Merge "Update openstack-helm Armada job"

Zuul 2019-02-04 20:17:16 +00:00 committed by Gerrit Code Review
commit 5648754f50
12 changed files with 35 additions and 1856 deletions

View File

@@ -16,10 +16,11 @@
set -xe
source ./tools/deployment/armada/generate-passwords.sh
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
: ${OSH_PATH:="./"}
source ./tools/deployment/armada/generate-osh-passwords.sh
[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
@@ -42,8 +43,10 @@ export OSH_PATH
# fluentd's configuration. This ensures the placeholder value gets rendered appropriately
export tag='${tag}'
manifests="armada-cluster-ingress armada-ceph armada-lma armada-osh"
for manifest in $manifests; do
for manifest in armada-cluster-ingress armada-ceph; do
echo "Rendering $manifest manifest"
envsubst < ./tools/deployment/armada/manifests/$manifest.yaml > /tmp/$manifest.yaml
envsubst < ${OSH_INFRA_PATH}/tools/deployment/armada/manifests/$manifest.yaml > /tmp/$manifest.yaml
done
echo "Rendering armada-osh manifest"
envsubst < ./tools/deployment/armada/manifests/armada-osh.yaml > /tmp/armada-osh.yaml
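A side note on the rendering step: envsubst replaces every exported variable it finds, so the script deliberately exports the literal string '${tag}' as the value of tag; the placeholder therefore survives substitution and reaches fluentd's configuration intact, as the comment above explains. A minimal sketch of the mechanism, with purely illustrative file names and values:

# Illustrative sketch only; the file names and password value are made up.
export OSH_MARIADB_ADMIN_PASSWORD=example-password   # real values come from generate-osh-passwords.sh
export tag='${tag}'                                  # keep fluentd's ${tag} placeholder literal
envsubst < manifest-template.yaml > /tmp/manifest.yaml
grep tag /tmp/manifest.yaml                          # still shows ${tag}; passwords are filled in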

View File

@@ -16,7 +16,7 @@
set -xe
manifests="armada-cluster-ingress armada-ceph armada-lma armada-osh"
manifests="armada-cluster-ingress armada-ceph armada-osh"
for manifest in $manifests; do
echo "Validating $manifest manifest"
armada validate /tmp/$manifest.yaml

View File

@@ -16,7 +16,7 @@
set -xe
manifests="armada-cluster-ingress armada-ceph armada-lma armada-osh"
manifests="armada-cluster-ingress armada-ceph armada-osh"
for manifest in $manifests; do
echo "Applying $manifest manifest"
armada apply /tmp/$manifest.yaml
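Taken together, the render, validate, and apply scripts form a single pipeline: manifests are rendered into /tmp with envsubst, checked with armada validate, and only then applied. A condensed sketch of that sequence, assuming it is run from the openstack-helm checkout with OSH_INFRA_PATH exported:

set -xe
export OSH_INFRA_PATH=../openstack-helm-infra
./tools/deployment/armada/020-armada-render-manifests.sh    # writes /tmp/<manifest>.yaml
./tools/deployment/armada/025-armada-validate-manifests.sh  # armada validate /tmp/<manifest>.yaml
./tools/deployment/armada/030-armada-apply-manifests.sh     # armada apply /tmp/<manifest>.yaml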

View File

@@ -22,14 +22,9 @@ passwords="BARBICAN_DB_PASSWORD \
CINDER_DB_PASSWORD \
CINDER_RABBITMQ_USER_PASSWORD \
CINDER_USER_PASSWORD \
DB_ADMIN_PASSWORD \
ELASTICSEARCH_ADMIN_PASSWORD \
GLANCE_DB_PASSWORD \
GLANCE_RABBITMQ_USER_PASSWORD \
GLANCE_USER_PASSWORD \
GRAFANA_ADMIN_PASSWORD \
GRAFANA_DB_PASSWORD \
GRAFANA_SESSION_DB_PASSWORD \
HEAT_DB_PASSWORD \
HEAT_RABBITMQ_USER_PASSWORD \
HEAT_STACK_PASSWORD \
@@ -39,7 +34,6 @@ passwords="BARBICAN_DB_PASSWORD \
KEYSTONE_AUTHTOKEN_MEMCACHED_SECRET_KEY \
KEYSTONE_DB_PASSWORD \
KEYSTONE_RABBITMQ_USER_PASSWORD \
KIBANA_ADMIN_PASSWORD \
NEUTRON_DB_PASSWORD \
NEUTRON_RABBITMQ_USER_PASSWORD \
NEUTRON_USER_PASSWORD \
@@ -48,17 +42,13 @@ passwords="BARBICAN_DB_PASSWORD \
NOVA_RABBITMQ_USER_PASSWORD \
NOVA_USER_PASSWORD \
OPENSTACK_EXPORTER_USER_PASSWORD \
PROMETHEUS_ADMIN_PASSWORD \
OSH_MARIADB_ADMIN_PASSWORD \
RABBITMQ_ADMIN_PASSWORD \
RADOSGW_S3_ADMIN_ACCESS_KEY \
RADOSGW_S3_ADMIN_SECRET_KEY \
RADOSGW_S3_ELASTICSEARCH_ACCESS_KEY \
RADOSGW_S3_ELASTICSEARCH_SECRET_KEY \
SWIFT_USER_PASSWORD"
for password in $passwords
do
value=$(tr -dc A-Za-z0-9 < /dev/urandom 2>/dev/null | head -c 20)
export $password=$value
echo "export $password=$value" >> /tmp/passwords.env
echo "export $password=$value" >> /tmp/osh-passwords.env
done
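Each name in the list receives a random 20-character alphanumeric value, is exported into the current shell, and is appended to /tmp/osh-passwords.env so a later step can pick up the same values again. For a single variable the generated line looks roughly like this (the value is illustrative only):

value=$(tr -dc A-Za-z0-9 < /dev/urandom 2>/dev/null | head -c 20)
export OSH_MARIADB_ADMIN_PASSWORD=$value
echo "export OSH_MARIADB_ADMIN_PASSWORD=$value" >> /tmp/osh-passwords.env
# a later shell could then re-source the file to reuse the same values:
source /tmp/osh-passwords.env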

View File

@@ -1,327 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
values: {}
source:
type: local
location: ${OSH_INFRA_PATH}
subpath: helm-toolkit
reference: master
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ceph-ingress-controller
data:
chart_name: ceph-ingress-controller
release: ceph-ingress-controller
namespace: ceph
wait:
timeout: 1800
labels:
release_group: osh-ceph-ingress-controller
install:
no_hooks: False
upgrade:
no_hooks: False
pre:
delete:
- type: job
labels:
release_group: osh-ceph-ingress-controller
values:
release_uuid: ${RELEASE_UUID}
labels:
server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
error_server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
pod:
replicas:
error_page: 2
ingress: 2
source:
type: local
location: ${OSH_INFRA_PATH}
subpath: ingress
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ceph-mon
data:
chart_name: ceph-mon
release: ceph-mon
namespace: ceph
wait:
timeout: 1800
labels:
release_group: osh-ceph-mon
install:
no_hooks: False
upgrade:
no_hooks: False
pre:
delete:
- type: job
labels:
release_group: osh-ceph-mon
values:
release_uuid: ${RELEASE_UUID}
endpoints:
ceph_mon:
namespace: ceph
network:
public: ${CEPH_NETWORK}
cluster: ${CEPH_NETWORK}
deployment:
storage_secrets: true
ceph: true
bootstrap:
enabled: true
conf:
ceph:
global:
fsid: ${CEPH_FS_ID}
pool:
crush:
tunables: ${CRUSH_TUNABLES}
target:
# NOTE(portdirect): 5 nodes, with one osd per node
osd: 5
pg_per_osd: 100
storage:
osd:
- data:
type: directory
location: /var/lib/openstack-helm/ceph/osd/osd-one
journal:
type: directory
location: /var/lib/openstack-helm/ceph/osd/journal-one
source:
type: local
location: ${OSH_INFRA_PATH}
subpath: ceph-mon
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ceph-osd
data:
chart_name: ceph-osd
release: ceph-osd
namespace: ceph
wait:
timeout: 1800
labels:
release_group: osh-ceph-osd
install:
no_hooks: False
upgrade:
no_hooks: False
pre:
delete:
- type: job
labels:
release_group: osh-ceph-osd
values:
release_uuid: ${RELEASE_UUID}
endpoints:
ceph_mon:
namespace: ceph
network:
public: ${CEPH_NETWORK}
cluster: ${CEPH_NETWORK}
deployment:
ceph: true
bootstrap:
enabled: true
conf:
ceph:
global:
fsid: ${CEPH_FS_ID}
rgw_ks:
enabled: true
pool:
crush:
tunables: ${CRUSH_TUNABLES}
target:
# NOTE(portdirect): 5 nodes, with one osd per node
osd: 5
pg_per_osd: 100
storage:
osd:
- data:
type: directory
location: /var/lib/openstack-helm/ceph/osd/osd-one
journal:
type: directory
location: /var/lib/openstack-helm/ceph/osd/journal-one
source:
type: local
location: ${OSH_INFRA_PATH}
subpath: ceph-osd
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ceph-client
data:
chart_name: ceph-client
release: ceph-client
namespace: ceph
wait:
timeout: 1800
labels:
release_group: osh-ceph-client
install:
no_hooks: False
upgrade:
no_hooks: False
pre:
delete:
- type: job
labels:
release_group: osh-ceph-client
values:
release_uuid: ${RELEASE_UUID}
endpoints:
ceph_mon:
namespace: ceph
network:
public: ${CEPH_NETWORK}
cluster: ${CEPH_NETWORK}
deployment:
ceph: true
bootstrap:
enabled: true
conf:
ceph:
global:
fsid: ${CEPH_FS_ID}
pool:
crush:
tunables: ${CRUSH_TUNABLES}
target:
# NOTE(portdirect): 5 nodes, with one osd per node
osd: 5
pg_per_osd: 100
storage:
osd:
- data:
type: directory
location: /var/lib/openstack-helm/ceph/osd/osd-one
journal:
type: directory
location: /var/lib/openstack-helm/ceph/osd/journal-one
source:
type: local
location: ${OSH_INFRA_PATH}
subpath: ceph-client
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ceph-provisioners
data:
chart_name: ceph-provisioners
release: ceph-provisioners
namespace: ceph
wait:
timeout: 1800
labels:
release_group: osh-ceph-provisioners
install:
no_hooks: False
upgrade:
no_hooks: False
pre:
delete:
- type: job
labels:
release_group: osh-ceph-provisioners
values:
release_uuid: ${RELEASE_UUID}
endpoints:
ceph_mon:
namespace: ceph
network:
public: ${CEPH_NETWORK}
cluster: ${CEPH_NETWORK}
deployment:
ceph: true
rbd_provisioner: true
cephfs_provisioner: true
client_secrets: false
bootstrap:
enabled: true
conf:
ceph:
global:
fsid: ${CEPH_FS_ID}
pool:
crush:
tunables: ${CRUSH_TUNABLES}
target:
# NOTE(portdirect): 5 nodes, with one osd per node
osd: 5
pg_per_osd: 100
storage:
osd:
- data:
type: directory
location: /var/lib/openstack-helm/ceph/osd/osd-one
journal:
type: directory
location: /var/lib/openstack-helm/ceph/osd/journal-one
source:
type: local
location: ${OSH_INFRA_PATH}
subpath: ceph-provisioners
reference: master
dependencies:
- helm-toolkit
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ceph-storage
data:
description: "Ceph Storage"
sequenced: True
chart_group:
- ceph-ingress-controller
- ceph-mon
- ceph-osd
- ceph-client
- ceph-provisioners
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: armada-manifest
data:
release_prefix: osh
chart_groups:
- ceph-storage

View File

@@ -1,81 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
values: {}
source:
type: local
location: ${OSH_INFRA_PATH}
subpath: helm-toolkit
reference: master
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ingress-kube-system
data:
chart_name: ingress-kube-system
release: ingress-kube-system
namespace: kube-system
wait:
timeout: 1800
labels:
release_group: osh-ingress-kube-system
install:
no_hooks: False
upgrade:
no_hooks: False
pre:
delete:
- type: job
labels:
release_group: osh-ingress-kube-system
values:
release_uuid: ${RELEASE_UUID}
labels:
server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
error_server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
pod:
replicas:
error_page: 2
deployment:
mode: cluster
type: DaemonSet
network:
host_namespace: true
source:
type: local
location: ${OSH_INFRA_PATH}
subpath: ingress
reference: master
dependencies:
- helm-toolkit
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: cluster-ingress-controller
data:
description: "Cluster Ingress Controller"
sequenced: False
chart_group:
- ingress-kube-system
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: armada-manifest
data:
release_prefix: osh
chart_groups:
- cluster-ingress-controller

File diff suppressed because it is too large

View File

@@ -136,7 +136,7 @@ data:
oslo_db:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
monitoring:
prometheus:
enabled: True
@@ -355,7 +355,7 @@ data:
oslo_db:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
keystone:
password: ${KEYSTONE_DB_PASSWORD}
oslo_messaging:
@@ -396,6 +396,8 @@ data:
timeout: 1800
labels:
release_group: osh-radosgw-openstack
test:
enabled: false
install:
no_hooks: False
upgrade:
@@ -552,7 +554,7 @@ data:
oslo_db:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
glance:
password: ${GLANCE_DB_PASSWORD}
oslo_cache:
@@ -634,7 +636,7 @@ data:
oslo_db:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
cinder:
password: ${CINDER_DB_PASSWORD}
oslo_cache:
@@ -882,19 +884,19 @@ data:
oslo_db:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
nova:
password: ${NOVA_DB_PASSWORD}
oslo_db_api:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
nova:
password: ${NOVA_DB_PASSWORD}
oslo_db_cell0:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
nova:
password: ${NOVA_DB_PASSWORD}
oslo_cache:
@@ -1084,7 +1086,7 @@ data:
oslo_db:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
neutron:
password: ${NEUTRON_DB_PASSWORD}
oslo_cache:
@@ -1326,7 +1328,7 @@ data:
oslo_db:
auth:
admin:
password: ${DB_ADMIN_PASSWORD}
password: ${OSH_MARIADB_ADMIN_PASSWORD}
heat:
password: ${HEAT_DB_PASSWORD}
oslo_cache:

View File

@@ -1,74 +0,0 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: primary
gather_facts: True
tasks:
- name: installing ipcalc on Ubuntu
become: true
become_user: root
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
apt:
name: ipcalc
state: present
- name: Install python3-pip for armada
environment:
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
shell: |
set -xe;
./tools/deployment/armada/010-armada-host-setup.sh
args:
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
- name: Build armada
environment:
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
shell: |
set -xe;
./tools/deployment/armada/015-armada-build.sh
args:
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
- name: Render all Armada manifests
environment:
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
shell: |
set -xe;
./tools/deployment/armada/020-armada-render-manifests.sh
args:
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
- name: Validate all Armada manifests
environment:
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
shell: |
set -xe;
./tools/deployment/armada/025-armada-validate-manifests.sh
args:
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
- name: Apply all Armada manifests
environment:
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
shell: |
set -xe;
./tools/deployment/armada/030-armada-apply-manifests.sh
args:
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"

View File

@@ -25,7 +25,6 @@
with_items:
- armada-cluster-ingress
- armada-ceph
- armada-lma
- armada-osh
args:
executable: /bin/bash

View File

@@ -176,24 +176,30 @@
- job:
timeout: 9600
name: openstack-helm-armada-fullstack-deploy
name: openstack-helm-armada-deploy
parent: openstack-helm-functional-temp
nodeset: openstack-helm-five-node-ubuntu
roles:
- zuul: openstack/openstack-helm-infra
vars:
zuul_osh_infra_relative_path: ../openstack-helm-infra/
pre-run:
- tools/gate/playbooks/osh-infra-upgrade-host.yaml
- tools/gate/playbooks/osh-infra-deploy-docker.yaml
- tools/gate/playbooks/osh-infra-build.yaml
- tools/gate/playbooks/osh-infra-deploy-k8s.yaml
run: tools/gate/playbooks/armada-fullstack-deploy.yaml
run: tools/gate/playbooks/osh-gate-runner.yaml
post-run:
- tools/gate/playbooks/osh-infra-collect-logs.yaml
- tools/gate/playbooks/gather-armada-manifests.yaml
required-projects:
- openstack/openstack-helm-infra
nodeset: openstack-helm-five-node-ubuntu
vars:
zuul_osh_infra_relative_path: ../openstack-helm-infra/
gate_scripts:
- ./tools/deployment/armada/010-armada-host-setup.sh
- ./tools/deployment/armada/015-armada-build.sh
- ./tools/deployment/armada/020-armada-render-manifests.sh
- ./tools/deployment/armada/025-armada-validate-manifests.sh
- ./tools/deployment/armada/030-armada-apply-manifests.sh
- job:
name: openstack-helm-multinode-temp-ubuntu
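The net effect of this hunk is architectural: the dedicated armada-fullstack-deploy playbook above, with one hand-written shell task per deployment script, gives way to the shared osh-gate-runner playbook driven by the gate_scripts list. Conceptually the runner only needs to loop over that list; the following is a minimal sketch of such a loop, an assumption about the runner's shape rather than the actual osh-gate-runner.yaml:

- hosts: primary
  tasks:
    - name: Run each gate script in order
      shell: |
        set -xe;
        {{ item }}
      args:
        chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
        executable: /bin/bash
      with_items: "{{ gate_scripts }}"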

View File

@@ -44,10 +44,11 @@
jobs:
- openstack-helm-multinode-temp-ubuntu
- openstack-helm-ironic-ubuntu
- openstack-helm-armada-deploy
experimental:
jobs:
# - openstack-helm-multinode-temp-centos
# # NOTE(lamt): Make fedora job experimental until issues with gates are resolved.
# - openstack-helm-multinode-temp-fedora
# - openstack-helm-multinode-temp-tempest
- openstack-helm-armada-fullstack-deploy
- openstack-helm-armada-deploy