Retire kolla-kubernetes project - step 3 remove project content

Depends-On: https://review.openstack.org/567779
Needed-By: https://review.openstack.org/568174
Change-Id: If5bdd602d5f5c8addba70235ac41c0a52cef2c11
Jeffrey Zhang 2018-04-30 21:26:39 +08:00 committed by Jeffrey Zhang
parent 22ed0c232d
commit 434b65c6ef
1060 changed files with 10 additions and 46408 deletions


@ -1,7 +0,0 @@
[run]
branch = True
source = kolla_kubernetes
omit = kolla_kubernetes/tests/*
[report]
ignore_errors = True

.gitignore

@ -1,60 +0,0 @@
*.py[cod]
# C extensions
*.so
# helm / chart
values.yaml
**/charts
helm/**/*.lock
# Packages
*.egg*
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
cover/
.coverage*
!.coveragerc
.tox
nosetests.xml
.testrepository
.venv
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Complexity
output/*.html
output/*/index.html
# Sphinx
doc/build
# pbr generates these
AUTHORS
ChangeLog
# Editors
*~
.*.swp
.*sw?


@ -1,3 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>


@ -1,7 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-240} \
${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list


@ -1,351 +0,0 @@
- project:
check:
jobs:
- kolla-kubernetes-deploy-centos-binary-2-ceph-multi:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-2-ceph:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-2-external-ovs:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-2-iscsi:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-ubuntu-binary-2-iscsi:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-ubuntu-binary-2-ceph:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-3-ceph-multi:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-source-4-ironic:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-source-4-microchart-ansible:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
experimental:
jobs:
- kolla-kubernetes-deploy-centos-binary-2-helm-operator:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-2-ceph-reboot:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-4-ceph-multi:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-source-4-helm-compute-kit:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-source-4-helm-entrypoint:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-ubuntu-source-4-ironic:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-upgrade-centos-binary-2-ceph:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-upgrade-centos-binary-3-ceph:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-binary-t-ceph-multi:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-centos-source-t-iscsi:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- kolla-kubernetes-deploy-ubuntu-source-t-iscsi:
irrelevant-files:
- ^.*\.rst$
- ^deploy-guide/source/.*
- ^doc/.*
- job:
name: kolla-kubernetes-base
description: |
This job provides the base required projects for
legacy kolla-kubernetes jobs.
parent: legacy-base
required-projects:
- openstack/requirements
- job:
name: kolla-kubernetes-deploy-centos-binary-2-ceph
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-2-ceph-multi
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-multi/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-multi/post.yaml
nodeset: legacy-centos-7-2-node
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-2-ceph-reboot
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-reboot/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-reboot/post.yaml
nodeset: legacy-centos-7-2-node
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-2-external-ovs
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-external-ovs/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-external-ovs/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-2-helm-operator
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-operator/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-operator/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-2-iscsi
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-iscsi/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-iscsi/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-3-ceph-multi
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-3-ceph-multi/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-3-ceph-multi/post.yaml
nodeset: legacy-centos-7-2-node
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-4-ceph-multi
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-ceph-multi/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-ceph-multi/post.yaml
nodeset: legacy-centos-7-2-node
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-binary-t-ceph-multi
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-t-ceph-multi/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-t-ceph-multi/post.yaml
nodeset: legacy-centos-7-2-node
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-source-4-helm-compute-kit
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-compute-kit/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-compute-kit/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-source-4-helm-entrypoint
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-entrypoint/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-entrypoint/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-source-4-ironic
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-ironic/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-ironic/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-source-4-microchart-ansible
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-microchart-ansible/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-microchart-ansible/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-centos-source-t-iscsi
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-t-iscsi/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-t-iscsi/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-deploy-ubuntu-binary-2-ceph
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-ceph/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-ceph/post.yaml
nodeset: legacy-ubuntu-xenial
timeout: 3600
- job:
name: kolla-kubernetes-deploy-ubuntu-binary-2-iscsi
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-iscsi/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-iscsi/post.yaml
nodeset: legacy-ubuntu-xenial
timeout: 3600
- job:
name: kolla-kubernetes-deploy-ubuntu-source-4-ironic
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-ironic/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-ironic/post.yaml
nodeset: legacy-ubuntu-xenial
timeout: 3600
- job:
name: kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible/post.yaml
nodeset: legacy-ubuntu-xenial
timeout: 3600
- job:
name: kolla-kubernetes-deploy-ubuntu-source-t-iscsi
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-t-iscsi/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-t-iscsi/post.yaml
nodeset: legacy-ubuntu-xenial
timeout: 3600
- job:
name: kolla-kubernetes-upgrade-centos-binary-2-ceph
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-2-ceph/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-2-ceph/post.yaml
nodeset: legacy-centos-7
timeout: 3600
- job:
name: kolla-kubernetes-upgrade-centos-binary-3-ceph
parent: kolla-kubernetes-base
run: tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-3-ceph/run.yaml
post-run: tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-3-ceph/post.yaml
nodeset: legacy-centos-7
timeout: 3600


@ -1,17 +0,0 @@
If you would like to contribute to the development of OpenStack, you must
follow the steps on this page:
http://docs.openstack.org/infra/manual/developers.html
If you already have a good understanding of how the system works and your
OpenStack accounts are set up, you can skip to the development workflow
section of this documentation to learn how changes to OpenStack should be
submitted for review via the Gerrit tool:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/kolla-kubernetes


@ -1,22 +0,0 @@
FROM ubuntu:16.04
RUN apt-get update && apt-get -y install python-dev curl libffi-dev gcc libssl-dev sshpass wget crudini git vim
RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py \
&& python get-pip.py \
&& rm get-pip.py
RUN pip install ansible==2.2.* oslo_config
ENV HELM_LATEST_VERSION="v2.7.2"
ENV KUBE_LATEST_VERSION="v1.8.4"
RUN wget http://storage.googleapis.com/kubernetes-helm/helm-${HELM_LATEST_VERSION}-linux-amd64.tar.gz \
&& tar -xvf helm-${HELM_LATEST_VERSION}-linux-amd64.tar.gz \
&& mv linux-amd64/helm /usr/local/bin \
&& rm -f /helm-${HELM_LATEST_VERSION}-linux-amd64.tar.gz
RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \
&& chmod +x /usr/local/bin/kubectl
ADD . /kolla-kubernetes
RUN pip install -U /kolla-kubernetes/
RUN cp -a /kolla-kubernetes/etc/* /etc


@ -1,4 +0,0 @@
kolla-kubernetes Style Commandments
===============================================
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/

LICENSE

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.


@ -1,83 +1,13 @@
========================
Team and repository tags
========================
This project is no longer maintained.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout 22ed0c232d7666afb6e288001b8814deea664992".
For an alternative, consider checking the project at
http://github.com/openstack/openstack-helm.
.. image:: http://governance.openstack.org/badges/kolla-kubernetes.svg
:target: http://governance.openstack.org/reference/tags/index.html
.. Change things from this point on
==============
Kolla Overview
==============
The Kolla project is a member of the OpenStack `Big Tent
Governance <https://governance.openstack.org/tc/reference/projects/index.html>`__.
Kolla's mission statement is:
::
To provide production-ready containers and deployment tools for operating
OpenStack clouds.
================
kolla-kubernetes
================
The kolla-kubernetes deliverable deploys OpenStack on top of Kubernetes.
This work is experimental at this time. A 1.0.0 or later version will signal
that this work is ready for evaluation.
The kolla-kubernetes Repository
===============================
The kolla-kubernetes repository is one of three deliverables of the
OpenStack Kolla project. The three deliverables that make up the Kolla
project are:
================ =========================================================
Deliverable Repository
================ =========================================================
kolla https://git.openstack.org/openstack/kolla
kolla-ansible https://git.openstack.org/openstack/kolla-ansible
kolla-kubernetes https://git.openstack.org/openstack/kolla-kubernetes
================ =========================================================
The kolla deliverable maintains container images and container build tools.
The kolla-ansible deliverable maintains an Ansible deployment for Kolla
containers on bare metal.
The kolla-kubernetes deliverable maintains a cloud-native implementation
of deployment of OpenStack on Kubernetes.
Getting Involved
================
Need a feature? Find a bug? Let us know! Contributions are much
appreciated and should follow the standard `Gerrit
workflow <http://docs.openstack.org/infra/manual/developers.html>`__.
- We communicate using the #openstack-kolla irc channel.
- File bugs, blueprints, track releases, etc on
`Launchpad <https://launchpad.net/kolla-kubernetes>`__.
- Attend weekly
`meetings <https://wiki.openstack.org/wiki/Meetings/Kolla>`__.
- Contribute `code <https://git.openstack.org/openstack/kolla-kubernetes>`__.
General Information
===================
* Free software: Apache license
* Documentation: http://docs.openstack.org/developer/kolla-kubernetes
* Source: http://git.openstack.org/cgit/openstack/kolla-kubernetes
* Bugs: http://bugs.launchpad.net/kolla-kubernetes
Notices
=======
Docker and the Docker logo are trademarks or registered trademarks of
Docker, Inc. in the United States and/or other countries. Docker, Inc.
and other parties may also have trademark rights in other terms used herein.
For any further questions, please email
openstack-dev@lists.openstack.org with the tagline [kolla] or join
#openstack-kolla on Freenode.


@ -1,156 +0,0 @@
#!/usr/bin/env python
# Copyright 2015 Sam Yaple
# Copyright 2017 99Cloud Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import inspect
import os
from ansible.plugins import action
from six import StringIO
from oslo_config import iniparser
class OverrideConfigParser(iniparser.BaseParser):
def __init__(self):
self._cur_sections = collections.OrderedDict()
self._sections = collections.OrderedDict()
self._cur_section = None
def assignment(self, key, value):
cur_value = self._cur_section.get(key)
if len(value) == 1 and value[0] == '':
value = []
if not cur_value:
self._cur_section[key] = [value]
else:
self._cur_section[key].append(value)
def parse(self, lineiter):
self._cur_sections = collections.OrderedDict()
super(OverrideConfigParser, self).parse(lineiter)
# merge _cur_sections into _sections
for section, values in self._cur_sections.items():
if section not in self._sections:
self._sections[section] = collections.OrderedDict()
for key, value in values.items():
self._sections[section][key] = value
def new_section(self, section):
cur_section = self._cur_sections.get(section)
if not cur_section:
cur_section = collections.OrderedDict()
self._cur_sections[section] = cur_section
self._cur_section = cur_section
return cur_section
def write(self, fp):
def write_key_value(key, values):
for v in values:
if not v:
fp.write('{} =\n'.format(key))
for index, value in enumerate(v):
if index == 0:
fp.write('{} = {}\n'.format(key, value))
else:
fp.write('{} {}\n'.format(len(key) * ' ', value))
def write_section(section):
for key, values in section.items():
write_key_value(key, values)
for section in self._sections:
fp.write('[{}]\n'.format(section))
write_section(self._sections[section])
fp.write('\n')
class ActionModule(action.ActionBase):
TRANSFERS_FILES = True
def read_config(self, source, config):
# Only use config if present
if os.access(source, os.R_OK):
with open(source, 'r') as f:
template_data = f.read()
result = self._templar.template(template_data)
fakefile = StringIO(result)
config.parse(fakefile)
fakefile.close()
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
# NOTE(jeffrey4l): Ansible 2.1 added a remote_user param to the
# _make_tmp_path function. Inspect the number of args here so that
# both Ansible 2.0 and Ansible 2.1 are supported.
make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0]
if not tmp and len(make_tmp_path_args) == 1:
tmp = self._make_tmp_path()
if not tmp and len(make_tmp_path_args) == 2:
remote_user = (task_vars.get('ansible_user')
or self._play_context.remote_user)
tmp = self._make_tmp_path(remote_user)
sources = self._task.args.get('sources', None)
extra_vars = self._task.args.get('vars', list())
if not isinstance(sources, list):
sources = [sources]
temp_vars = task_vars.copy()
temp_vars.update(extra_vars)
config = OverrideConfigParser()
old_vars = self._templar._available_variables
self._templar.set_available_variables(temp_vars)
for source in sources:
self.read_config(source, config)
self._templar.set_available_variables(old_vars)
# Dump configparser to string via an emulated file
fakefile = StringIO()
config.write(fakefile)
remote_path = self._connection._shell.join_path(tmp, 'src')
xfered = self._transfer_data(remote_path, fakefile.getvalue())
fakefile.close()
new_module_args = self._task.args.copy()
new_module_args.pop('vars', None)
new_module_args.pop('sources', None)
new_module_args.update(
dict(
src=xfered
)
)
result.update(self._execute_module(module_name='copy',
module_args=new_module_args,
task_vars=task_vars,
tmp=tmp))
return result
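For reference, here is a minimal standalone sketch (hypothetical usage, not part of the plugin) of the merge behaviour OverrideConfigParser implements, assuming oslo.config's iniparser API behaves as it is used above: later sources override earlier ones key by key within each section.

import io

parser = OverrideConfigParser()
# Parse a base config, then an override; parse order determines precedence.
parser.parse(io.StringIO("[DEFAULT]\ndebug = False\nverbose = True\n"))
parser.parse(io.StringIO("[DEFAULT]\ndebug = True\n"))
out = io.StringIO()
parser.write(out)
print(out.getvalue())
# [DEFAULT]
# debug = True
# verbose = True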


@ -1,96 +0,0 @@
#!/usr/bin/env python
# Copyright 2015 Sam Yaple
# Copyright 2016 intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from yaml import dump
from yaml import safe_load
try:
from yaml import CDumper as Dumper # noqa: F401
from yaml import CLoader as Loader # noqa: F401
except ImportError:
from yaml import Dumper # noqa: F401
from yaml import Loader # noqa: F401
from ansible.plugins import action
class ActionModule(action.ActionBase):
TRANSFERS_FILES = True
def read_config(self, source):
result = None
# Only use config if present
if os.access(source, os.R_OK):
with open(source, 'r') as f:
template_data = f.read()
template_data = self._templar.template(template_data)
result = safe_load(template_data)
return result or {}
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
# NOTE(jeffrey4l): Ansible 2.1 added a remote_user param to the
# _make_tmp_path function. Inspect the number of args here so that
# both Ansible 2.0 and Ansible 2.1 are supported.
make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0]
if not tmp and len(make_tmp_path_args) == 1:
tmp = self._make_tmp_path()
if not tmp and len(make_tmp_path_args) == 2:
remote_user = (task_vars.get('ansible_user')
or self._play_context.remote_user)
tmp = self._make_tmp_path(remote_user)
# save template args.
extra_vars = self._task.args.get('vars', list())
old_vars = self._templar._available_variables
temp_vars = task_vars.copy()
temp_vars.update(extra_vars)
self._templar.set_available_variables(temp_vars)
output = {}
sources = self._task.args.get('sources', None)
if not isinstance(sources, list):
sources = [sources]
for source in sources:
output.update(self.read_config(source))
# restore original vars
self._templar.set_available_variables(old_vars)
remote_path = self._connection._shell.join_path(tmp, 'src')
xfered = self._transfer_data(remote_path,
dump(output,
default_flow_style=False))
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=xfered
)
)
del new_module_args['sources']
result.update(self._execute_module(module_name='copy',
module_args=new_module_args,
task_vars=task_vars,
tmp=tmp))
return result
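As an aside, a minimal sketch (hypothetical, not part of the plugin) of the merge semantics used above: each source is loaded with yaml.safe_load and folded in with dict.update, so later sources win and nested mappings are replaced rather than deep-merged.

from yaml import safe_load

defaults = safe_load("a: 1\nb:\n  x: 1\n")
override = safe_load("b:\n  y: 2\nc: 3\n")
merged = {}
merged.update(defaults)   # mirrors output.update(self.read_config(source)) above
merged.update(override)
# merged == {'a': 1, 'b': {'y': 2}, 'c': 3}; 'b' is replaced, not deep-merged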


@ -1,6 +0,0 @@
---
- name: Destroy the existing Kolla-Kubernetes deployment
hosts: localhost
connection: local
roles:
- destroy


@ -1,559 +0,0 @@
---
# The options in this file can be overridden in 'globals.yml'
# The "temp" files that are created before merge need to stay persistent due
# to the fact that ansible will register a "change" if it has to create them
# again. Persistent files allow for idempotency
container_config_directory: "/var/lib/kolla/config_files"
# The directory of custom config files to merge with kolla's config files
node_custom_config: "/etc/kolla/config"
# The project to generate configuration files for
project: ""
# The directory to store the config files on the destination node
node_config_directory: "/etc/kolla/{{ project }}"
###################
# Kolla options
###################
std_logger: true
# Which orchestration engine to use. Valid options are [ ANSIBLE, KUBERNETES ]
orchestration_engine: "KUBERNETES"
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
config_strategy: "COPY_ALWAYS"
# Valid options are [ centos, oraclelinux, ubuntu ]
kolla_base_distro: "centos"
# Valid options are [ binary, source ]
kolla_install_type: "binary"
kolla_internal_vip_address: "{{ kolla_internal_address }}"
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_external_vip_address == kolla_internal_vip_address else kolla_external_vip_address }}"
kolla_enable_sanity_checks: "no"
kolla_enable_sanity_keystone: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_glance: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_cinder: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_swift: "{{ kolla_enable_sanity_checks }}"
####################
# kolla-kubernetes
####################
# By default, Kolla API services bind to the network address assigned
# to the api_interface. Allow the bind address to be overridden. In
# some cases (Kubernetes), the api_interface address is not known
# until container runtime, and thus it is necessary to bind to all
# interfaces "0.0.0.0". When used outside of Kubernetes, binding to
# all interfaces may present a security issue, and thus is not
# recommended.
api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}"
################
# Chrony options
################
# A list of NTP servers
external_ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
- 3.pool.ntp.org
####################
# Database options
####################
database_address: "{{ kolla_internal_fqdn }}"
database_user: "root"
database_port: "3306"
keystone_database_address: "mariadb"
glance_database_address: "mariadb"
nova_database_address: "mariadb"
nova_api_database_address: "mariadb"
neutron_database_address: "mariadb"
cinder_database_address: "mariadb"
ironic_database_address: "mariadb"
placement_database_address: "mariadb"
####################
# Docker options
####################
docker_registry_email:
docker_registry:
docker_namespace: "kolla"
docker_registry_username:
# Valid options are [ never, on-failure, always, unless-stopped ]
docker_restart_policy: "unless-stopped"
# '0' means unlimited retries
docker_restart_policy_retry: "10"
# Common options used throughout Docker
docker_common_options:
auth_email: "{{ docker_registry_email }}"
auth_password: "{{ docker_registry_password }}"
auth_registry: "{{ docker_registry }}"
auth_username: "{{ docker_registry_username }}"
environment:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
restart_policy: "{{ docker_restart_policy }}"
restart_retries: "{{ docker_restart_policy_retry }}"
####################
# keepalived options
####################
# Arbitrary unique number from 0..255
keepalived_virtual_router_id: "51"
####################
# Networking options
####################
network_interface: "eth0"
neutron_external_interface: "eth1"
kolla_external_vip_interface: "{{ network_interface }}"
api_interface: "{{ network_interface }}"
storage_interface: "{{ network_interface }}"
cluster_interface: "{{ network_interface }}"
tunnel_interface: "{{ network_interface }}"
bifrost_network_interface: "{{ network_interface }}"
dns_interface: "{{ network_interface }}"
tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + tunnel_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}"
# Valid options are [ openvswitch, linuxbridge, sfc ]
neutron_plugin_agent: "openvswitch"
# The default ports used by each service.
aodh_api_port: "8042"
barbican_api_port: "9311"
ceilometer_api_port: "8777"
congress_api_port: "1789"
cloudkitty_api_port: "8889"
designate_api_port: "9001"
designate_bind_port: "53"
designate_mdns_port: "5354"
designate_rndc_port: "953"
freezer_api_port: "9090"
iscsi_port: "3260"
gnocchi_api_port: "8041"
mariadb_port: "{{ database_port }}"
mariadb_wsrep_port: "4567"
mariadb_ist_port: "4568"
mariadb_sst_port: "4444"
panko_api_port: "8977"
rabbitmq_port: "5672"
rabbitmq_management_port: "15672"
rabbitmq_cluster_port: "25672"
rabbitmq_epmd_port: "4369"
rabbitmq_servers: "rabbitmq"
mongodb_port: "27017"
mongodb_web_port: "28017"
haproxy_stats_port: "1984"
keystone_public_port: "5000"
keystone_admin_port: "35357"
keystone_ssh_port: "8023"
glance_api_port: "9292"
glance_registry_port: "9191"
octavia_api_port: "9876"
octavia_health_manager_port: "5555"
placement_api_port: "8780"
nova_api_port: "8774"
nova_metadata_port: "8775"
nova_novncproxy_port: "6080"
nova_spicehtml5proxy_port: "6082"
nova_serialproxy_port: "6083"
neutron_server_port: "9696"
cinder_api_port: "8776"
memcached_servers: "memcached"
memcached_port: "11211"
swift_proxy_server_port: "8080"
swift_object_server_port: "6000"
swift_account_server_port: "6001"
swift_container_server_port: "6002"
swift_rsync_port: "10873"
sahara_api_port: "8386"
heat_api_port: "8004"
heat_api_cfn_port: "8000"
horizon_port: "80"
murano_api_port: "8082"
ironic_api_port: "6385"
ironic_inspector_port: "5050"
magnum_api_port: "9511"
solum_application_deployment_port: "9777"
solum_image_builder_port: "9778"
rgw_port: "6780"
mistral_api_port: "8989"
kibana_server_port: "5601"
elasticsearch_port: "9200"
manila_api_port: "8786"
watcher_api_port: "9322"
influxdb_admin_port: "8083"
influxdb_http_port: "8086"
senlin_api_port: "8778"
trove_api_port: "8779"
etcd_client_port: "2379"
etcd_peer_port: "2380"
karbor_api_port: "8799"
kuryr_port: "23750"
searchlight_api_port: "9393"
grafana_server_port: "3000"
tacker_server_port: "9890"
fluentd_syslog_port: "5140"
public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
internal_protocol: "http"
admin_protocol: "http"
####################
# OpenStack options
####################
openstack_release: "auto"
openstack_logging_debug: "False"
openstack_region_name: "RegionOne"
# In a multi-region context, list the names of all your regions here.
multiple_regions_names:
- "{{ openstack_region_name }}"
openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min if orchestration_engine == 'ANSIBLE' else '1'}}"
# Optionally allow Kolla to set sysctl values
set_sysctl: "yes"
# Valid options are [ novnc, spice ]
nova_console: "novnc"
# OpenStack authentication string. You should only need to override these if you
# are changing the admin tenant/project or user.
openstack_auth:
auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
username: "admin"
password: "{{ keystone_admin_password }}"
project_name: "admin"
domain_name: "default"
# These roles are required for Kolla to be operational; however, a savvy deployer
# could disable some of these required roles and run their own services.
enable_glance: "yes"
enable_haproxy: "yes"
enable_keystone: "yes"
enable_mariadb: "yes"
enable_memcached: "yes"
enable_neutron: "yes"
enable_nova: "yes"
enable_rabbitmq: "yes"
# Additional optional OpenStack features and services are specified here
enable_aodh: "no"
enable_barbican: "no"
enable_cadf_notifications: "no"
enable_ceilometer: "no"
enable_central_logging: "no"
enable_ceph: "no"
enable_ceph_rgw: "no"
enable_chrony: "no"
enable_cinder: "no"
enable_cinder_backend_hnas_iscsi: "no"
enable_cinder_backend_hnas_nfs: "no"
enable_cinder_backend_iscsi: "no"
enable_cinder_backend_lvm: "no"
enable_cinder_backend_nfs: "no"
enable_cloudkitty: "no"
enable_congress: "no"
enable_designate: "no"
enable_etcd: "no"
enable_freezer: "no"
enable_gnocchi: "no"
enable_grafana: "no"
enable_heat: "yes"
enable_horizon: "yes"
enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}"
enable_horizon_freezer: "{{ enable_freezer | bool }}"
enable_horizon_ironic: "{{ enable_ironic | bool }}"
enable_horizon_karbor: "{{ enable_karbor | bool }}"
enable_horizon_magnum: "{{ enable_magnum | bool }}"
enable_horizon_manila: "{{ enable_manila | bool }}"
enable_horizon_mistral: "{{ enable_mistral | bool }}"
enable_horizon_murano: "{{ enable_murano | bool }}"
enable_horizon_neutron_lbaas: "{{ enable_neutron_lbaas | bool }}"
enable_horizon_sahara: "{{ enable_sahara | bool }}"
enable_horizon_searchlight: "{{ enable_searchlight | bool }}"
enable_horizon_senlin: "{{ enable_senlin | bool }}"
enable_horizon_solum: "{{ enable_solum | bool }}"
enable_horizon_tacker: "{{ enable_tacker | bool }}"
enable_horizon_trove: "{{ enable_trove | bool }}"
enable_horizon_watcher: "{{ enable_watcher | bool }}"
enable_influxdb: "no"
enable_ironic: "no"
enable_iscsid: "{{ enable_cinder_backend_iscsi | bool or enable_cinder_backend_lvm | bool or enable_ironic | bool }}"
enable_karbor: "no"
enable_kuryr: "no"
enable_magnum: "no"
enable_manila: "no"
enable_manila_backend_generic: "no"
enable_manila_backend_hnas: "no"
enable_mistral: "no"
enable_mongodb: "no"
enable_multipathd: "no"
enable_murano: "no"
enable_neutron_vpnaas: "no"
enable_neutron_dvr: "no"
enable_neutron_lbaas: "no"
enable_neutron_fwaas: "no"
enable_neutron_qos: "no"
enable_neutron_agent_ha: "no"
enable_neutron_bgp_dragent: "no"
enable_nova_serialconsole_proxy: "no"
enable_octavia: "no"
enable_panko: "no"
enable_rally: "no"
enable_sahara: "no"
enable_searchlight: "no"
enable_senlin: "no"
enable_solum: "no"
enable_swift: "no"
enable_tacker: "no"
enable_telegraf: "no"
enable_tempest: "no"
enable_trove: "no"
enable_vmtp: "no"
enable_watcher: "no"
enable_placement: "yes"
ironic_keystone_user: "ironic"
neutron_keystone_user: "neutron"
nova_keystone_user: "nova"
designate_keystone_user: "designate"
# Nova fake driver and the number of fake drivers per compute node
enable_nova_fake: "no"
num_nova_fake_per_node: 5
# Monitoring options are specified here
enable_collectd: "no"
# Clean images options are specified here
enable_destroy_images: "no"
####################
# Logging options
####################
elasticsearch_address: "{{ kolla_internal_vip_address }}"
elasticsearch_protocol: "{{ internal_protocol }}"
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_freezer | bool else 'no' }}"
enable_kibana: "{{ 'yes' if enable_central_logging | bool else 'no' }}"
####################
# RabbitMQ options
####################
rabbitmq_user: "openstack"
rabbitmq_version: "rabbitmq_server-3.6/plugins/rabbitmq_clusterer-3.6.x.ez/rabbitmq_clusterer-3.6.x-667f92b0/ebin"
####################
# HAProxy options
####################
haproxy_user: "openstack"
haproxy_enable_external_vip: "{{ 'no' if kolla_external_vip_address == kolla_internal_vip_address else 'yes' }}"
kolla_enable_tls_external: "no"
kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"
kolla_external_fqdn_cacert: "{{ node_config_directory }}/certificates/haproxy-ca.crt"
####################
# Kibana options
####################
kibana_user: "kibana"
####################
# Keystone options
####################
keystone_admin_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3"
keystone_internal_url: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3"
keystone_public_url: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ keystone_public_port }}/v3"
# Valid options are [ uuid, fernet ]
keystone_token_provider: "fernet"
fernet_token_expiry: 86400
keystone_default_user_role: "_member_"
#######################
# Glance options
#######################
glance_backend_file: "{{ not enable_ceph | bool }}"
glance_backend_ceph: "{{ enable_ceph }}"
glance_registry_host: "glance-registry"
#######################
# Ceilometer options
#######################
# Valid options are [ mongodb, mysql, gnocchi ]
ceilometer_database_type: "mongodb"
# Valid options are [ mongodb, gnocchi, panko ]
ceilometer_event_type: "mongodb"
########################
### Panko options
########################
# Valid options are [ mongodb, mysql ]
panko_database_type: "mysql"
#################
# Gnocchi options
#################
# Valid options are [ file, ceph ]
gnocchi_backend_storage: "{{ 'ceph' if enable_ceph|bool else 'file' }}"
#################################
# Cinder options
#################################
cinder_backend_ceph: "{{ enable_ceph }}"
cinder_volume_group: "cinder-volumes"
cinder_backup_driver: "nfs"
cinder_backup_share: ""
cinder_backup_mount_options_nfs: ""
#######################
# Cloudkitty options
#######################
# Valid options are [ ceilometer, gnocchi ]
cloudkitty_collector_backend: "ceilometer"
#######################
# Designate options
#######################
# Valid options are [ bind9 ]
designate_backend: "bind9"
designate_ns_record: "sample.openstack.org"
#######################
# Neutron options
#######################
neutron_bgp_router_id: "1.1.1.1"
neutron_host: "neutron"
#######################
# Nova options
#######################
nova_backend_ceph: "{{ enable_ceph }}"
nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
# Valid options are [ kvm, qemu ]
nova_compute_virt_type: "kvm"
#######################
# Horizon options
#######################
horizon_backend_database: "{{ enable_murano | bool }}"
#################
# Octavia options
#################
# Load balancer topology options are [ SINGLE, ACTIVE_STANDBY ]
octavia_loadbalancer_topology: "SINGLE"
octavia_amp_boot_network_list:
octavia_amp_secgroup_list:
octavia_amp_flavor_id:
###################
# Ceph options
###################
# Ceph can be set up with caching to improve performance. To use the cache, you
# must provide disks separate from those used for the OSDs
ceph_enable_cache: "no"
# Valid options are [ forward, none, writeback ]
ceph_cache_mode: "writeback"
# Valid options are [ ext4, btrfs, xfs ]
ceph_osd_filesystem: "xfs"
# Set to 'yes-i-really-really-mean-it' to force wipe disks with existing partitions for OSDs. Only
# set if you understand the consequences!
ceph_osd_wipe_disk: ""
# These are /etc/fstab options. Comma separated, no spaces (see fstab(8))
ceph_osd_mount_options: "defaults,noatime"
# Using erasure-coded pools requires setting up a cache tier
# Valid options are [ erasure, replicated ]
ceph_pool_type: "replicated"
# Integrate Ceph Rados Object Gateway with OpenStack keystone
enable_ceph_rgw_keystone: "no"
ceph_cinder_pool_name: "volumes"
ceph_cinder_backup_pool_name: "backups"
ceph_glance_pool_name: "images"
ceph_gnocchi_pool_name: "gnocchi"
ceph_nova_pool_name: "vms"
ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host"
ceph_rule: "default host {{ 'indep' if ceph_pool_type == 'erasure' else 'firstn' }}"
ceph_cache_rule: "cache host firstn"


@ -1,51 +0,0 @@
#!/usr/bin/env python
# Copyright 2015 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: merge_configs
short_description: Merge ini-style configs
description:
- ConfigParser is used to merge several ini-style configs into one
options:
dest:
description:
- The destination file name
required: True
type: str
sources:
description:
- A list of files on the destination node to merge together
default: None
required: True
type: str
author: Sam Yaple
'''
EXAMPLES = '''
Merge multiple configs:
- hosts: database
tasks:
- name: Merge configs
merge_configs:
sources:
- "/tmp/config_1.cnf"
- "/tmp/config_2.cnf"
- "/tmp/config_3.cnf"
dest: "/etc/mysql/my.cnf"
'''


@ -1,51 +0,0 @@
#!/usr/bin/env python
# Copyright 2015 Sam Yaple
# Copyright 2016 intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: merge_yaml
short_description: Merge yaml-style configs
description:
- PyYAML is used to merge several yaml files into one
options:
dest:
description:
- The destination file name
required: True
type: str
sources:
description:
- A list of files on the destination node to merge together
default: None
required: True
type: str
author: Sean Mooney
'''
EXAMPLES = '''
Merge multiple yaml files:
- hosts: localhost
tasks:
- name: Merge yaml files
merge_yaml:
sources:
- "/tmp/default.yml"
- "/tmp/override.yml"
dest: "/tmp/out.yml"
'''


@ -1,37 +0,0 @@
---
project_name: "ceph"
####################
# Docker
####################
ceph_mon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-mon"
ceph_mon_tag: "{{ openstack_release }}"
ceph_mon_image_full: "{{ ceph_mon_image }}:{{ ceph_mon_tag }}"
ceph_osd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-osd"
ceph_osd_tag: "{{ openstack_release }}"
ceph_osd_image_full: "{{ ceph_osd_image }}:{{ ceph_osd_tag }}"
ceph_rgw_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-rgw"
ceph_rgw_tag: "{{ openstack_release }}"
ceph_rgw_image_full: "{{ ceph_rgw_image }}:{{ ceph_rgw_tag }}"
####################
# Ceph
####################
osd_initial_weight: "1"
####################
## Ceph_rgw_keystone
####################
swift_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ rgw_port }}/swift/v1"
swift_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ rgw_port }}/swift/v1"
swift_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ rgw_port }}/swift/v1"
openstack_swift_auth: "{{ openstack_auth }}"
####################
# Kolla
####################
kolla_ceph_use_udev: True


@ -1,33 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
recurse: yes
with_items:
- "ceph-mon"
- "ceph-osd"
- "ceph-rgw"
- name: Copying over config.json files for services
template:
src: "{{ item }}.json.j2"
dest: "{{ node_config_directory }}/{{ item }}/config.json"
with_items:
- "ceph-mon"
- "ceph-osd"
- "ceph-rgw"
- name: Copying over ceph.conf
merge_configs:
vars:
service_name: "{{ item }}"
sources:
- "{{ role_path }}/templates/ceph.conf.j2"
- "{{ node_custom_config }}/ceph.conf"
- "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
with_items:
- "ceph-mon"
- "ceph-osd"
- "ceph-rgw"


@ -1,2 +0,0 @@
---
- include: "config.yml"


@ -1,43 +0,0 @@
{
{%- if orchestration_engine == 'KUBERNETES' %}
"command": "/usr/bin/ceph-mon -f -i @MONID@ --public-addr @MONADDR@:6789",
{%- else %}
"command": "/usr/bin/ceph-mon -f -i {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }} --public-addr {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}:6789",
{%- endif %}
"config_files": [
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.admin.keyring",
"dest": "/etc/ceph/ceph.client.admin.keyring",
"owner": "ceph",
"perm": "0600",
"optional": true
},
{
"source": "{{ container_config_directory }}/ceph.client.mon.keyring",
"dest": "/etc/ceph/ceph.client.mon.keyring",
"owner": "ceph",
"perm": "0600",
"optional": true
},
{
"source": "{{ container_config_directory }}/ceph.client.radosgw.keyring",
"dest": "/etc/ceph/ceph.client.radosgw.keyring",
"owner": "ceph",
"perm": "0600",
"optional": true
},
{
"source": "{{ container_config_directory }}/ceph.monmap",
"dest": "/etc/ceph/ceph.monmap",
"owner": "ceph",
"perm": "0600",
"optional": true
}
]
}


@ -1,21 +0,0 @@
{
{%- if orchestration_engine == 'KUBERNETES' %}
"command": "/usr/bin/ceph-osd -f --public-addr @HOSTADDR@ --cluster-addr @CLUSTERADDR@",
{%- else %}
"command": "/usr/bin/ceph-osd -f --public-addr {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }} --cluster-addr {{ hostvars[inventory_hostname]['ansible_' + cluster_interface]['ipv4']['address'] }}",
{%- endif %}
"config_files": [
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.admin.keyring",
"dest": "/etc/ceph/ceph.client.admin.keyring",
"owner": "ceph",
"perm": "0600"
}
]
}


@ -1,23 +0,0 @@
{
"command": "/usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway -f",
"config_files": [
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.admin.keyring",
"dest": "/etc/ceph/ceph.client.admin.keyring",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.radosgw.keyring",
"dest": "/etc/ceph/ceph.client.radosgw.keyring",
"owner": "ceph",
"perm": "0600"
}
]
}

View File

@ -1,46 +0,0 @@
[global]
{% if std_logger %}
log to syslog = false
err to syslog = false
log to stderr = true
err to stderr = true
{% else %}
log file = /var/log/kolla/ceph/$cluster-$name.log
log to syslog = false
err to syslog = false
log to stderr = false
err to stderr = false
{% endif %}
fsid = {{ ceph_cluster_fsid }}
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
# NOTE(inc0): This line means that if Ceph was upgraded, it will run as root
# until the contents of /var/lib/ceph are chowned to the ceph user.
# This change was introduced in the Jewel release, and the chown operation
# should be included in the upgrade procedure. https://bugs.launchpad.net/kolla/+bug/1620702
setuser match path = /var/lib/ceph/$type/$cluster-$id
[mon]
# NOTE(SamYaple): The monitor files have been known to grow very large. The
# only fix for that is to compact the files.
mon compact on start = true
mon cluster log file = /var/log/kolla/ceph/$cluster.log
{% if service_name is defined and service_name == 'ceph-rgw' %}
[client.radosgw.gateway]
{% if enable_ceph_rgw_keystone | bool %}
rgw_keystone_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
rgw_keystone_admin_user = {{ openstack_auth.username }}
rgw_keystone_admin_password = {{ openstack_auth.password }}
rgw_keystone_admin_project = {{ openstack_auth.project_name }}
rgw_keystone_admin_domain = default
rgw_keystone_api_version = 3
rgw_keystone_accepted_roles = admin, {{ keystone_default_user_role }}
{% endif %}
keyring = /etc/ceph/ceph.client.radosgw.keyring
log file = /var/log/kolla/ceph/client.radosgw.gateway.log
{% endif %}

View File

@ -1,160 +0,0 @@
---
project_name: "cinder"
cinder_services:
cinder-api:
container_name: cinder_api
group: cinder-api
enabled: true
image: "{{ cinder_api_image_full }}"
volumes:
- "{{ node_config_directory }}/cinder-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
cinder-scheduler:
container_name: cinder_scheduler
group: cinder-scheduler
enabled: true
image: "{{ cinder_scheduler_image_full }}"
volumes:
- "{{ node_config_directory }}/cinder-scheduler/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
cinder-volume:
container_name: cinder_volume
group: cinder-volume
enabled: true
image: "{{ cinder_volume_image_full }}"
privileged: True
ipc_mode: "host"
volumes:
- "{{ node_config_directory }}/cinder-volume/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/dev/:/dev/"
- "/run/:/run/:shared"
- "{% if enable_iscsid | bool %}cinder:/var/lib/cinder{% endif %}"
- "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "kolla_logs:/var/log/kolla/"
cinder-backup:
container_name: cinder_backup
group: cinder-backup
enabled: true
image: "{{ cinder_backup_image_full }}"
privileged: True
volumes:
- "{{ node_config_directory }}/cinder-backup/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/dev/:/dev/"
- "/run/:/run/:shared"
- "{% if enable_cinder_backend_lvm | bool %}cinder:/var/lib/cinder{% endif %}"
- "{% if enable_cinder_backend_lvm | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "kolla_logs:/var/log/kolla/"
####################
# Ceph
####################
ceph_cinder_pool_type: "{{ ceph_pool_type }}"
ceph_cinder_cache_mode: "{{ ceph_cache_mode }}"
ceph_cinder_backup_pool_type: "{{ ceph_pool_type }}"
ceph_cinder_backup_cache_mode: "{{ ceph_cache_mode }}"
# Due to Ansible issues with include, these variables cannot be overridden
# directly. Please override the variables they reference instead.
cinder_pool_name: "{{ ceph_cinder_pool_name }}"
cinder_pool_type: "{{ ceph_cinder_pool_type }}"
cinder_cache_mode: "{{ ceph_cinder_cache_mode }}"
cinder_backup_pool_name: "{{ ceph_cinder_backup_pool_name }}"
cinder_backup_pool_type: "{{ ceph_cinder_backup_pool_type }}"
cinder_backup_cache_mode: "{{ ceph_cinder_backup_cache_mode }}"
####################
# Database
####################
cinder_database_name: "cinder"
cinder_database_user: "cinder"
cinder_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
####################
# Docker
####################
cinder_volume_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-volume"
cinder_volume_tag: "{{ openstack_release }}"
cinder_volume_image_full: "{{ cinder_volume_image }}:{{ cinder_volume_tag }}"
cinder_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-scheduler"
cinder_scheduler_tag: "{{ openstack_release }}"
cinder_scheduler_image_full: "{{ cinder_scheduler_image }}:{{ cinder_scheduler_tag }}"
cinder_backup_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-backup"
cinder_backup_tag: "{{ openstack_release }}"
cinder_backup_image_full: "{{ cinder_backup_image }}:{{ cinder_backup_tag }}"
cinder_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-api"
cinder_api_tag: "{{ openstack_release }}"
cinder_api_image_full: "{{ cinder_api_image }}:{{ cinder_api_tag }}"
####################
# OpenStack
####################
cinder_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s"
cinder_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s"
cinder_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s"
cinder_v2_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s"
cinder_v2_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s"
cinder_v2_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s"
cinder_v3_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v3/%(tenant_id)s"
cinder_v3_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v3/%(tenant_id)s"
cinder_v3_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cinder_api_port }}/v3/%(tenant_id)s"
cinder_logging_debug: "{{ openstack_logging_debug }}"
cinder_keystone_user: "cinder"
openstack_cinder_auth: "{{ openstack_auth }}"
####################
# Cinder
####################
cinder_backends:
- name: "rbd-1"
driver: "ceph"
enabled: "{{ enable_ceph | bool and cinder_backend_ceph | bool }}"
- name: "lvm-1"
driver: "lvm"
enabled: "{{ enable_cinder_backend_lvm | bool }}"
- name: "nfs-1"
driver: "nfs"
enabled: "{{ enable_cinder_backend_nfs | bool }}"
- name: "hnas-iscsi"
driver: "hnas_iscsi"
enabled: "{{ enable_cinder_backend_hnas_iscsi | bool }}"
- name: "hnas-nfs"
driver: "hnas_nfs"
enabled: "{{ enable_cinder_backend_hnas_nfs | bool }}"
cinder_enabled_backends: "{{ cinder_backends|selectattr('enabled', 'equalto', true)|list }}"
#############################################
# Hitachi NAS Platform iSCSI and NFS drivers
#############################################
# iscsi
hnas_iscsi_backend: "hnas_iscsi_backend"
hnas_iscsi_username:
hnas_iscsi_password:
hnas_iscsi_mgmt_ip0:
hnas_iscsi_svc0_volume_type:
hnas_iscsi_svc0_hdp:
hnas_iscsi_svc0_ip:
# nfs
hnas_nfs_backend: "hnas_nfs_backend"
hnas_nfs_username:
hnas_nfs_password:
hnas_nfs_mgmt_ip0:
hnas_nfs_svc0_volume_type:
hnas_nfs_svc0_hdp:
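
The cinder_enabled_backends value above relies on Jinja2's selectattr filter, and cinder.conf.j2 later joins the names of the surviving entries. A standalone sketch of that filter chain, evaluated outside Ansible with hypothetical backend values:

# Standalone illustration of the selectattr expression used for
# cinder_enabled_backends; the backend values are hypothetical.
from jinja2 import Template

backends = [
    {"name": "rbd-1", "driver": "ceph", "enabled": True},
    {"name": "lvm-1", "driver": "lvm", "enabled": False},
    {"name": "nfs-1", "driver": "nfs", "enabled": True},
]

expr = Template(
    "{{ backends | selectattr('enabled', 'equalto', true) "
    "| map(attribute='name') | join(',') }}"
)
# Only the enabled backends survive the filter:
print(expr.render(backends=backends))  # prints: rbd-1,nfs-1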

View File

@ -1,65 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
recurse: yes
when:
- item.value.enabled | bool
with_dict: "{{ cinder_services }}"
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
register: cinder_config_jsons
when:
- item.value.enabled | bool
with_dict: "{{ cinder_services }}"
- name: Copying over cinder.conf
merge_configs:
vars:
service_name: "{{ item.key }}"
sources:
- "{{ role_path }}/templates/cinder.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/database.conf"
- "{{ node_custom_config }}/messaging.conf"
- "{{ node_custom_config }}/cinder.conf"
- "{{ node_custom_config }}/cinder/{{ item.key }}.conf"
- "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/cinder.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/cinder.conf"
register: cinder_confs
when:
- item.value.enabled | bool
with_dict: "{{ cinder_services }}"
- name: Check if policies shall be overwritten
local_action: stat path="{{ node_custom_config }}/cinder/policy.json"
register: cinder_policy
- name: Copying over existing policy.json
template:
src: "{{ node_custom_config }}/cinder/policy.json"
dest: "{{ node_config_directory }}/{{ item.key }}/policy.json"
register: cinder_policy_jsons
when:
- cinder_policy.stat.exists
with_dict: "{{ cinder_services }}"
- name: Copying over nfs_shares files for cinder_volume
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/cinder-volume/nfs_shares"
with_first_found:
- files:
- "{{ node_custom_config }}/nfs_shares.j2"
- "{{ node_custom_config }}/cinder/nfs_shares.j2"
- "{{ node_custom_config }}/cinder/cinder-volume/nfs_shares.j2"
- "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/nfs_shares.j2"
- "{{ node_custom_config }}/nfs_shares"
- "{{ node_custom_config }}/cinder/nfs_shares"
- "{{ node_custom_config }}/cinder/cinder-volume/nfs_shares"
- "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/nfs_shares"
skip: "{{ not enable_cinder_backend_nfs | bool and not enable_cinder_backend_hnas_nfs | bool }}"
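
The with_first_found lookup above copies the first candidate path that actually exists and skips the task when none do. A compact sketch of that rule, with hypothetical paths standing in for the list above:

# Illustration of the first-found lookup used for nfs_shares; the
# candidate paths are hypothetical stand-ins for the list above.
import os

def first_found(candidates):
    for path in candidates:
        if os.path.exists(path):
            return path
    return None  # no match behaves like skip: the copy does not happen

print(first_found([
    "/etc/kolla/config/nfs_shares.j2",
    "/etc/kolla/config/cinder/nfs_shares.j2",
    "/etc/kolla/config/cinder/nfs_shares",
]))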

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,30 +0,0 @@
{
"command": "cinder-api --config-file /etc/cinder/cinder.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/cinder.conf",
"dest": "/etc/cinder/cinder.conf",
"owner": "cinder",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/cinder/policy.json",
"owner": "cinder",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/lib/cinder",
"owner": "cinder:cinder",
"recurse": true
},
{
"path": "/var/log/kolla/cinder",
"owner": "cinder:cinder",
"recurse": true
}
]
}

View File

@ -1,37 +0,0 @@
{
"command": "cinder-backup --config-file /etc/cinder/cinder.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/cinder.conf",
"dest": "/etc/cinder/cinder.conf",
"owner": "cinder",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/cinder/policy.json",
"owner": "cinder",
"perm": "0600",
"optional": true
}{% if cinder_backend_ceph | bool %},
{
"source": "{{ container_config_directory }}/ceph.*",
"dest": "/etc/ceph/",
"owner": "cinder",
"perm": "0700",
"optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
}{% endif %}
],
"permissions": [
{
"path": "/var/lib/cinder",
"owner": "cinder:cinder",
"recurse": true
},
{
"path": "/var/log/kolla/cinder",
"owner": "cinder:cinder",
"recurse": true
}
]
}

View File

@ -1,30 +0,0 @@
{
"command": "cinder-scheduler --config-file /etc/cinder/cinder.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/cinder.conf",
"dest": "/etc/cinder/cinder.conf",
"owner": "cinder",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/cinder/policy.json",
"owner": "cinder",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/lib/cinder",
"owner": "cinder:cinder",
"recurse": true
},
{
"path": "/var/log/kolla/cinder",
"owner": "cinder:cinder",
"recurse": true
}
]
}

View File

@ -1,51 +0,0 @@
{
"command": "cinder-volume --config-file /etc/cinder/cinder.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/cinder.conf",
"dest": "/etc/cinder/cinder.conf",
"owner": "cinder",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.*",
"dest": "/etc/ceph/",
"owner": "cinder",
"perm": "0700",
"optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "cinder",
"perm": "0600",
"optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/nfs_shares",
"dest": "/etc/cinder/nfs_shares",
"owner": "cinder",
"perm": "0600",
"optional": {{ (not enable_cinder_backend_nfs | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/cinder/policy.json",
"owner": "cinder",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/lib/cinder",
"owner": "cinder:cinder",
"recurse": true
},
{
"path": "/var/log/kolla/cinder",
"owner": "cinder:cinder",
"recurse": true
}
]
}

View File

@ -1,153 +0,0 @@
[DEFAULT]
debug = {{ cinder_logging_debug }}
use_forwarded_for = true
{% if std_logger %}
use_syslog = False
use_stderr = True
{% else %}
log_dir = /var/log/kolla/cinder
# Set use_stderr to False or the logs will also be sent to stderr
# and collected by Docker
use_stderr = False
{% endif %}
enable_v1_api=false
osapi_volume_workers = {{ openstack_service_workers }}
volume_name_template = volume-%s
glance_api_servers = {{ internal_protocol }}://glance-api:{{ glance_api_port }}
glance_api_version = 2
os_region_name = {{ openstack_region_name }}
{% if cinder_enabled_backends %}
enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% endif %}
{% if service_name == "cinder-backup" and enable_ceph | bool and cinder_backend_ceph | bool %}
backup_driver = cinder.backup.drivers.ceph
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
{% elif cinder_backup_driver == "nfs"%}
backup_driver = cinder.backup.drivers.nfs
backup_mount_options = {{ cinder_backup_mount_options_nfs }}
backup_mount_point_base = /var/lib/cinder/backup
backup_share = {{ cinder_backup_share }}
backup_file_size = 327680000
{% elif cinder_backup_driver == "swift"%}
backup_driver = cinder.backup.drivers.swift
backup_swift_url = http://{{ kolla_internal_vip_address }}:{{ swift_proxy_server_port }}/v1/AUTH_
backup_swift_auth = per_user
backup_swift_auth_version = 1
backup_swift_user =
backup_swift_key =
{% endif %}
osapi_volume_listen = {{ api_interface_address }}
osapi_volume_listen_port = {{ cinder_api_port }}
api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL
auth_strategy = keystone
transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }}
[oslo_messaging_notifications]
{% if enable_ceilometer | bool or enable_searchlight | bool %}
driver = messagingv2
topics = notifications
{% else %}
driver = noop
{% endif %}
[database]
connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }}
max_retries = -1
[keystone_authtoken]
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ cinder_keystone_user }}
password = {{ cinder_keystone_password }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcache_servers = {{ memcached_servers }}:{{ memcached_port }}
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
{% if enable_cinder_backend_lvm | bool %}
[lvm-1]
volume_group = {{ cinder_volume_group }}
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvm-1
iscsi_helper = tgtadm
iscsi_protocol = iscsi
{% endif %}
{% if enable_ceph | bool and cinder_backend_ceph | bool %}
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
report_discard_supported = True
{% endif %}
{% if enable_cinder_backend_nfs | bool %}
[nfs-1]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
volume_backend_name = nfs-1
nfs_shares_config = /etc/cinder/nfs_shares
{% endif %}
{% if enable_cinder_backend_hnas_iscsi | bool %}
[hnas-iscsi]
volume_driver = cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver
volume_backend_name = {{ hnas_iscsi_backend }}
hnas_username = {{ hnas_iscsi_username }}
hnas_password = {{ hnas_iscsi_password }}
hnas_mgmt_ip0 = {{ hnas_iscsi_mgmt_ip0 }}
hnas_chap_enabled = True
hnas_svc0_volume_type = {{ hnas_iscsi_svc0_volume_type }}
hnas_svc0_hdp = {{ hnas_iscsi_svc0_hdp }}
hnas_svc0_iscsi_ip = {{ hnas_iscsi_svc0_ip }}
{% endif %}
{% if enable_cinder_backend_hnas_nfs | bool %}
[hnas-nfs]
volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver
nfs_shares_config = /home/cinder/nfs_shares
volume_backend_name = {{ hnas_nfs_backend }}
hnas_username = {{ hnas_nfs_username }}
hnas_password = {{ hnas_nfs_password }}
hnas_mgmt_ip0 = {{ hnas_nfs_mgmt_ip0 }}
hnas_svc0_volume_type = {{ hnas_nfs_svc0_volume_type }}
hnas_svc0_hdp = {{ hnas_nfs_svc0_hdp }}
{% endif %}
[privsep_entrypoint]
helper_command=sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf

View File

@ -1,23 +0,0 @@
---
# Due to the way we do our inventory, Ansible does not pick up on the fact that
# this role has already run. We can track what has run with host facts.
common_run: False
####################
# Docker
####################
kolla_toolbox_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-kolla-toolbox"
kolla_toolbox_tag: "{{ openstack_release }}"
kolla_toolbox_image_full: "{{ kolla_toolbox_image }}:{{ kolla_toolbox_tag }}"
cron_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cron"
cron_tag: "{{ openstack_release }}"
cron_image_full: "{{ cron_image }}:{{ cron_tag }}"
fluentd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-fluentd"
fluentd_tag: "{{ openstack_release }}"
fluentd_image_full: "{{ fluentd_image }}:{{ fluentd_tag }}"
kubetoolbox_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-kubetoolbox"
kubetoolbox_tag: "{{ openstack_release }}"
kubetoolbox_image_full: "{{ kubetoolbox_image }}:{{ kubetoolbox_tag }}"

View File

@ -1,5 +0,0 @@
---
- name: "Removing label on node {{ host['metadata']['name'] }}"
command: "kubectl label node {{ host['metadata']['name'] }} {{ item.key }}-"
when: item.key.startswith('kolla_')
with_dict: "{{ host['metadata']['labels'] }}"

View File

@ -1,86 +0,0 @@
---
- name: Obtain node information
command: kubectl get nodes -o json
register: kubectl_nodes
- name: Set node facts
set_fact:
kubectl_dict: "{{ kubectl_nodes.stdout|from_json }}"
- name: Obtain list of Kolla PVs
command: "kubectl get pvc -n kolla -o jsonpath={.items[*].spec.volumeName}"
register: pv_list
failed_when:
- pv_list.rc != 0
- name: Obtain list of Kolla configmaps
command: "kubectl get configmaps -n kolla -o name"
register: configmaps_list
changed_when:
- configmaps_list | success
failed_when:
- configmaps_list.rc != 0
- name: Obtain list of Kolla secrets
command: "kubectl get secrets -n kolla -o name"
register: secrets_list
changed_when:
- secrets_list | success
failed_when:
- secrets_list.rc != 0
- name: Obtain list of Kolla Helm charts
command: "helm list --namespace kolla --all -q"
register: helm_list
changed_when:
- helm_list | success
failed_when:
- helm_list.rc != 0
- name: Delete existing Kolla Helm charts
command: "helm delete {{ item }} --purge"
when: helm_list.stdout | length != 0
with_items:
- "{{ helm_list.stdout }}"
- name: Delete existing Kolla secrets
command: "kubectl delete -n kolla {{ item }}"
when: secrets_list.stdout | length != 0
with_items:
- "{{ secrets_list.stdout }}"
- name: Delete existing Kolla configmaps
command: "kubectl delete -n kolla {{ item }}"
when: configmaps_list.stdout | length != 0
with_items:
- "{{ configmaps_list.stdout }}"
- name: "Delete existing Kolla labels"
include: "label_iterator.yml"
with_items: "{{ kubectl_dict['items'] }}"
loop_control:
loop_var: host
- name: Obtain list of Kolla PVCs
shell: "kubectl get pvc -n kolla -o jsonpath={.items[*].metadata.name}"
register: pvc_list
failed_when:
- pvc_list.rc != 0
- name: Delete existing Kolla PVCs
command: "kubectl delete pvc -n kolla {{ item }}"
when: pvc_list.stdout | length != 0
with_items:
- "{{ pvc_list.stdout }}"
- name: Delete existing Kolla PVs
command: "kubectl delete pv {{ item }}"
when: pv_list.stdout | length != 0
with_items:
- "{{ pv_list.stdout }}"
- name: Delete Kolla namespace
command: "kubectl delete namespace kolla"
register: namespace_delete
failed_when:
- namespace_delete.rc != 0
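
The tasks above tear the deployment down in a fixed order: Helm releases first, then secrets and configmaps, node labels (via label_iterator.yml), PVCs, the PVs they were bound to, and finally the kolla namespace. A condensed Python sketch of that sequence, shelling out to the same helm and kubectl commands (label cleanup omitted for brevity):

# Condensed sketch of the teardown order above; illustration only.
import subprocess

def run(cmd):
    return subprocess.run(cmd, capture_output=True, text=True, check=False)

def items(cmd):
    out = run(cmd).stdout.strip()
    return out.split() if out else []

# Record PV names before their claims disappear.
pvs = items(["kubectl", "get", "pvc", "-n", "kolla",
             "-o", "jsonpath={.items[*].spec.volumeName}"])

# 1. Helm releases (Helm v2 syntax, as used above).
for release in items(["helm", "list", "--namespace", "kolla", "--all", "-q"]):
    run(["helm", "delete", release, "--purge"])

# 2. Secrets and configmaps in the kolla namespace.
for kind in ("secrets", "configmaps"):
    for name in items(["kubectl", "get", kind, "-n", "kolla", "-o", "name"]):
        run(["kubectl", "delete", "-n", "kolla", name])

# 3. PVCs, then the PVs that backed them.
for pvc in items(["kubectl", "get", "pvc", "-n", "kolla",
                  "-o", "jsonpath={.items[*].metadata.name}"]):
    run(["kubectl", "delete", "pvc", "-n", "kolla", pvc])
for pv in pvs:
    run(["kubectl", "delete", "pv", pv])

# 4. Finally drop the namespace itself.
run(["kubectl", "delete", "namespace", "kolla"])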

View File

@ -1,62 +0,0 @@
---
project_name: "glance"
glance_services:
glance-api:
container_name: glance_api
group: glance-api
enabled: true
image: "{{ glance_api_image_full }}"
glance-registry:
container_name: glance_registry
group: glance-registry
enabled: true
image: "{{ glance_registry_image_full }}"
####################
# Ceph
####################
ceph_glance_pool_type: "{{ ceph_pool_type }}"
ceph_glance_cache_mode: "{{ ceph_cache_mode }}"
# Due to Ansible issues with include, these variables cannot be overridden
# directly. Please override the variables they reference instead.
glance_pool_name: "{{ ceph_glance_pool_name }}"
glance_pool_type: "{{ ceph_glance_pool_type }}"
glance_cache_mode: "{{ ceph_glance_cache_mode }}"
####################
# Database
####################
glance_database_name: "glance"
glance_database_user: "glance"
glance_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
####################
# Docker
####################
glance_registry_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-glance-registry"
glance_registry_tag: "{{ openstack_release }}"
glance_registry_image_full: "{{ glance_registry_image }}:{{ glance_registry_tag }}"
glance_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-glance-api"
glance_api_tag: "{{ openstack_release }}"
glance_api_image_full: "{{ glance_api_image }}:{{ glance_api_tag }}"
####################
# OpenStack
####################
glance_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ glance_api_port }}"
glance_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ glance_api_port }}"
glance_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ glance_api_port }}"
glance_logging_debug: "{{ openstack_logging_debug }}"
glance_keystone_user: "glance"
openstack_glance_auth: "{{ openstack_auth }}"
glance_registry_host: "glance-registry"

View File

@ -1,49 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
recurse: yes
when:
- item.value.enabled | bool
with_dict: "{{ glance_services }}"
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
register: glance_config_jsons
when:
- item.value.enabled | bool
with_dict: "{{ glance_services }}"
- name: Copying over glance-*.conf
merge_configs:
vars:
service_name: "{{ item.key }}"
sources:
- "{{ role_path }}/templates/{{ item.key }}.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/database.conf"
- "{{ node_custom_config }}/messaging.conf"
- "{{ node_custom_config }}/glance.conf"
- "{{ node_custom_config }}/glance/{{ item.key }}.conf"
- "{{ node_custom_config }}/glance/{{ inventory_hostname }}/{{ item.key }}.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/{{ item.key }}.conf"
register: glance_confs
when:
- item.value.enabled | bool
with_dict: "{{ glance_services }}"
- name: Check if policies shall be overwritten
local_action: stat path="{{ node_custom_config }}/glance/policy.json"
register: glance_policy
- name: Copying over existing policy.json
template:
src: "{{ node_custom_config }}/glance/policy.json"
dest: "{{ node_config_directory }}/{{ item.key }}/policy.json"
register: glance_policy_jsons
when:
- glance_policy.stat.exists
with_dict: "{{ glance_services }}"

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,71 +0,0 @@
[DEFAULT]
debug = {{ glance_logging_debug }}
use_forwarded_for = true
{% if std_logger %}
use_syslog = False
use_stderr = True
{% else %}
# NOTE(elemoine) log_dir alone does not work for Glance
log_file = /var/log/kolla/glance/api.log
{% endif %}
bind_host = {{ api_interface_address }}
bind_port = {{ glance_api_port }}
workers = {{ openstack_service_workers }}
registry_host = {{ glance_registry_host }}
{% if enable_ceph | bool %}
show_image_direct_url= True
show_multiple_locations = True
{% endif %}
cinder_catalog_info = volume:cinder:internalURL
transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }}
[database]
connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
max_retries = -1
[keystone_authtoken]
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ glance_keystone_user }}
password = {{ glance_keystone_password }}
{# For Kolla-Ansible, the memcache servers are generated from the list of
memcached servers in the inventory, and memcached_servers should be left unset.
For Kolla-Kubernetes, the memcached_servers variable must be defined in
globals.yml and set to the Kubernetes service for memcached. #}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcache_servers = {{ memcached_servers }}
[paste_deploy]
flavor = keystone
[glance_store]
{% if enable_ceph | bool and glance_backend_ceph | bool %}
default_store = rbd
stores = rbd,http
rbd_store_user = glance
rbd_store_pool = {{ ceph_glance_pool_name }}
rbd_store_chunk_size = 8
{% else %}
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
{% endif %}
[oslo_messaging_notifications]
{% if enable_ceilometer | bool or enable_searchlight | bool %}
driver = messagingv2
{% else %}
driver = noop
{% endif %}

View File

@ -1,37 +0,0 @@
{
"command": "glance-api",
"config_files": [
{
"source": "{{ container_config_directory }}/glance-api.conf",
"dest": "/etc/glance/glance-api.conf",
"owner": "glance",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/glance/policy.json",
"owner": "glance",
"perm": "0600",
"optional": true
}{% if glance_backend_ceph | bool %},
{
"source": "{{ container_config_directory }}/ceph.*",
"dest": "/etc/ceph/",
"owner": "glance",
"perm": "0700"
}
{% endif %}
],
"permissions": [
{
"path": "/var/lib/glance",
"owner": "glance:glance",
"recurse": true
},
{
"path": "/var/log/kolla/glance",
"owner": "glance:glance",
"recurse": true
}
]
}

View File

@ -1,48 +0,0 @@
[DEFAULT]
debug = {{ glance_logging_debug }}
{% if std_logger %}
use_syslog = False
use_stderr = True
{% else %}
# NOTE(elemoine) log_dir alone does not work for Glance
log_file = /var/log/kolla/glance/registry.log
{% endif %}
bind_host = {{ api_interface_address }}
bind_port = {{ glance_registry_port }}
workers = {{ openstack_service_workers }}
transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }}
[database]
connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
max_retries = -1
[keystone_authtoken]
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ glance_keystone_user }}
password = {{ glance_keystone_password }}
{# For Kolla-Ansible, the memcache servers are generated from the list of
memcached servers in the inventory, and memcached_servers should be left unset.
For Kolla-Kubernetes, the memcached_servers variable must be defined in
globals.yml and set to the Kubernetes service for memcached. #}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcache_servers = {{ memcached_servers }}
[paste_deploy]
flavor = keystone
[oslo_messaging_notifications]
{% if enable_ceilometer | bool or enable_searchlight | bool %}
driver = messagingv2
{% else %}
driver = noop
{% endif %}

View File

@ -1,25 +0,0 @@
{
"command": "glance-registry",
"config_files": [
{
"source": "{{ container_config_directory }}/glance-registry.conf",
"dest": "/etc/glance/glance-registry.conf",
"owner": "glance",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/glance/policy.json",
"owner": "glance",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/glance",
"owner": "glance:glance",
"recurse": true
}
]
}

View File

@ -1,72 +0,0 @@
---
project_name: "heat"
heat_services:
heat-api:
container_name: heat_api
group: heat-api
enabled: true
image: "{{ heat_api_image_full }}"
volumes:
- "{{ node_config_directory }}/heat-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
heat-api-cfn:
container_name: heat_api_cfn
group: heat-api-cfn
enabled: true
image: "{{ heat_api_cfn_image_full }}"
volumes:
- "{{ node_config_directory }}/heat-api-cfn/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
heat-engine:
container_name: heat_engine
group: heat-engine
enabled: true
image: "{{ heat_engine_image_full }}"
volumes:
- "{{ node_config_directory }}/heat-engine/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
####################
# Database
####################
heat_database_name: "heat"
heat_database_user: "heat"
heat_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
####################
# Docker
####################
heat_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-api"
heat_api_tag: "{{ openstack_release }}"
heat_api_image_full: "{{ heat_api_image }}:{{ heat_api_tag }}"
heat_api_cfn_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-api-cfn"
heat_api_cfn_tag: "{{ openstack_release }}"
heat_api_cfn_image_full: "{{ heat_api_cfn_image }}:{{ heat_api_cfn_tag }}"
heat_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-engine"
heat_engine_tag: "{{ openstack_release }}"
heat_engine_image_full: "{{ heat_engine_image }}:{{ heat_engine_tag }}"
####################
# OpenStack
####################
heat_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s"
heat_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s"
heat_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s"
heat_cfn_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_cfn_port }}/v1"
heat_cfn_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_cfn_port }}/v1"
heat_cfn_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}/v1"
heat_logging_debug: "{{ openstack_logging_debug }}"
heat_keystone_user: "heat"
heat_stack_user_role: "heat_stack_user"
heat_stack_owner_role: "heat_stack_owner"
openstack_heat_auth: "{{ openstack_auth }}"

View File

@ -1,55 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
recurse: yes
when:
- item.value.enabled | bool
with_dict: "{{ heat_services }}"
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
when:
- item.value.enabled | bool
with_dict: "{{ heat_services }}"
- name: Copying over the heat-engine environment file
template:
src: "_deprecated.yaml"
dest: "{{ node_config_directory }}/{{ item }}/_deprecated.yaml"
with_items:
- "heat-engine"
- name: Copying over heat.conf
merge_configs:
vars:
service_name: "{{ item.key }}"
sources:
- "{{ role_path }}/templates/heat.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/database.conf"
- "{{ node_custom_config }}/messaging.conf"
- "{{ node_custom_config }}/heat.conf"
- "{{ node_custom_config }}/heat/{{ item.key }}.conf"
- "{{ node_custom_config }}/heat/{{ inventory_hostname }}/heat.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/heat.conf"
register: heat_confs
when:
- item.value.enabled | bool
with_dict: "{{ heat_services }}"
- name: Check if policies shall be overwritten
local_action: stat path="{{ node_custom_config }}/heat/policy.json"
register: heat_policy
- name: Copying over existing policy.json
template:
src: "{{ node_custom_config }}/heat/policy.json"
dest: "{{ node_config_directory }}/{{ item.key }}/policy.json"
register: heat_policy_jsons
when:
- heat_policy.stat.exists
with_dict: "{{ heat_services }}"

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,4 +0,0 @@
resource_registry:
"OS::Heat::HARestarter":
"OS::Heat::SoftwareDeployments":
"OS::Heat::StructuredDeployments":

View File

@ -1,25 +0,0 @@
{
"command": "heat-api-cfn",
"config_files": [
{
"source": "{{ container_config_directory }}/heat.conf",
"dest": "/etc/heat/heat.conf",
"owner": "heat",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/heat/policy.json",
"owner": "heat",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/heat",
"owner": "heat:heat",
"recurse": true
}
]
}

View File

@ -1,25 +0,0 @@
{
"command": "heat-api",
"config_files": [
{
"source": "{{ container_config_directory }}/heat.conf",
"dest": "/etc/heat/heat.conf",
"owner": "heat",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/heat/policy.json",
"owner": "heat",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/heat",
"owner": "heat:heat",
"recurse": true
}
]
}

View File

@ -1,31 +0,0 @@
{
"command": "heat-engine",
"config_files": [
{
"source": "{{ container_config_directory }}/heat.conf",
"dest": "/etc/heat/heat.conf",
"owner": "heat",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/_deprecated.yaml",
"dest": "/etc/heat/environment.d/_deprecated.yaml",
"owner": "heat",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/heat/policy.json",
"owner": "heat",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/heat",
"owner": "heat:heat",
"recurse": true
}
]
}

View File

@ -1,83 +0,0 @@
[DEFAULT]
debug = {{ heat_logging_debug }}
{% if std_logger %}
use_syslog = False
use_stderr = True
{% else %}
log_dir = /var/log/kolla/heat
{% endif %}
heat_watch_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}
heat_metadata_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}
heat_waitcondition_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}/v1/waitcondition
heat_stack_user_role = {{ heat_stack_user_role }}
stack_domain_admin = heat_domain_admin
stack_domain_admin_password = {{ heat_domain_admin_password }}
stack_user_domain_name = heat_user_domain
deferred_auth_method = trusts
trusts_delegated_roles = heat_stack_owner
num_engine_workers = {{ openstack_service_workers }}
transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }}
{% if service_name == 'heat-api' %}
[heat_api]
bind_host = {{ api_interface_address }}
bind_port = {{ heat_api_port }}
workers = {{ openstack_service_workers }}
{% endif %}
{% if service_name == 'heat-api-cfn' %}
[heat_api_cfn]
bind_host = {{ api_interface_address }}
bind_port = {{ heat_api_cfn_port }}
{% endif %}
[database]
connection = mysql+pymysql://{{ heat_database_user }}:{{ heat_database_password }}@{{ heat_database_address }}/{{ heat_database_name }}
max_retries = -1
[keystone_authtoken]
auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ heat_keystone_user }}
password = {{ heat_keystone_password }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcache_servers = {{ memcached_servers }}:{{ memcached_port }}
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = {{ memcached_servers }}:{{ memcached_port }}
[trustee]
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
user_domain_id = default
username = {{ heat_keystone_user }}
password = {{ heat_keystone_password }}
[ec2authtoken]
auth_uri = {{ keystone_internal_url }}
[clients_keystone]
auth_uri = {{ keystone_internal_url }}
[oslo_messaging_notifications]
driver = noop
[clients]
endpoint_type = internalURL

View File

@ -1,53 +0,0 @@
---
project_name: "horizon"
horizon_services:
horizon:
container_name: horizon
group: horizon
enabled: true
image: "{{ horizon_image_full }}"
environment:
ENABLE_CLOUDKITTY: "{{ 'yes' if enable_horizon_cloudkitty | bool else 'no' }}"
ENABLE_FREEZER: "{{ 'yes' if enable_horizon_freezer | bool else 'no' }}"
ENABLE_IRONIC: "{{ 'yes' if enable_horizon_ironic | bool else 'no' }}"
ENABLE_KARBOR: "{{ 'yes' if enable_horizon_karbor | bool else 'no' }}"
ENABLE_MAGNUM: "{{ 'yes' if enable_horizon_magnum | bool else 'no' }}"
ENABLE_MANILA: "{{ 'yes' if enable_horizon_manila | bool else 'no' }}"
ENABLE_MISTRAL: "{{ 'yes' if enable_horizon_mistral | bool else 'no' }}"
ENABLE_MURANO: "{{ 'yes' if enable_horizon_murano | bool else 'no' }}"
ENABLE_NEUTRON_LBAAS: "{{ 'yes' if enable_horizon_neutron_lbaas | bool else 'no' }}"
ENABLE_SAHARA: "{{ 'yes' if enable_horizon_sahara | bool else 'no' }}"
ENABLE_SEARCHLIGHT: "{{ 'yes' if enable_horizon_searchlight | bool else 'no' }}"
ENABLE_SENLIN: "{{ 'yes' if enable_horizon_senlin | bool else 'no' }}"
ENABLE_SOLUM: "{{ 'yes' if enable_horizon_solum | bool else 'no' }}"
ENABLE_TACKER: "{{ 'yes' if enable_horizon_tacker | bool else 'no' }}"
ENABLE_TROVE: "{{ 'yes' if enable_horizon_trove | bool else 'no' }}"
ENABLE_WATCHER: "{{ 'yes' if enable_horizon_watcher | bool else 'no' }}"
volumes:
- "{{ node_config_directory }}/horizon/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
####################
# Database
####################
horizon_database_name: "horizon"
horizon_database_user: "horizon"
horizon_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
####################
# Docker
####################
horizon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-horizon"
horizon_tag: "{{ openstack_release }}"
horizon_image_full: "{{ horizon_image }}:{{ horizon_tag }}"
####################
# OpenStack
####################
openstack_horizon_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
horizon_logging_debug: "{{ openstack_logging_debug }}"

View File

@ -1,88 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
recurse: yes
when:
- item.value.enabled | bool
with_dict: "{{ horizon_services }}"
- name: Copying over config.json files for services
vars:
horizon: "{{ horizon_services['horizon'] }}"
template:
src: "horizon.json.j2"
dest: "{{ node_config_directory }}/horizon/config.json"
register: horizon_config_json
when:
- horizon.enabled | bool
- name: Copying over horizon.conf
vars:
horizon: "{{ horizon_services['horizon'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/horizon/horizon.conf"
register: horizon_conf
with_first_found:
- "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/horizon.conf"
- "{{ node_custom_config }}/horizon/horizon.conf"
- "horizon.conf.j2"
when:
- horizon.enabled | bool
- name: Copying over local_settings
vars:
horizon: "{{ horizon_services['horizon'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/horizon/local_settings"
with_first_found:
- "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/local_settings"
- "{{ node_custom_config }}/horizon/local_settings"
- "local_settings.j2"
register: horizon_local_settings
when:
- horizon.enabled | bool
- name: Check if policies shall be overwritten
local_action: stat path="{{ node_custom_config }}/horizon/{{ item.name }}_policy.json"
register: custom_policy
when: item.enabled | bool
with_items:
- { name: "ceilometer", enabled: "{{ enable_ceilometer }}" }
- { name: "cinder", enabled: "{{ enable_cinder }}" }
- { name: "cloudkitty", enabled: "{{ enable_horizon_cloudkitty }}" }
- { name: "freezer", enabled: "{{ enable_horizon_freezer }}" }
- { name: "glance", enabled: "{{ enable_glance }}" }
- { name: "heat", enabled: "{{ enable_heat }}" }
- { name: "ironic", enabled: "{{ enable_horizon_ironic }}" }
- { name: "keystone", enabled: "{{ enable_keystone }}" }
- { name: "karbor", enabled: "{{ enable_horizon_karbor }}" }
- { name: "magnum", enabled: "{{ enable_horizon_magnum }}" }
- { name: "manila", enabled: "{{ enable_horizon_manila }}" }
- { name: "mistral", enabled: "{{ enable_horizon_mistral }}" }
- { name: "murano", enabled: "{{ enable_horizon_murano }}" }
- { name: "neutron", enabled: "{{ enable_neutron }}" }
- { name: "nova", enabled: "{{ enable_nova }}" }
- { name: "sahara", enabled: "{{ enable_horizon_sahara }}" }
- { name: "searchlight", enabled: "{{ enable_horizon_searchlight }}" }
- { name: "senlin", enabled: "{{ enable_horizon_senlin }}" }
- { name: "solum", enabled: "{{ enable_horizon_solum }}" }
- { name: "tacker", enabled: "{{ enable_horizon_tacker }}" }
- { name: "trove", enabled: "{{ enable_horizon_trove }}" }
- { name: "watcher", enabled: "{{ enable_horizon_watcher }}" }
- name: Copying over existing policy.json
vars:
horizon: "{{ horizon_services['horizon'] }}"
template:
src: "{{ node_custom_config }}/horizon/{{ item[0]['name'] }}_policy.json"
dest: "{{ node_config_directory }}/horizon/{{ item[0]['name'] }}_policy.json"
register: policy_jsons
when:
- horizon.enabled | bool
- item.item.enabled | bool
- item.stat.exists
with_items: "{{ custom_policy.results }}"

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,76 +0,0 @@
{% set python_path = '/usr/share/openstack-dashboard' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %}
Listen {{ api_interface_address }}:{{ horizon_port }}
<VirtualHost *:{{ horizon_port }}>
LogLevel warn
{% if std_logger %}
ErrorLog /proc/self/fd/2
CustomLog /proc/self/fd/1 combined
{% else %}
ErrorLog /var/log/kolla/horizon/horizon.log
CustomLog /var/log/kolla/horizon/horizon-access.log combined
{% endif %}
WSGIScriptReloading On
WSGIDaemonProcess horizon-http processes={{ openstack_service_workers }} threads=1 user=horizon group=horizon display-name=%{GROUP} python-path={{ python_path }}
WSGIProcessGroup horizon-http
WSGIScriptAlias / {{ python_path }}/openstack_dashboard/wsgi/django.wsgi
WSGIPassAuthorization On
<Location "/">
Require all granted
</Location>
Alias /static {{ python_path }}/static
<Location "/static">
SetHandler None
</Location>
</VirtualHost>
{% if kolla_enable_tls_external | bool %}
Header edit Location ^http://(.*)$ https://$1
{% else %}
# NOTE(Jeffrey4l): Only enable deflate when TLS is disabled, until
# OSSN-0037 is fixed.
# See https://wiki.openstack.org/wiki/OSSN/OSSN-0037 for more information.
<IfModule mod_deflate.c>
# Compress HTML, CSS, JavaScript, Text, XML and fonts
AddOutputFilterByType DEFLATE application/javascript
AddOutputFilterByType DEFLATE application/rss+xml
AddOutputFilterByType DEFLATE application/vnd.ms-fontobject
AddOutputFilterByType DEFLATE application/x-font
AddOutputFilterByType DEFLATE application/x-font-opentype
AddOutputFilterByType DEFLATE application/x-font-otf
AddOutputFilterByType DEFLATE application/x-font-truetype
AddOutputFilterByType DEFLATE application/x-font-ttf
AddOutputFilterByType DEFLATE application/x-javascript
AddOutputFilterByType DEFLATE application/xhtml+xml
AddOutputFilterByType DEFLATE application/xml
AddOutputFilterByType DEFLATE font/opentype
AddOutputFilterByType DEFLATE font/otf
AddOutputFilterByType DEFLATE font/ttf
AddOutputFilterByType DEFLATE image/svg+xml
AddOutputFilterByType DEFLATE image/x-icon
AddOutputFilterByType DEFLATE text/css
AddOutputFilterByType DEFLATE text/html
AddOutputFilterByType DEFLATE text/javascript
AddOutputFilterByType DEFLATE text/plain
AddOutputFilterByType DEFLATE text/xml
</IfModule>
{% endif %}
<IfModule mod_expires.c>
<Filesmatch "\.(jpg|jpeg|png|gif|js|css|swf|ico|woff)$">
ExpiresActive on
ExpiresDefault "access plus 1 month"
ExpiresByType application/javascript "access plus 1 year"
ExpiresByType text/css "access plus 1 year"
ExpiresByType image/x-ico "access plus 1 year"
ExpiresByType image/jpg "access plus 1 year"
ExpiresByType image/jpeg "access plus 1 year"
ExpiresByType image/gif "access plus 1 year"
ExpiresByType image/png "access plus 1 year"
Header merge Cache-Control public
Header unset ETag
</Filesmatch>
</IfModule>

View File

@ -1,54 +0,0 @@
{% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
{% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'horizon.conf' %}
{% set services = [
( 'ceilometer', enable_ceilometer ),
( 'cinder', enable_cinder ),
( 'cloudkitty', enable_horizon_cloudkitty ),
( 'freezer', enable_horizon_freezer ),
( 'glance', enable_glance ),
( 'heat', enable_heat ),
( 'ironic', enable_horizon_ironic ),
( 'keystone', enable_keystone ),
( 'karbor', enable_horizon_karbor ),
( 'magnum', enable_horizon_magnum ),
( 'manila', enable_horizon_manila ),
( 'mistral', enable_horizon_mistral ),
( 'murano', enable_horizon_murano ),
( 'neutron', enable_neutron ),
( 'nova', enable_nova ),
( 'sahara', enable_horizon_sahara ),
( 'searchlight', enable_horizon_searchlight ),
( 'senlin', enable_horizon_senlin ),
( 'solum', enable_horizon_solum ),
( 'tacker', enable_horizon_tacker ),
( 'trove', enable_horizon_trove ),
( 'watcher', enable_horizon_watcher )
] %}
{
"command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND",
"config_files": [
{
"source": "{{ container_config_directory }}/horizon.conf",
"dest": "/etc/{{ apache_dir }}/{{ apache_file }}",
"owner": "horizon",
"perm": "0644"
},
{% for service, enabled in services if enabled | bool %}
{
"source": "{{ container_config_directory }}/horizon/{{ service }}_policy.json",
"dest": "/etc/openstack-dashboard/{{ service }}_policy.json",
"owner": "horizon",
"perm": "0600",
"optional": true
},
{% endfor %}
{
"source": "{{ container_config_directory }}/local_settings",
"dest": "/etc/openstack-dashboard/local_settings",
"owner": "horizon",
"perm": "0644"
}
]
}
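
The template above appends one policy-file entry per enabled service to the config_files list. The same conditional list construction, sketched in plain Python with hypothetical service flags and paths:

# Sketch of the per-service config_files construction; the flags and
# paths are hypothetical illustrations of the template logic above.
services = [("cinder", True), ("glance", True), ("magnum", False)]

config_files = [{
    "source": "/var/lib/kolla/config_files/horizon.conf",
    "dest": "/etc/httpd/conf.d/horizon.conf",
    "owner": "horizon",
    "perm": "0644",
}]

for service, enabled in services:
    if enabled:  # disabled services contribute no policy file entry
        config_files.append({
            "source": f"/var/lib/kolla/config_files/{service}_policy.json",
            "dest": f"/etc/openstack-dashboard/{service}_policy.json",
            "owner": "horizon",
            "perm": "0600",
            "optional": True,
        })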

View File

@ -1,815 +0,0 @@
# -*- coding: utf-8 -*-
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
from openstack_dashboard.settings import HORIZON_CONFIG
DEBUG = {{ horizon_logging_debug }}
TEMPLATE_DEBUG = DEBUG
COMPRESS_OFFLINE = True
# WEBROOT is the location relative to the web server root and
# should end with a slash.
WEBROOT = '/'
#LOGIN_URL = WEBROOT + 'auth/login/'
#LOGOUT_URL = WEBROOT + 'auth/logout/'
#
# LOGIN_REDIRECT_URL can be used as an alternative for
# HORIZON_CONFIG.user_home, if user_home is not set.
# Do not set it to '/home/', as this will cause a circular redirect loop
#LOGIN_REDIRECT_URL = WEBROOT
# If horizon is running in production (DEBUG is False), set this
# with the list of host/domain names that the application can serve.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
{% if horizon_backend_database | bool %}
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '{{ horizon_database_name }}',
'USER': '{{ horizon_database_user }}',
'PASSWORD': '{{ horizon_database_password }}',
'HOST': '{{ database_address }}',
'PORT': '{{ database_port }}'
}
}
{% endif %}
# Set SSL proxy settings:
# Pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
#CSRF_COOKIE_SECURE = True
#SESSION_COOKIE_SECURE = True
{% if kolla_enable_tls_external | bool %}
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
{% endif %}
# The absolute path to the directory where message files are collected.
# The message file must have a .json file extension. When the user logs in to
# horizon, the collected message files are processed and displayed to the user.
#MESSAGES_PATH=None
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# Versions specified here should be integers or floats, not strings.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, the identity service APIs have inconsistent
# use of the decimal point, so valid options would be 2.0 or 3.
# Minimum compute version to get the instance locked status is 2.9.
#OPENSTACK_API_VERSIONS = {
# "data-processing": 1.1,
# "identity": 3,
# "volume": 2,
# "compute": 2,
#}
OPENSTACK_API_VERSIONS = {
"identity": 3,
"volume": 2,
}
# Set this to True if running on a multi-domain model. When this is enabled, it
# will require the user to enter the Domain name in addition to the username
# for login.
#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
# NOTE: This value must be the ID of the default domain, NOT the name.
# Also, you will most likely have a value in the keystone policy file like this
# "cloud_admin": "rule:admin_required and domain_id:<your domain id>"
# This value must match the domain id specified there.
#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'
# Set this to True to enable panels that provide the ability for users to
# manage Identity Providers (IdPs) and establish a set of rules to map
# federation protocol attributes to Identity API attributes.
# This extension requires v3.0+ of the Identity API.
#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False
# Set Console type:
# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
# Set to None explicitly if you want to deactivate the console.
#CONSOLE_TYPE = "AUTO"
# If provided, a "Report Bug" link will be displayed in the site header
# which links to the value of this setting (ideally a URL containing
# information on how to report issues).
#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com"
# Show backdrop element outside the modal, do not close the modal
# after clicking on backdrop.
#HORIZON_CONFIG["modal_backdrop"] = "static"
# Specify a regular expression to validate user passwords.
#HORIZON_CONFIG["password_validator"] = {
# "regex": '.*',
# "help_text": _("Your password does not meet the requirements."),
#}
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
#HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for forms including the login form and
# the database creation workflow if so desired.
#HORIZON_CONFIG["password_autocomplete"] = "off"
# Setting this to True will disable the reveal button for password fields,
# including on the login form.
#HORIZON_CONFIG["disable_password_reveal"] = False
LOCAL_PATH = '/tmp'
# Set custom secret key:
# You can either set it to a specific value or you can let horizon generate a
# default secret key that is unique on this machine, i.e. regardless of the
# number of Python WSGI workers (if used behind Apache+mod_wsgi). However,
# there may be situations where you would want to set this explicitly, e.g.
# when multiple dashboard instances are distributed on different machines
# (usually behind a load-balancer). Either you have to make sure that a session
# gets all requests routed to the same dashboard instance or you set the same
# SECRET_KEY for all of them.
SECRET_KEY='{{ horizon_secret_key }}'
# We recommend you use memcached for development; otherwise after every reload
# of the Django development server, you will have to log in again. To use
# memcached set CACHES to something like
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# },
#}
{% if horizon_backend_database | bool == False %}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '{{ memcached_servers }}'
}
}
{% endif %}
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
#EMAIL_HOST = 'smtp.my-company.com'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = 'djangomail'
#EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
#AVAILABLE_REGIONS = [
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
#]
OPENSTACK_HOST = "{{ api_interface_address }}"
OPENSTACK_KEYSTONE_URL = "{{ keystone_internal_url }}"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ keystone_default_user_role }}"
# Enables keystone web single-sign-on if set to True.
#WEBSSO_ENABLED = False
# Determines which authentication choice to show as default.
#WEBSSO_INITIAL_CHOICE = "credentials"
# The list of authentication mechanisms which include keystone
# federation protocols and identity provider/federation protocol
# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol
# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID
# Connect respectively.
# Do not remove the mandatory credentials mechanism.
# Note: The last two tuples are sample mapping keys to an identity provider
# and federation protocol combination (WEBSSO_IDP_MAPPING).
#WEBSSO_CHOICES = (
# ("credentials", _("Keystone Credentials")),
# ("oidc", _("OpenID Connect")),
# ("saml2", _("Security Assertion Markup Language")),
# ("acme_oidc", "ACME - OpenID Connect"),
# ("acme_saml2", "ACME - SAML2"),
#)
# A dictionary of specific identity provider and federation protocol
# combinations. From the selected authentication mechanism, the value
# will be looked up as keys in the dictionary. If a match is found,
# it will redirect the user to an identity provider and federation protocol
# specific WebSSO endpoint in keystone, otherwise it will use the value
# as the protocol_id when redirecting to the WebSSO by protocol endpoint.
# NOTE: The value is expected to be a tuple formatted as: (<idp_id>, <protocol_id>).
#WEBSSO_IDP_MAPPING = {
# "acme_oidc": ("acme", "oidc"),
# "acme_saml2": ("acme", "saml2"),
#}
# Disable SSL certificate checks (useful for self-signed certificates):
#OPENSTACK_SSL_NO_VERIFY = True
# The CA certificate to use to verify SSL connections
#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
# The OPENSTACK_KEYSTONE_BACKEND setting can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend, then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True,
}
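# For example, the LDAP case mentioned above could be expressed as follows
# (a sketch only; other capability flags are left at the values set above):
#OPENSTACK_KEYSTONE_BACKEND.update({
#    'name': 'ldap',
#    'can_edit_user': False,
#})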
# Setting this to True will add a new "Retrieve Password" action on instances,
# allowing admin session password retrieval/decryption.
#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
# The Launch Instance user experience has been significantly enhanced.
# You can choose whether to enable the new launch instance experience,
# the legacy experience, or both. The legacy experience will be removed
# in a future release, but is available as a temporary backup setting to ensure
# compatibility with existing deployments. Further development will not be
# done on the legacy experience. Please report any problems with the new
# experience via the Launchpad tracking system.
#
# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
# determine the experience to enable. Set them both to true to enable
# both.
#LAUNCH_INSTANCE_LEGACY_ENABLED = True
#LAUNCH_INSTANCE_NG_ENABLED = False
# A dictionary of settings which can be used to provide the default values for
# properties found in the Launch Instance modal.
#LAUNCH_INSTANCE_DEFAULTS = {
# 'config_drive': False,
# 'enable_scheduler_hints': True
#}
# The Xen Hypervisor has the ability to set the mount point for volumes
# attached to instances (other Hypervisors currently do not). Setting
# can_set_mount_point to True will add the option to set the mount point
# from the UI.
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': False,
'requires_keypair': False,
'enable_quotas': True
}
# The OPENSTACK_CINDER_FEATURES setting can be used to enable optional
# services provided by cinder that are not exposed by its extension API.
OPENSTACK_CINDER_FEATURES = {
'enable_backup': True,
}
# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
# services provided by neutron. Options currently available are load
# balancer service, security groups, quotas, VPN service.
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': True,
'enable_quotas': True,
'enable_ipv6': True,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': True,
'enable_firewall': True,
'enable_vpn': True,
'enable_fip_topology_check': True,
# Default dns servers you would like to use when a subnet is
# created. This is only a default, users can still choose a different
# list of dns servers when creating a new subnet.
# The entries below are examples only, and are not appropriate for
# real deployments
# 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"],
# The profile_support option is used to detect if an external router can be
# configured via the dashboard. When using specific plugins the
# profile_support can be turned on if needed.
'profile_support': None,
#'profile_support': 'cisco',
# Set which provider network types are supported. Only the network types
# in this list will be available to choose from when creating a network.
# Network types include local, flat, vlan, gre, vxlan and geneve.
# 'supported_provider_types': ['*'],
# You can configure available segmentation ID range per network type
# in your deployment.
# 'segmentation_id_range': {
# 'vlan': [1024, 2048],
# 'vxlan': [4094, 65536],
# },
# You can define additional provider network types here.
# 'extra_provider_types': {
# 'awesome_type': {
# 'display_name': 'Awesome New Type',
# 'require_physical_network': False,
# 'require_segmentation_id': True,
# }
# },
# Set which VNIC types are supported for port binding. Only the VNIC
# types in this list will be available to choose from when creating a
# port.
# VNIC types include 'normal', 'macvtap' and 'direct'.
# Set to empty list or None to disable VNIC type selection.
'supported_vnic_types': ['*'],
}
# The OPENSTACK_HEAT_STACK setting can be used to disable the password
# field that is otherwise required when launching a stack.
OPENSTACK_HEAT_STACK = {
'enable_user_pass': True,
}
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
#OPENSTACK_IMAGE_BACKEND = {
# 'image_formats': [
# ('', _('Select format')),
# ('aki', _('AKI - Amazon Kernel Image')),
# ('ami', _('AMI - Amazon Machine Image')),
# ('ari', _('ARI - Amazon Ramdisk Image')),
# ('docker', _('Docker')),
# ('iso', _('ISO - Optical Disk Image')),
# ('ova', _('OVA - Open Virtual Appliance')),
# ('qcow2', _('QCOW2 - QEMU Emulator')),
# ('raw', _('Raw')),
# ('vdi', _('VDI - Virtual Disk Image')),
# ('vhd', _('VHD - Virtual Hard Disk')),
# ('vmdk', _('VMDK - Virtual Machine Disk')),
# ],
#}
# The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles for
# image custom property attributes that appear on image detail pages.
IMAGE_CUSTOM_PROPERTY_TITLES = {
"architecture": _("Architecture"),
"kernel_id": _("Kernel ID"),
"ramdisk_id": _("Ramdisk ID"),
"image_state": _("Euca2ools state"),
"project_id": _("Project ID"),
"image_type": _("Image Type"),
}
# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
# custom properties should not be displayed in the Image Custom Properties
# table.
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
# Set to 'legacy' or 'direct' to allow users to upload images to glance via
# the Horizon server. When enabled, a file form field will appear on the create
# image form. If set to 'off', there will be no file form field on the create
# image form. See documentation for deployment considerations.
#HORIZON_IMAGES_UPLOAD_MODE = 'legacy'
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
OPENSTACK_ENDPOINT_TYPE = "internalURL"
# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is None. This
# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
#SECONDARY_ENDPOINT_TYPE = None
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# The size of chunk in bytes for downloading objects from Swift
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
# Specify a maximum number of items to display in a dropdown.
DROPDOWN_MAX_ITEMS = 30
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
# When launching an instance, the menu of available flavors is
# sorted by RAM usage, ascending. If you would like a different sort order,
# you can provide another flavor attribute as sorting key. Alternatively, you
# can provide a custom callback method to use for sorting. You can also provide
# a flag for reverse sort. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
#CREATE_INSTANCE_FLAVOR_SORT = {
# 'key': 'name',
# # or
# 'key': my_awesome_callback_method,
# 'reverse': False,
#}
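# A hypothetical custom callback, shown only as a sketch: sort flavors by
# vCPU count, largest first ('flavor' here is the flavor object Horizon
# passes to the sort key).
#CREATE_INSTANCE_FLAVOR_SORT = {
#    'key': lambda flavor: flavor.vcpus,
#    'reverse': True,
#}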
# Set this to True to display an 'Admin Password' field on the Change Password
# form to verify that it is indeed the admin logged-in who wants to change
# the password.
#ENFORCE_PASSWORD_CHECK = False
# Modules that provide /auth routes that can be used to handle different types
# of user authentication. Add auth plugins that require extra route handling to
# this list.
#AUTHENTICATION_URLS = [
# 'openstack_auth.urls',
#]
# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
# OpenStack services are using to determine role based access control in the
# target installation.
# Path to directory containing policy.json files
POLICY_FILES_PATH = '/etc/openstack-dashboard'
# Map of local copies of service policy files.
# Please ensure that your identity policy file matches the one being used on
# your keystone servers. There is an alternate policy file that may be used
# in the Keystone v3 multi-domain case, policy.v3cloudsample.json.
# This file is not included in the Horizon repository by default but can be
# found at
# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \
# policy.v3cloudsample.json
# Having matching policy files on the Horizon and Keystone servers is essential
# for normal operation. This holds true for all services and their policy files.
#POLICY_FILES = {
# 'identity': 'keystone_policy.json',
# 'compute': 'nova_policy.json',
# 'volume': 'cinder_policy.json',
# 'image': 'glance_policy.json',
# 'orchestration': 'heat_policy.json',
# 'network': 'neutron_policy.json',
# 'telemetry': 'ceilometer_policy.json',
#}
# TODO: (david-lyle) remove when plugins support adding settings.
# Note: Only used when trove-dashboard plugin is configured to be used by
# Horizon.
# Trove user and database extension support. By default support for
# creating users and databases on database instances is turned on.
# To disable these extensions set the permission here to something
# unusable such as ["!"].
#TROVE_ADD_USER_PERMS = []
#TROVE_ADD_DATABASE_PERMS = []
# Change this setting to the appropriate list of tuples, each containing
# a key, a label and a static directory containing two files:
# _variables.scss and _styles.scss
#AVAILABLE_THEMES = [
# ('default', 'Default', 'themes/default'),
# ('material', 'Material', 'themes/material'),
#]
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'formatters': {
'operation': {
# The format of "%(message)s" is defined by
# OPERATION_LOG_OPTIONS['format']
'format': '%(asctime)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
# Set the level to "DEBUG" for verbose output logging.
'level': 'INFO',
'class': 'logging.StreamHandler',
},
'operation': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'operation',
},
},
'loggers': {
# Logging from django.db.backends is VERY verbose, send to null
# by default.
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
},
'requests': {
'handlers': ['null'],
'propagate': False,
},
'horizon': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'horizon.operation_log': {
'handlers': ['operation'],
'level': 'INFO',
'propagate': False,
},
'openstack_dashboard': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'novaclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'cinderclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'keystoneclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'glanceclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'neutronclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'heatclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'ceilometerclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'swiftclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'openstack_auth': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'nose.plugins.manager': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'iso8601': {
'handlers': ['null'],
'propagate': False,
},
'scss': {
'handlers': ['null'],
'propagate': False,
},
},
}
# 'direction' should not be specified for all_tcp/udp/icmp.
# It is specified in the form.
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': _('All TCP'),
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': _('All UDP'),
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': _('All ICMP'),
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
'ssh': {
'name': 'SSH',
'ip_protocol': 'tcp',
'from_port': '22',
'to_port': '22',
},
'smtp': {
'name': 'SMTP',
'ip_protocol': 'tcp',
'from_port': '25',
'to_port': '25',
},
'dns': {
'name': 'DNS',
'ip_protocol': 'tcp',
'from_port': '53',
'to_port': '53',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
'pop3': {
'name': 'POP3',
'ip_protocol': 'tcp',
'from_port': '110',
'to_port': '110',
},
'imap': {
'name': 'IMAP',
'ip_protocol': 'tcp',
'from_port': '143',
'to_port': '143',
},
'ldap': {
'name': 'LDAP',
'ip_protocol': 'tcp',
'from_port': '389',
'to_port': '389',
},
'https': {
'name': 'HTTPS',
'ip_protocol': 'tcp',
'from_port': '443',
'to_port': '443',
},
'smtps': {
'name': 'SMTPS',
'ip_protocol': 'tcp',
'from_port': '465',
'to_port': '465',
},
'imaps': {
'name': 'IMAPS',
'ip_protocol': 'tcp',
'from_port': '993',
'to_port': '993',
},
'pop3s': {
'name': 'POP3S',
'ip_protocol': 'tcp',
'from_port': '995',
'to_port': '995',
},
'ms_sql': {
'name': 'MS SQL',
'ip_protocol': 'tcp',
'from_port': '1433',
'to_port': '1433',
},
'mysql': {
'name': 'MYSQL',
'ip_protocol': 'tcp',
'from_port': '3306',
'to_port': '3306',
},
'rdp': {
'name': 'RDP',
'ip_protocol': 'tcp',
'from_port': '3389',
'to_port': '3389',
},
}
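# Additional rules can be appended in the same shape. For illustration only,
# a hypothetical alternate-HTTP entry:
#SECURITY_GROUP_RULES['http_alt'] = {
#    'name': 'HTTP-ALT',
#    'ip_protocol': 'tcp',
#    'from_port': '8080',
#    'to_port': '8080',
#}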
# Deprecation Notice:
#
# The setting FLAVOR_EXTRA_KEYS has been deprecated.
# Please load extra spec metadata into the Glance Metadata Definition Catalog.
#
# The sample quota definitions can be found in:
# <glance_source>/etc/metadefs/compute-quota.json
#
# The metadata definition catalog supports CLI and API:
# $glance --os-image-api-version 2 help md-namespace-import
# $glance-manage db_load_metadefs <directory_with_definition_files>
#
# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
# TODO: (david-lyle) remove when plugins support settings natively
# Note: This is only used when the Sahara plugin is configured and enabled
# for use in Horizon.
# Indicate to the Sahara data processing service whether or not
# automatic floating IP allocation is in effect. If it is not
# in effect, the user will be prompted to choose a floating IP
# pool for use in their cluster. False by default. You would want
# to set this to True if you were running Nova Networking with
# auto_assign_floating_ip = True.
#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
# The hash algorithm to use for authentication tokens. This must
# match the hash algorithm that the identity server and the
# auth_token middleware are using. Allowed values are the
# algorithms supported by Python's hashlib library.
#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
# AngularJS requires some settings to be made available to
# the client side. Some settings are required by in-tree / built-in horizon
# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
# form of ['SETTING_1','SETTING_2'], etc.
#
# You may remove settings from this list for security purposes, but do so at
# the risk of breaking a built-in horizon feature. These settings are required
# for horizon to function properly. Only remove them if you know what you
# are doing. These settings may in the future be moved to be defined within
# the enabled panel configuration.
# You should not add settings to this list for out of tree extensions.
# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
'LAUNCH_INSTANCE_DEFAULTS',
'OPENSTACK_IMAGE_FORMATS']
# Additional settings can be made available to the client side for
# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
# !! Please use extreme caution as the settings are transferred via HTTP/S
# and are not encrypted on the browser. This is an experimental API and
# may be deprecated in the future without notice.
#REST_API_ADDITIONAL_SETTINGS = []
# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
# Scripting (XFS) vulnerability, so this option allows extra security hardening
# where iframes are not used in deployment. Default setting is True.
# For more information see:
# http://tinyurl.com/anticlickjack
#DISALLOW_IFRAME_EMBED = True
# A help URL can be made available to the client. To provide a help URL, set
# the following attribute to the URL of your choice.
#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org"
# Settings for OperationLogMiddleware
# OPERATION_LOG_ENABLED is a flag that enables logging of operations performed
# through Horizon.
# mask_fields is the list of field names whose values should be masked in the
# log output.
# target_methods is the list of HTTP methods for which requests are logged.
# format defines the log line contents.
#OPERATION_LOG_ENABLED = False
#OPERATION_LOG_OPTIONS = {
# 'mask_fields': ['password'],
# 'target_methods': ['POST'],
# 'format': ("[%(domain_name)s] [%(domain_id)s] [%(project_name)s]"
# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]"
# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]"
# " [%(http_status)s] [%(param)s]"),
#}
# The default date range in the Overview panel meters - either <today> minus N
# days (if the value is integer N), or from the beginning of the current month
# until today (if set to None). This setting should be used to limit the amount
# of data fetched by default when rendering the Overview panel.
#OVERVIEW_DAYS_RANGE = 1
# To allow operators to require that admin users provide search criteria
# before any data is loaded into the admin views, set the following attribute
# to True.
#ADMIN_FILTER_DATA_FIRST=False

View File

@ -1,66 +0,0 @@
---
project_name: "ironic"
####################
# Database
####################
ironic_database_name: "ironic"
ironic_database_user: "ironic"
ironic_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
ironic_inspector_database_name: "ironic_inspector"
ironic_inspector_database_user: "ironic_inspector"
ironic_inspector_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
####################
# Docker
####################
ironic_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-api"
ironic_api_tag: "{{ openstack_release }}"
ironic_api_image_full: "{{ ironic_api_image }}:{{ ironic_api_tag }}"
ironic_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-conductor"
ironic_conductor_tag: "{{ openstack_release }}"
ironic_conductor_image_full: "{{ ironic_conductor_image }}:{{ ironic_conductor_tag }}"
ironic_pxe_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-pxe"
ironic_pxe_tag: "{{ openstack_release }}"
ironic_pxe_image_full: "{{ ironic_pxe_image }}:{{ ironic_pxe_tag }}"
ironic_inspector_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-inspector"
ironic_inspector_tag: "{{ openstack_release }}"
ironic_inspector_image_full: "{{ ironic_inspector_image }}:{{ ironic_inspector_tag }}"
ironic_dnsmasq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-dnsmasq"
ironic_dnsmasq_tag: "{{ openstack_release }}"
ironic_dnsmasq_image_full: "{{ ironic_dnsmasq_image }}:{{ ironic_dnsmasq_tag }}"
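# Illustration only (hypothetical values): with docker_registry
# "registry.example.com:5000", docker_namespace "kolla", kolla_base_distro
# "centos", kolla_install_type "binary" and openstack_release "4.0.0",
# ironic_api_image_full would render as
# "registry.example.com:5000/kolla/centos-binary-ironic-api:4.0.0".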
####################
# OpenStack
####################
ironic_inspector_keystone_user: "ironic-inspector"
ironic_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}"
ironic_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}"
ironic_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ ironic_api_port }}"
ironic_inspector_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_inspector_port }}"
ironic_inspector_internal_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_inspector_port }}"
ironic_inspector_public_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_inspector_port }}"
ironic_logging_debug: "{{ openstack_logging_debug }}"
openstack_ironic_auth: "{{ openstack_auth }}"
openstack_ironic_inspector_auth: "{{ openstack_auth }}"
#########
# Ironic
#########
ironic_dnsmasq_interface: "{{ api_interface }}"
ironic_dnsmasq_dhcp_range:
ironic_cleaning_network:

View File

@ -1,90 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
recurse: yes
with_items:
- "ironic-api"
- "ironic-conductor"
- "ironic-conductor-tftp"
- "ironic-inspector"
- "ironic-inspector-tftp"
- "ironic-dnsmasq"
- name: Copying over config.json files for services
template:
src: "{{ item }}.json.j2"
dest: "{{ node_config_directory }}/{{ item }}/config.json"
with_items:
- "ironic-api"
- "ironic-conductor"
- "ironic-conductor-tftp"
- "ironic-inspector"
- "ironic-inspector-tftp"
- "ironic-dnsmasq"
- name: Copying over ironic.conf
merge_configs:
vars:
service_name: "{{ item }}"
sources:
- "{{ role_path }}/templates/ironic.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/database.conf"
- "{{ node_custom_config }}/messaging.conf"
- "{{ node_custom_config }}/ironic.conf"
- "{{ node_custom_config }}/ironic/{{ item }}.conf"
- "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/ironic.conf"
dest: "{{ node_config_directory }}/{{ item }}/ironic.conf"
with_items:
- "ironic-api"
- "ironic-conductor"
- name: Copying over inspector.conf
merge_configs:
vars:
service_name: "ironic-inspector"
sources:
- "{{ role_path }}/templates/ironic-inspector.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/database.conf"
- "{{ node_custom_config }}/messaging.conf"
- "{{ node_custom_config }}/ironic-inspector.conf"
- "{{ node_custom_config }}/ironic-inspector/inspector.conf"
- "{{ node_custom_config }}/ironic-inspector/{{ inventory_hostname }}/inspector.conf"
dest: "{{ node_config_directory }}/ironic-inspector/inspector.conf"
- name: Copying over dnsmasq.conf
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/ironic-dnsmasq/dnsmasq.conf"
with_first_found:
- "{{ node_custom_config }}/ironic/ironic-dnsmasq.conf"
- "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/ironic-dnsmasq.conf"
- "ironic-dnsmasq.conf.j2"
- name: Copying pxelinux.cfg default
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/ironic-inspector-tftp/default"
with_first_found:
- "{{ node_custom_config }}/ironic/pxelinux.default"
- "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/pxelinux.default"
- "pxelinux.default.j2"
- name: Check if policies shall be overwritten
local_action: stat path="{{ node_custom_config }}/ironic/policy.json"
register: ironic_policy
- name: Copying over existing policy.json
template:
src: "{{ node_custom_config }}/ironic/policy.json"
dest: "{{ node_config_directory }}/{{ item }}/policy.json"
with_items:
- "ironic-api"
- "ironic-conductor"
- "ironic-inspector"
- "ironic-inspector-tftp"
when:
ironic_policy.stat.exists

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,25 +0,0 @@
{
"command": "ironic-api",
"config_files": [
{
"source": "{{ container_config_directory }}/ironic.conf",
"dest": "/etc/ironic/ironic.conf",
"owner": "ironic",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/ironic/policy.json",
"owner": "ironic",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/ironic",
"owner": "ironic:ironic",
"recurse": true
}
]
}

View File

@ -1,11 +0,0 @@
{
"command": "/usr/sbin/in.tftpd --verbose --foreground --user root --address 0.0.0.0:69 --map-file /map-file /tftpboot",
"config_files": [],
"permissions": [
{
"path": "/tftpboot/pxelinux.cfg",
"owner": "ironic:ironic",
"recurse": true
}
]
}

View File

@ -1,35 +0,0 @@
{
"command": "ironic-conductor",
"config_files": [
{
"source": "{{ container_config_directory }}/ironic.conf",
"dest": "/etc/ironic/ironic.conf",
"owner": "ironic",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/ironic/policy.json",
"owner": "ironic",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/ironic",
"owner": "ironic:ironic",
"recurse": true
},
{
"path": "/var/lib/ironic",
"owner": "ironic:ironic",
"recurse": true
},
{
"path": "/tftpboot",
"owner": "ironic:ironic",
"recurse": true
}
]
}

View File

@ -1,9 +0,0 @@
port=0
interface={{ api_interface }}
dhcp-range={{ ironic_dnsmasq_dhcp_range }}
dhcp-option=option:tftp-server,{{ kolla_internal_vip_address }}
dhcp-option=option:server-ip-address,{{ kolla_internal_vip_address }}
bind-interfaces
dhcp-sequential-ip
dhcp-option=option:bootfile-name,pxelinux.0
dhcp-option=210,/tftpboot/

View File

@ -1,11 +0,0 @@
{
"command": "dnsmasq --no-daemon --conf-file=/etc/dnsmasq.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/dnsmasq.conf",
"dest": "/etc/dnsmasq.conf",
"owner": "root",
"perm": "0600"
}
]
}

View File

@ -1,18 +0,0 @@
{
"command": "/usr/sbin/in.tftpd --verbose --foreground --user root --address 0.0.0.0:69 --map-file /map-file /tftpboot",
"config_files": [
{
"source": "{{ container_config_directory }}/default",
"dest": "/tftpboot/pxelinux.cfg/default",
"owner": "root",
"perm": "0644"
}
],
"permissions": [
{
"path": "/tftpboot/pxelinux.cfg",
"owner": "ironic:ironic",
"recurse": true
}
]
}

View File

@ -1,41 +0,0 @@
[DEFAULT]
debug = {{ ironic_logging_debug }}
{% if std_logger %}
use_syslog = False
use_stderr = True
{% else %}
log_dir = /var/log/kolla/ironic
{% endif %}
listen_address = 0.0.0.0
listen_port = {{ ironic_inspector_port }}
[ironic]
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ ironic_inspector_keystone_user }}
password = {{ ironic_inspector_keystone_password }}
[keystone_authtoken]
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ ironic_inspector_keystone_user }}
password = {{ ironic_inspector_keystone_password }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcache_servers = {{ memcached_servers }}
[firewall]
dnsmasq_interface = undefined
[database]
connection = mysql+pymysql://{{ ironic_inspector_database_user }}:{{ ironic_inspector_database_password }}@{{ ironic_inspector_database_address }}/{{ ironic_inspector_database_name }}

View File

@ -1,25 +0,0 @@
{
"command": "ironic-inspector --config-file /etc/ironic-inspector/inspector.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/inspector.conf",
"dest": "/etc/ironic-inspector/inspector.conf",
"owner": "ironic",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/ironic/policy.json",
"owner": "ironic",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/ironic",
"owner": "ironic:ironic",
"recurse": true
}
]
}

View File

@ -1,63 +0,0 @@
[DEFAULT]
debug = {{ ironic_logging_debug }}
{% if std_logger %}
use_syslog = False
use_stderr = True
{% else %}
log_dir = /var/log/kolla/ironic
{% endif %}
admin_user = {{ openstack_auth.username }}
admin_password = {{ keystone_admin_password }}
enabled_drivers = pxe_ipmitool
transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }}
{% if service_name == 'ironic-api' %}
[api]
host_ip = 0.0.0.0
api_workers = {{ openstack_service_workers }}
{% endif %}
{% if service_name == 'ironic-conductor' %}
[conductor]
api_url = {{ internal_protocol }}://ironic-api:{{ ironic_api_port }}
automated_clean=false
{% endif %}
[database]
connection = mysql+pymysql://{{ ironic_database_user }}:{{ ironic_database_password }}@{{ ironic_database_address }}/{{ ironic_database_name }}
max_retries = -1
[keystone_authtoken]
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ ironic_keystone_user }}
password = {{ ironic_keystone_password }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcache_servers = {{ memcached_servers }}
[glance]
glance_host = {{ internal_protocol }}://glance-api:{{ glance_api_port }}
[neutron]
url = {{ internal_protocol }}://neutron-server:{{ neutron_server_port }}
cleaning_network = {{ ironic_cleaning_network }}
[inspector]
enabled = true
[agent]
deploy_logs_local_path = /var/log/kolla/ironic
deploy_logs_storage_backend = local
deploy_logs_collect = always

View File

@ -1,7 +0,0 @@
default introspect
label introspect
kernel ironic-agent.kernel
append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://ironic-inspector:{{ ironic_inspector_port }}/v1/continue systemd.journald.forward_to_console=yes
ipappend 3

View File

@ -1,13 +0,0 @@
---
project_name: "iscsi"
####################
# Docker
####################
iscsid_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-iscsid"
iscsid_tag: "{{ openstack_release }}"
iscsid_image_full: "{{ iscsid_image }}:{{ iscsid_tag }}"
tgtd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-tgtd"
tgtd_tag: "{{ openstack_release }}"
tgtd_image_full: "{{ tgtd_image }}:{{ tgtd_tag }}"

View File

@ -1,34 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
recurse: yes
with_items:
- "iscsid"
- name: Copying over config.json files for services
template:
src: "{{ item }}.json.j2"
dest: "{{ node_config_directory }}/{{ item }}/config.json"
with_items:
- "iscsid"
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
recurse: yes
when:
- enable_cinder_backend_lvm | bool
with_items:
- "tgtd"
- name: Copying over config.json files for services
template:
src: "{{ item }}.json.j2"
dest: "{{ node_config_directory }}/{{ item }}/config.json"
when:
- enable_cinder_backend_lvm | bool
with_items:
- "tgtd"

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,4 +0,0 @@
{
"command": "iscsid -d 8 -f --pid=/run/iscsid.pid",
"config_files": []
}

View File

@ -1,4 +0,0 @@
{
"command": "tgtd -d 1 -f --iscsi portal={{ api_interface_address }}:{{ iscsi_port }}",
"config_files": []
}

View File

@ -1,38 +0,0 @@
---
project_name: "keystone"
keystone_services:
keystone:
container_name: "keystone"
group: "keystone"
enabled: true
image: "{{ keystone_image_full }}"
####################
# Database
####################
keystone_database_name: "keystone"
keystone_database_user: "keystone"
keystone_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
####################
# Fernet
####################
keystone_username: "keystone"
keystone_groupname: "keystone"
####################
# Docker
####################
keystone_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-keystone"
keystone_tag: "{{ openstack_release }}"
keystone_image_full: "{{ keystone_image }}:{{ keystone_tag }}"
####################
# OpenStack
####################
keystone_logging_debug: "{{ openstack_logging_debug }}"
openstack_keystone_auth: "{{ openstack_auth }}"

View File

@ -1,111 +0,0 @@
---
- name: Check if policies shall be overwritten
local_action: stat path="{{ node_custom_config }}/keystone/policy.json"
register: keystone_policy
- name: Check if Keystone Domain specific settings enabled
local_action: stat path="{{ node_custom_config }}/keystone/domains"
register: keystone_domain_directory
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
recurse: yes
when:
- item.value.enabled | bool
with_dict: "{{ keystone_services }}"
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
register: keystone_config_jsons
with_dict: "{{ keystone_services }}"
when:
- item.value.enabled | bool
- name: Copying over keystone.conf
merge_configs:
vars:
service_name: "{{ item.key }}"
sources:
- "{{ role_path }}/templates/keystone.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/database.conf"
- "{{ node_custom_config }}/messaging.conf"
- "{{ node_custom_config }}/keystone.conf"
- "{{ node_custom_config }}/keystone/{{ item.key }}.conf"
- "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/keystone.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/keystone.conf"
register: keystone_confs
with_dict: "{{ keystone_services }}"
when:
- item.key == "keystone"
- item.value.enabled | bool
- name: Creating Keystone Domain directory
vars:
keystone: "{{ keystone_services.keystone }}"
file:
dest: "{{ node_config_directory }}/keystone/domains/"
state: "directory"
when:
- keystone.enabled | bool
- keystone_domain_directory.stat.exists
- name: Get file list in custom domains folder
local_action: find path="{{ node_custom_config }}/keystone/domains" recurse=no file_type=file
register: keystone_domains
when: keystone_domain_directory.stat.exists
- name: Copying Keystone Domain specific settings
vars:
keystone: "{{ keystone_services.keystone }}"
copy:
src: "{{ item.path }}"
dest: "{{ node_config_directory }}/keystone/domains/"
register: keystone_domains
when:
- keystone.enabled | bool
- keystone_domain_directory.stat.exists
with_items: "{{ keystone_domains.files|default([]) }}"
- name: Copying over existing policy.json
template:
src: "{{ node_custom_config }}/keystone/policy.json"
dest: "{{ node_config_directory }}/{{ item.key }}/policy.json"
register: keystone_policy_jsons
when:
- item.key == "keystone"
- item.value.enabled | bool
- keystone_policy.stat.exists
with_dict: "{{ keystone_services }}"
- name: Copying over wsgi-keystone.conf
vars:
keystone: "{{ keystone_services.keystone }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/keystone/wsgi-keystone.conf"
register: keystone_wsgi
when:
- keystone.enabled | bool
with_first_found:
- "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/wsgi-keystone.conf"
- "{{ node_custom_config }}/keystone/wsgi-keystone.conf"
- "wsgi-keystone.conf.j2"
- name: Copying over keystone-paste.ini
vars:
keystone: "{{ keystone_services.keystone }}"
merge_configs:
sources:
- "{{ role_path }}/templates/keystone-paste.ini.j2"
- "{{ node_custom_config }}/keystone/keystone-paste.ini"
- "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/keystone-paste.ini"
dest: "{{ node_config_directory }}/keystone/keystone-paste.ini"
register: keystone_paste_ini
when:
- keystone.enabled | bool

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,83 +0,0 @@
# Keystone PasteDeploy configuration file.
[filter:debug]
use = egg:oslo.middleware#debug
[filter:request_id]
use = egg:oslo.middleware#request_id
[filter:build_auth_context]
use = egg:keystone#build_auth_context
[filter:token_auth]
use = egg:keystone#token_auth
[filter:json_body]
use = egg:keystone#json_body
[filter:cors]
use = egg:oslo.middleware#cors
oslo_config_project = keystone
[filter:ec2_extension]
use = egg:keystone#ec2_extension
[filter:ec2_extension_v3]
use = egg:keystone#ec2_extension_v3
[filter:s3_extension]
use = egg:keystone#s3_extension
[filter:url_normalize]
use = egg:keystone#url_normalize
[filter:sizelimit]
use = egg:oslo.middleware#sizelimit
[app:public_service]
use = egg:keystone#public_service
[app:service_v3]
use = egg:keystone#service_v3
[app:admin_service]
use = egg:keystone#admin_service
[pipeline:public_api]
# The last item in this pipeline must be public_service or an equivalent
# application. It cannot be a filter.
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension public_service
[pipeline:admin_api]
# The last item in this pipeline must be admin_service or an equivalent
# application. It cannot be a filter.
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension s3_extension admin_service
[pipeline:api_v3]
# The last item in this pipeline must be service_v3 or an equivalent
# application. It cannot be a filter.
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
[app:public_version_service]
use = egg:keystone#public_version_service
[app:admin_version_service]
use = egg:keystone#admin_version_service
[pipeline:public_version_api]
pipeline = cors sizelimit url_normalize public_version_service
[pipeline:admin_version_api]
pipeline = cors sizelimit url_normalize admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api

View File

@ -1,55 +0,0 @@
[DEFAULT]
debug = {{ keystone_logging_debug }}
{% if enable_cadf_notifications | bool %}
notification_format = cadf
transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }}
{% endif %}
{% if std_logger %}
use_syslog = False
use_stderr = True
{% else %}
# NOTE(elemoine) log_dir alone does not work for Keystone
log_file = /var/log/kolla/keystone/keystone.log
use_stderr = True
{% endif %}
secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
[database]
connection = mysql+pymysql://{{ keystone_database_user }}:{{ keystone_database_password }}@{{ keystone_database_address }}/{{ keystone_database_name }}
max_retries = -1
{% if keystone_domain_directory.stat.exists %}
[identity]
domain_specific_drivers_enabled = true
domain_config_dir = /etc/keystone/domains
{% endif %}
[token]
revoke_by_id = False
{% if keystone_token_provider == 'uuid' %}
provider = uuid
{% elif keystone_token_provider == 'fernet' %}
provider = {{ keystone_token_provider }}
expiration = {{ fernet_token_expiry }}
[fernet_tokens]
max_active_keys = 3
{% endif %}
[cache]
backend = oslo_cache.memcache_pool
enabled = True
{# For Kolla-Ansible, generate the memcache servers based on the list of
memcached servers in the inventory and memcached_servers should be un-set.
For Kolla-Kubernetes, it is necessary to define the memcached_servers
variable in globals.yml to set it to the Kubernetes service for memcached. #}
memcache_servers = {{ memcached_servers }}:{{ memcached_port }}
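{# Illustration only, with hypothetical values: if memcached_servers is set to
   the "memcached" Kubernetes service and memcached_port is 11211, the line
   above renders as "memcache_servers = memcached:11211". #}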
{% if enable_cadf_notifications | bool %}
[oslo_messaging_notifications]
driver = messagingv2
{% endif %}

View File

@ -1,49 +0,0 @@
{% set keystone_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
{% set keystone_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
{
"command": "/usr/sbin/{{ keystone_cmd }}",
"config_files": [
{
"source": "{{ container_config_directory }}/keystone.conf",
"dest": "/etc/keystone/keystone.conf",
"owner": "keystone",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/keystone-paste.ini",
"dest": "/etc/keystone/keystone-paste.ini",
"owner": "keystone",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/domains",
"dest": "/etc/keystone/domains",
"owner": "keystone",
"perm": "0700",
"optional": true
},
{
"source": "{{ container_config_directory }}/policy.json",
"dest": "/etc/keystone/policy.json",
"owner": "keystone",
"perm": "0600",
"optional": true
},
{
"source": "{{ container_config_directory }}/wsgi-keystone.conf",
"dest": "/etc/{{ keystone_dir }}/wsgi-keystone.conf",
"owner": "keystone",
"perm": "0644"
}
],
"permissions": [
{
"path": "/var/log/kolla",
"owner": "keystone:kolla"
},
{
"path": "/var/log/kolla/keystone/keystone.log",
"owner": "keystone:keystone"
}
]
}

View File

@ -1,42 +0,0 @@
{% set keystone_log_dir = '/var/log/kolla/keystone' %}
{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %}
Listen {{ api_interface_address }}:{{ keystone_public_port }}
Listen {{ api_interface_address }}:{{ keystone_admin_port }}
<VirtualHost *:{{ keystone_public_port }}>
WSGIDaemonProcess keystone-public processes={{ openstack_service_workers }} threads=1 user=keystone group=keystone display-name=%{GROUP} python-path={{ python_path }}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /var/www/cgi-bin/keystone/main
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
{% if std_logger %}
ErrorLog /proc/self/fd/2
CustomLog /proc/self/fd/1 combined
{% else %}
ErrorLog "{{ keystone_log_dir }}/keystone-apache-public-error.log"
CustomLog "{{ keystone_log_dir }}/keystone-apache-public-access.log" logformat
{% endif %}
</VirtualHost>
<VirtualHost *:{{ keystone_admin_port }}>
WSGIDaemonProcess keystone-admin processes={{ openstack_service_workers }} threads=1 user=keystone group=keystone display-name=%{GROUP} python-path={{ python_path }}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /var/www/cgi-bin/keystone/admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
{% if std_logger %}
ErrorLog /proc/self/fd/2
CustomLog /proc/self/fd/1 combined
{% else %}
ErrorLog "{{ keystone_log_dir }}/keystone-apache-admin-error.log"
CustomLog "{{ keystone_log_dir }}/keystone-apache-admin-access.log" logformat
{% endif %}
</VirtualHost>

View File

@ -1,15 +0,0 @@
---
project_name: "mariadb"
####################
# Database
####################
database_cluster_name: "openstack"
database_max_timeout: 60
####################
# Docker
####################
mariadb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-mariadb"
mariadb_tag: "{{ openstack_release }}"
mariadb_image_full: "{{ mariadb_image }}:{{ mariadb_tag }}"

View File

@ -1,27 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
recurse: yes
with_items:
- "mariadb"
- name: Copying over config.json files for services
template:
src: "{{ item }}.json.j2"
dest: "{{ node_config_directory }}/{{ item }}/config.json"
with_items:
- "mariadb"
- name: Copying over galera.cnf
merge_configs:
vars:
service_name: "{{ item }}"
sources:
- "{{ role_path }}/templates/galera.cnf.j2"
- "{{ node_custom_config }}/galera.cnf"
- "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf"
dest: "{{ node_config_directory }}/{{ item }}/galera.cnf"
with_items:
- "mariadb"

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,41 +0,0 @@
[client]
default-character-set=utf8
[mysql]
default-character-set=utf8
[mysqld]
bind-address={{ api_interface_address }}
port={{ mariadb_port }}
{% if not std_logger %}
log-error=/var/log/kolla/mariadb/mariadb.log
{% endif %}
log-bin=mysql-bin
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
collation-server = utf8_general_ci
init-connect='SET NAMES utf8'
character-set-server = utf8
datadir=/var/lib/mysql/
wsrep_provider=none
max_connections=10000
key_buffer_size = '64M'
max_heap_table_size = '64M'
tmp_table_size = '64M'
{% set dynamic_pool_size_mb = (hostvars[inventory_hostname]['ansible_memtotal_mb'] * 0.4) | round | int %}
{% if dynamic_pool_size_mb < 8192 %}
innodb_buffer_pool_size = '{{ dynamic_pool_size_mb }}M'
{% else %}
innodb_buffer_pool_size = '8192M'
{% endif %}
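{# Worked example of the sizing rule above, for a hypothetical host: with
   ansible_memtotal_mb = 16384, 16384 * 0.4 rounds to 6554, which is below the
   8192 MB cap, so innodb_buffer_pool_size = '6554M'. #}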
[server]
pid-file=/var/lib/mysql/mariadb.pid

View File

@ -1,24 +0,0 @@
{% set mysql_dir = 'mysql' if kolla_base_distro in ['ubuntu', 'debian'] else '' %}
{
"command": "/usr/bin/mysqld_safe",
"config_files": [
{
"source": "{{ container_config_directory }}/galera.cnf",
"dest": "/etc/{{ mysql_dir }}/my.cnf",
"owner": "mysql",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/mariadb",
"owner": "mysql:mysql",
"recurse": true
},
{
"path": "/var/lib/mysql",
"owner": "mysql:mysql",
"recurse": true
}
]
}

View File

@ -1,25 +0,0 @@
---
project_name: "memcached"
memcached_services:
memcached:
container_name: "memcached"
image: "{{ memcached_image_full }}"
enabled: true
group: "memcached"
volumes:
- "{{ node_config_directory }}/memcached/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
####################
# Docker
####################
memcached_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-memcached"
memcached_tag: "{{ openstack_release }}"
memcached_image_full: "{{ memcached_image }}:{{ memcached_tag }}"
####################
# Memcached options
####################
memcached_connection_limit: "5000"

View File

@ -1,16 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
recurse: yes
with_items:
- "memcached"
- name: Copying over config.json files for services
template:
src: "{{ item }}.json.j2"
dest: "{{ node_config_directory }}/{{ item }}/config.json"
register: memcached_config_json
with_items:
- "memcached"

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,4 +0,0 @@
{
"command": "/usr/bin/memcached -vv -l {{ api_interface_address }} -p {{ memcached_port }} -c {{ memcached_connection_limit }} -P /memcached/memcached.pid",
"config_files": []
}

View File

@ -1,262 +0,0 @@
---
project_name: "neutron"
neutron_services:
openvswitch-db-server:
container_name: "openvswitch_db"
image: "{{ openvswitch_db_image_full }}"
enabled: "{{ neutron_plugin_agent == 'openvswitch' }}"
volumes:
- "{{ node_config_directory }}/openvswitch-db-server/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/run:/run:shared"
- "kolla_logs:/var/log/kolla/"
- "openvswitch_db:/var/lib/openvswitch/"
openvswitch-vswitchd:
container_name: "openvswitch_vswitchd"
image: "{{ openvswitch_vswitchd_image_full }}"
enabled: "{{ neutron_plugin_agent == 'openvswitch' }}"
privileged: True
volumes:
- "{{ node_config_directory }}/openvswitch-vswitchd/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "kolla_logs:/var/log/kolla/"
neutron-server:
container_name: "neutron_server"
image: "{{ neutron_server_image_full }}"
enabled: true
group: "neutron-server"
volumes:
- "{{ node_config_directory }}/neutron-server/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
neutron-openvswitch-agent:
container_name: "neutron_openvswitch_agent"
image: "{{ neutron_openvswitch_agent_image_full }}"
enabled: "{{ neutron_plugin_agent == 'openvswitch' }}"
privileged: True
volumes:
- "{{ node_config_directory }}/neutron-openvswitch-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "kolla_logs:/var/log/kolla/"
neutron-sfc-agent:
container_name: "neutron_sfc_agent"
image: "{{ neutron_sfc_agent_image_full }}"
enabled: "{{ neutron_plugin_agent == 'sfc' }}"
privileged: True
volumes:
- "{{ node_config_directory }}/neutron-sfc-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "kolla_logs:/var/log/kolla/"
neutron-linuxbridge-agent:
container_name: "neutron_linuxbridge_agent"
image: "{{ neutron_linuxbridge_agent_image_full }}"
privileged: True
enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}"
environment:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
NEUTRON_BRIDGE: "br-ex"
NEUTRON_INTERFACE: "{{ neutron_external_interface }}"
volumes:
- "{{ node_config_directory }}/neutron-linuxbridge-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "kolla_logs:/var/log/kolla/"
neutron-dhcp-agent:
container_name: "neutron_dhcp_agent"
image: "{{ neutron_dhcp_agent_image_full }}"
privileged: True
enabled: True
group: "neutron-dhcp-agent"
volumes:
- "{{ node_config_directory }}/neutron-dhcp-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/run/:/run/:shared"
- "/run/netns/:/run/netns/:shared"
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "kolla_logs:/var/log/kolla/"
neutron-l3-agent:
container_name: "neutron_l3_agent"
image: "{{ neutron_l3_agent_image_full }}"
privileged: True
enabled: "{{ not enable_neutron_vpnaas | bool }}"
volumes:
- "{{ node_config_directory }}/neutron-l3-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/run:/run:shared"
- "/run/netns/:/run/netns/:shared"
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "kolla_logs:/var/log/kolla/"
neutron-lbaas-agent:
container_name: "neutron_lbaas_agent"
image: "{{ neutron_lbaas_agent_image_full }}"
privileged: True
enabled: "{{ enable_neutron_lbaas | bool }}"
group: "neutron-lbaas-agent"
volumes:
- "{{ node_config_directory }}/neutron-lbaas-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/run:/run:shared"
- "/run/netns/:/run/netns/:shared"
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "kolla_logs:/var/log/kolla/"
neutron-metadata-agent:
container_name: "neutron_metadata_agent"
image: "{{ neutron_metadata_agent_image_full }}"
privileged: True
enabled: true
volumes:
- "{{ node_config_directory }}/neutron-metadata-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/run/netns/:/run/netns/:shared"
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "kolla_logs:/var/log/kolla/"
neutron-vpnaas-agent:
container_name: "neutron_vpnaas_agent"
image: "{{ neutron_vpnaas_agent_image_full }}"
privileged: True
enabled: "{{ enable_neutron_vpnaas | bool }}"
group: "neutron-vpnaas-agent"
volumes:
- "{{ node_config_directory }}/neutron-vpnaas-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/run:/run:shared"
- "/run/netns/:/run/netns/:shared"
- "/lib/modules:/lib/modules:ro"
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "kolla_logs:/var/log/kolla/"
neutron-bgp-dragent:
container_name: "neutron_bgp_dragent"
image: "{{ neutron_bgp_dragent_image_full }}"
privileged: True
enabled: "{{ enable_neutron_bgp_dragent | bool }}"
group: "neutron-bgp-dragent"
volumes:
- "{{ node_config_directory }}/neutron-bgp-dragent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/run:/run:shared"
- "/run/netns/:/run/netns/:shared"
- "/lib/modules:/lib/modules:ro"
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "kolla_logs:/var/log/kolla/"
####################
# Database
####################
neutron_database_name: "neutron"
neutron_database_user: "neutron"
neutron_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
####################
# Docker
####################
neutron_dhcp_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-dhcp-agent"
neutron_dhcp_agent_tag: "{{ openstack_release }}"
neutron_dhcp_agent_image_full: "{{ neutron_dhcp_agent_image }}:{{ neutron_dhcp_agent_tag }}"
neutron_l3_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-l3-agent"
neutron_l3_agent_tag: "{{ openstack_release }}"
neutron_l3_agent_image_full: "{{ neutron_l3_agent_image }}:{{ neutron_l3_agent_tag }}"
neutron_lbaas_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-lbaas-agent"
neutron_lbaas_agent_tag: "{{ openstack_release }}"
neutron_lbaas_agent_image_full: "{{ neutron_lbaas_agent_image }}:{{ neutron_lbaas_agent_tag }}"
neutron_linuxbridge_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-linuxbridge-agent"
neutron_linuxbridge_agent_tag: "{{ openstack_release }}"
neutron_linuxbridge_agent_image_full: "{{ neutron_linuxbridge_agent_image }}:{{ neutron_linuxbridge_agent_tag }}"
neutron_metadata_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-metadata-agent"
neutron_metadata_agent_tag: "{{ openstack_release }}"
neutron_metadata_agent_image_full: "{{ neutron_metadata_agent_image }}:{{ neutron_metadata_agent_tag }}"
neutron_openvswitch_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-openvswitch-agent"
neutron_openvswitch_agent_tag: "{{ openstack_release }}"
neutron_openvswitch_agent_image_full: "{{ neutron_openvswitch_agent_image }}:{{ neutron_openvswitch_agent_tag }}"
neutron_sfc_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-sfc-agent"
neutron_sfc_agent_tag: "{{ openstack_release }}"
neutron_sfc_agent_image_full: "{{ neutron_sfc_agent_image }}:{{ neutron_sfc_agent_tag }}"
neutron_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-server"
neutron_server_tag: "{{ openstack_release }}"
neutron_server_image_full: "{{ neutron_server_image }}:{{ neutron_server_tag }}"
neutron_vpnaas_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-vpnaas-agent"
neutron_vpnaas_agent_tag: "{{ openstack_release }}"
neutron_vpnaas_agent_image_full: "{{ neutron_vpnaas_agent_image }}:{{ neutron_vpnaas_agent_tag }}"
neutron_bgp_dragent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-bgp-dragent"
neutron_bgp_dragent_tag: "{{ openstack_release }}"
neutron_bgp_dragent_image_full: "{{ neutron_bgp_dragent_image }}:{{ neutron_bgp_dragent_tag }}"
openvswitch_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-openvswitch-db-server"
openvswitch_db_tag: "{{ openstack_release }}"
openvswitch_db_image_full: "{{ openvswitch_db_image }}:{{ openvswitch_db_tag }}"
openvswitch_vswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-openvswitch-vswitchd"
openvswitch_vswitchd_tag: "{{ openstack_release }}"
openvswitch_vswitchd_image_full: "{{ openvswitch_vswitchd_image }}:{{ openvswitch_vswitchd_tag }}"
####################
# OpenStack
####################
dhcp_agents_per_network: 2
min_l3_agents_per_router: 2
max_l3_agents_per_router: 3
neutron_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}"
neutron_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}"
neutron_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ neutron_server_port }}"
neutron_logging_debug: "{{ openstack_logging_debug }}"
neutron_bridge_name: "br-ex"
openstack_neutron_auth: "{{ openstack_auth }}"
####################
# Extension drivers
####################
extension_drivers:
- name: "qos"
enabled: "{{ enable_neutron_qos | bool }}"
- name: "port_security"
enabled: "{{ enable_tacker | bool or enable_designate | bool }}"
- name: "dns"
enabled: "{{ enable_designate | bool }}"
neutron_extension_drivers: "{{ extension_drivers|selectattr('enabled', 'equalto', true)|list }}"
####################
# Service Plugins
####################
service_plugins:
- name: "flow_classifier"
enabled: "{{ neutron_plugin_agent == 'sfc' }}"
- name: "neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2"
enabled: "{{ enable_neutron_lbaas | bool }}"
- name: "neutron.services.firewall.fwaas_plugin.FirewallPlugin"
enabled: "{{ enable_neutron_fwaas | bool }}"
- name: "neutron_vpnaas.services.vpn.plugin.VPNDriverPlugin"
enabled: "{{ enable_neutron_vpnaas | bool }}"
- name: "qos"
enabled: "{{ enable_neutron_qos | bool }}"
- name: "router"
enabled: true
- name: "sfc"
enabled: "{{ neutron_plugin_agent == 'sfc' }}"
- name: "neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin"
enabled: "{{ enable_neutron_bgp_dragent | bool }}"
neutron_service_plugins: "{{ service_plugins|selectattr('enabled', 'equalto', true)|list }}"
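Both neutron_extension_drivers and neutron_service_plugins are derived the same way: a static candidate list whose entries carry an enabled flag is narrowed with selectattr('enabled', 'equalto', true). The sketch below shows that filter in plain Jinja2 with invented flag values; it maps the result to names only to keep the output short, whereas the role keeps the full dicts:

# Sketch of the selectattr filter used to build neutron_extension_drivers
# and neutron_service_plugins. The flag values are invented; the map/join
# at the end is only there to print something compact.
from jinja2 import Template

tpl = Template(
    "{{ candidates | selectattr('enabled', 'equalto', true)"
    " | map(attribute='name') | join(',') }}"
)

candidates = [
    {"name": "qos", "enabled": True},
    {"name": "port_security", "enabled": False},
    {"name": "dns", "enabled": True},
]

# Only entries whose enabled flag is true survive the filter.
print(tpl.render(candidates=candidates))   # -> qos,dns

In the role the enabled flags are themselves Jinja expressions (enable_neutron_qos and friends) that Ansible resolves when the list is templated, before the filter runs.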

View File

@ -1,52 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}"
state: "directory"
recurse: yes
with_sequence: start=1 end={{ num_nova_fake_per_node }}
when: inventory_hostname in groups['compute']
- name: Copying over config.json files for services
template:
src: "neutron-openvswitch-agent.json.j2"
dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/config.json"
register: fake_config_json
with_sequence: start=1 end={{ num_nova_fake_per_node }}
when:
- inventory_hostname in groups['compute']
- neutron_plugin_agent == "openvswitch"
- name: Copying over neutron.conf
merge_configs:
vars:
service_name: "{{ item }}"
sources:
- "{{ role_path }}/templates/neutron.conf.j2"
- "{{ node_config_directory }}/config/global.conf"
- "{{ node_config_directory }}/config/database.conf"
- "{{ node_config_directory }}/config/messaging.conf"
- "{{ node_config_directory }}/config/neutron.conf"
- "{{ node_config_directory }}/config/neutron/{{ item }}.conf"
- "{{ node_config_directory }}/config/neutron/{{ inventory_hostname }}/neutron.conf"
dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/neutron.conf"
register: fake_neutron_conf
with_sequence: start=1 end={{ num_nova_fake_per_node }}
when:
- inventory_hostname in groups['compute']
- neutron_plugin_agent == "openvswitch"
- name: Copying over ml2_conf.ini
merge_configs:
vars:
service_name: "{{ item }}"
sources:
- "{{ role_path }}/templates/ml2_conf.ini.j2"
- "{{ node_config_directory }}/config/neutron/ml2_conf.ini"
- "{{ node_config_directory }}/config/neutron/{{ inventory_hostname }}/neutron.conf"
dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/ml2_conf.ini"
register: fake_neutron_ml2_conf_ini
with_sequence: start=1 end={{ num_nova_fake_per_node }}
when:
- inventory_hostname in groups['compute']
- neutron_plugin_agent == "openvswitch"
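These tasks fan out one config directory per fake agent by looping with with_sequence from 1 to num_nova_fake_per_node, restricted to hosts in the compute group. A rough sketch of the directory names this produces (both values below are assumptions for illustration):

# Rough sketch of what the with_sequence loop above produces.
# Both values are assumptions for illustration only.
node_config_directory = "/etc/kolla"
num_nova_fake_per_node = 3

for item in range(1, num_nova_fake_per_node + 1):  # with_sequence: start=1 end=N
    # each directory then gets its own config.json, neutron.conf and ml2_conf.ini
    print("{}/neutron-openvswitch-agent-fake-{}".format(node_config_directory, item))
# -> /etc/kolla/neutron-openvswitch-agent-fake-1 ... -fake-3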

View File

@ -1,255 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
recurse: yes
when:
- item.value.enabled | bool
with_dict: "{{ neutron_services }}"
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
register: neutron_config_jsons
when:
- item.value.enabled | bool
with_dict: "{{ neutron_services }}"
- name: Copying over neutron.conf
vars:
service_name: "{{ item.key }}"
services_need_neutron_conf:
- "neutron-dhcp-agent"
- "neutron-l3-agent"
- "neutron-linuxbridge-agent"
- "neutron-metadata-agent"
- "neutron-openvswitch-agent"
- "neutron-server"
- "neutron-lbaas-agent"
- "neutron-vpnaas-agent"
- "neutron-bgp-dragent"
merge_configs:
sources:
- "{{ role_path }}/templates/neutron.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/database.conf"
- "{{ node_custom_config }}/messaging.conf"
- "{{ node_custom_config }}/neutron.conf"
- "{{ node_custom_config }}/neutron/{{ item.key }}.conf"
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/neutron.conf"
register: neutron_confs
when:
- item.value.enabled | bool
- item.key in services_need_neutron_conf
with_dict: "{{ neutron_services }}"
- name: Copying over neutron_lbaas.conf
vars:
service_name: "{{ item.key }}"
services_need_neutron_lbaas_conf:
- "neutron-server"
- "neutron-lbaas-agent"
merge_configs:
sources:
- "{{ role_path }}/templates/neutron_lbaas.conf.j2"
- "{{ node_custom_config }}/neutron/neutron_lbaas.conf"
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron_lbaas.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/neutron_lbaas.conf"
register: neutron_lbaas_confs
when:
- item.value.enabled | bool
- item.key in services_need_neutron_lbaas_conf
with_dict: "{{ neutron_services }}"
- name: Copying over neutron_vpnaas.conf
vars:
service_name: "neutron-server"
neutron_server: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/neutron_vpnaas.conf.j2"
- "{{ node_custom_config }}/neutron/neutron_vpnaas.conf"
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron_vpnaas.conf"
dest: "{{ node_config_directory }}/{{ service_name }}/neutron_vpnaas.conf"
register: neutron_vpnaas_conf
when:
- neutron_server.enabled | bool
- name: Copying over ml2_conf.ini
vars:
service_name: "{{ item.key }}"
services_need_ml2_conf_ini:
- "neutron-dhcp-agent"
- "neutron-l3-agent"
- "neutron-linuxbridge-agent"
- "neutron-lbaas-agent"
- "neutron-metadata-agent"
- "neutron-openvswitch-agent"
- "neutron-server"
- "neutron-vpnaas-agent"
merge_configs:
sources:
- "{{ role_path }}/templates/ml2_conf.ini.j2"
- "{{ node_custom_config }}/neutron/ml2_conf.ini"
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/ml2_conf.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/ml2_conf.ini"
register: neutron_ml2_confs
when:
- item.key in services_need_ml2_conf_ini
- item.value.enabled | bool
with_dict: "{{ neutron_services }}"
- name: Copying over dhcp_agent.ini
vars:
service_name: "neutron-dhcp-agent"
neutron_dhcp_agent: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/dhcp_agent.ini.j2"
- "{{ node_custom_config }}/neutron/dhcp_agent.ini"
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/dhcp_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/dhcp_agent.ini"
register: dhcp_agent_ini
when:
- neutron_dhcp_agent.enabled | bool
- name: Copying over dnsmasq.conf
vars:
service_name: "neutron-dhcp-agent"
neutron_dhcp_agent: "{{ neutron_services[service_name] }}"
template:
src: "dnsmasq.conf.j2"
dest: "{{ node_config_directory }}/{{ service_name }}/dnsmasq.conf"
register: dnsmasq_conf
when:
- neutron_dhcp_agent.enabled | bool
- name: Copying over l3_agent.ini
vars:
service_name: "{{ item.key }}"
services_need_l3_agent_ini:
- "neutron-l3-agent"
- "neutron-vpnaas-agent"
merge_configs:
sources:
- "{{ role_path }}/templates/l3_agent.ini.j2"
- "{{ node_custom_config }}/neutron/l3_agent.ini"
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/l3_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/l3_agent.ini"
register: neutron_l3_agent_inis
when:
- item.key in services_need_l3_agent_ini
- item.value.enabled | bool
with_dict: "{{ neutron_services }}"
- name: Copying over fwaas_driver.ini
vars:
service_name: "{{ item.key }}"
services_need_fwaas_driver_ini:
- "neutron-l3-agent"
- "neutron-vpnaas-agent"
merge_configs:
sources:
- "{{ role_path }}/templates/fwaas_driver.ini.j2"
- "{{ node_custom_config }}/neutron/fwaas_driver.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/fwaas_driver.ini"
register: neutron_fwaas_driver_inis
when:
- item.key in services_need_fwaas_driver_ini
- item.value.enabled | bool
with_dict: "{{ neutron_services }}"
- name: Copying over metadata_agent.ini
vars:
service_name: "neutron-metadata-agent"
neutron_metadata_agent: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/metadata_agent.ini.j2"
- "{{ node_custom_config }}/neutron/metadata_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/metadata_agent.ini"
register: neutron_metadata_agent_ini
when:
- neutron_metadata_agent.enabled | bool
- name: Copying over lbaas_agent.ini
vars:
service_name: "neutron-lbaas-agent"
neutron_lbaas_agent: "{{ neutron_services['neutron-lbaas-agent'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/lbaas_agent.ini.j2"
- "{{ node_custom_config }}/neutron/lbaas_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/lbaas_agent.ini"
register: neutron_lbaas_agent_ini
when:
- neutron_lbaas_agent.enabled | bool
- name: Copying over vpnaas_agent.ini
vars:
service_name: "neutron-vpnaas-agent"
neutron_vpnaas_agent: "{{ neutron_services['neutron-vpnaas-agent'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/vpnaas_agent.ini.j2"
- "{{ node_custom_config }}/neutron/vpnaas_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/vpnaas_agent.ini"
register: neutron_vpnaas_agent_ini
when:
- neutron_vpnaas_agent.enabled | bool
- name: Copying over bgp_dragent.ini
vars:
service_name: "neutron-bgp-dragent"
neutron_bgp_dragent: "{{ neutron_services['neutron-bgp-dragent'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/bgp_dragent.ini.j2"
- "{{ node_custom_config }}/neutron/bgp_dragent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/bgp_dragent.ini"
register: neutron_bgp_dragent_ini
when:
- neutron_bgp_dragent.enabled | bool
- name: Check if policies shall be overwritten
local_action: stat path="{{ node_custom_config }}/neutron/policy.json"
register: neutron_policy
- name: Copying over existing policy.json
vars:
service_name: "{{ item.key }}"
services_need_policy_json:
- "neutron-dhcp-agent"
- "neutron-l3-agent"
- "neutron-linuxbridge-agent"
- "neutron-metadata-agent"
- "neutron-openvswitch-agent"
- "neutron-server"
- "neutron-lbaas-agent"
- "neutron-vpnaas-agent"
- "neutron-bgp-dragent"
template:
src: "{{ node_custom_config }}/neutron/policy.json"
dest: "{{ node_config_directory }}/{{ service_name }}/policy.json"
register: policy_jsons
when:
- neutron_policy.stat.exists | bool
- item.value.enabled | bool
with_dict: "{{ neutron_services }}"
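The pattern running through this file is consistent: loop over neutron_services with with_dict and copy a given config file only to services that are both enabled and listed in the matching services_need_* allow-list. A condensed sketch of that selection logic (the neutron_services subset below is invented for the example, not the real service definition):

# Condensed sketch of the "enabled AND in the allow-list" selection used
# for neutron.conf, ml2_conf.ini, l3_agent.ini, etc.
# The neutron_services subset below is invented for the example.
neutron_services = {
    "neutron-server": {"enabled": True},
    "neutron-openvswitch-agent": {"enabled": True},
    "neutron-lbaas-agent": {"enabled": False},
}

services_need_neutron_conf = [
    "neutron-server",
    "neutron-openvswitch-agent",
    "neutron-lbaas-agent",
]

# Mirrors: with_dict + "item.value.enabled | bool" + "item.key in services_need_*"
targets = [
    name for name, svc in neutron_services.items()
    if svc["enabled"] and name in services_need_neutron_conf
]
print(targets)   # -> ['neutron-server', 'neutron-openvswitch-agent']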

View File

@ -1,2 +0,0 @@
---
- include: "config.yml"

View File

@ -1,3 +0,0 @@
[BGP]
bgp_speaker_driver = neutron_dynamic_routing.services.bgp.agent.driver.ryu.driver.RyuBgpDriver
bgp_router_id = {{ neutron_bgp_router_id }}

View File

@ -1,6 +0,0 @@
# dhcp_agent.ini
[DEFAULT]
dnsmasq_config_file = /etc/neutron/dnsmasq.conf
enable_isolated_metadata = true
force_metadata = true
dnsmasq_dns_servers = 8.8.8.8,8.8.4.4

View File

@ -1,8 +0,0 @@
{% if std_logger %}
#FIXME(kfox1111) This breaks things for some reason... I suspect a parent process is
#reading stdout. Come up with an alternate solution in a follow on PS for this issue.
#log-facility=-
log-facility=/var/log/kolla/neutron/dnsmasq.log
{% else %}
log-facility=/var/log/kolla/neutron/dnsmasq.log
{% endif %}

View File

@ -1 +0,0 @@
[fwaas]

View File

@ -1,16 +0,0 @@
#jinja2: trim_blocks: False
[DEFAULT]
{% if enable_neutron_dvr | bool %}
{% if inventory_hostname in groups['network'] %}
agent_mode = dvr_snat
{% elif inventory_hostname in groups['compute'] %}
agent_mode = dvr
{% endif %}
{% else %}
agent_mode = legacy
{% endif %}
{% if enable_neutron_fwaas | bool %}
[fwaas]
driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
enabled = True
{% endif %}
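This template picks agent_mode from the host's group membership when DVR is enabled and falls back to legacy otherwise. Below is a minimal rendering sketch in plain Jinja2; a stand-in bool filter is registered because Ansible's | bool filter is not part of stock Jinja2, the hostnames and groups are invented, and the fwaas block from the original template is omitted:

# Minimal sketch of how l3_agent.ini.j2 chooses agent_mode.
# Stock Jinja2 has no "bool" filter, so a simple stand-in is registered;
# hostnames and groups are invented for the example.
from jinja2 import Environment

TEMPLATE = """\
[DEFAULT]
{% if enable_neutron_dvr | bool %}
{% if inventory_hostname in groups['network'] %}
agent_mode = dvr_snat
{% elif inventory_hostname in groups['compute'] %}
agent_mode = dvr
{% endif %}
{% else %}
agent_mode = legacy
{% endif %}
"""

env = Environment(trim_blocks=False)
env.filters["bool"] = lambda v: str(v).strip().lower() in ("true", "yes", "on", "1")

ctx = {
    "enable_neutron_dvr": "yes",
    "inventory_hostname": "compute01",
    "groups": {"network": ["net01"], "compute": ["compute01"]},
}

# With DVR on and the host in the compute group, the output contains "agent_mode = dvr".
print(env.from_string(TEMPLATE).render(**ctx))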

View File

@ -1,6 +0,0 @@
[DEFAULT]
debug = {{ neutron_logging_debug }}
device_driver = neutron_lbaas.drivers.haproxy.namespace_driver.HaproxyNSDriver
[haproxy]
user_group = haproxy

View File

@ -1,6 +0,0 @@
# metadata_agent.ini
[DEFAULT]
nova_metadata_ip = {% if orchestration_engine == 'KUBERNETES' %}nova-metadata{% else %}{{ kolla_internal_fqdn }}{% endif %}
nova_metadata_port = {{ nova_metadata_port }}
metadata_proxy_shared_secret = {{ metadata_secret }}

Some files were not shown because too many files have changed in this diff.