Retire this repo

The opendev project has been moving away from puppet and this is one of
the puppet modules that is no longer used. To simplify things for us we
are taking the extra step of retiring this repo.

Change-Id: I46ccab87f063c51dd3236757dbb0567738803fae
changes/08/829808/1
Clark Boylan 10 months ago
parent 2c4f42cecd
commit 86a69a5b4d
  1. 8
      .gitignore
  2. 21
      CONTRIBUTING.rst
  3. 15
      Gemfile
  4. 202
      LICENSE
  5. 20
      README.md
  6. 8
      Rakefile
  7. 11
      bindep.txt
  8. 4
      contrib/README.md
  9. 9
      contrib/hiera.yaml
  10. 11
      contrib/log_server_data.yaml
  11. 34
      contrib/log_server_site.pp
  12. 57
      contrib/single_node_ci_data.yaml
  13. 72
      contrib/single_node_ci_site.pp
  14. 245
      doc/source/conf.py
  15. 18
      doc/source/index.rst
  16. 615
      doc/source/third_party_ci.rst
  17. 2
      files/disallow_robots.txt
  18. 11
      files/log_archive_maintenance.sh
  19. 13
      files/os-loganalyze-file_conditions.yaml
  20. 58
      manifests/elasticsearch_node.pp
  21. 136
      manifests/jenkins_master.pp
  22. 233
      manifests/logserver.pp
  23. 53
      manifests/logstash.pp
  24. 67
      manifests/logstash_worker.pp
  25. 141
      manifests/nodepool.pp
  26. 131
      manifests/nodepool_builder.pp
  27. 106
      manifests/nodepool_launcher.pp
  28. 315
      manifests/single_node_ci.pp
  29. 48
      manifests/subunit_worker.pp
  30. 71
      manifests/zuul_merger.pp
  31. 113
      manifests/zuul_scheduler.pp
  32. 23
      metadata.json
  33. 23
      setup.cfg
  34. 21
      setup.py
  35. 40
      spec/acceptance/basic_spec.rb
  36. 5
      spec/acceptance/fixtures/default.pp
  37. 3
      spec/acceptance/fixtures/nodepool/builder-postconditions.pp
  38. 16
      spec/acceptance/fixtures/nodepool/builder-preconditions.pp
  39. 16
      spec/acceptance/fixtures/nodepool/builder.pp
  40. 3
      spec/acceptance/fixtures/nodepool/launcher-postconditions.pp
  41. 8
      spec/acceptance/fixtures/nodepool/launcher-preconditions.pp
  42. 17
      spec/acceptance/fixtures/nodepool/launcher.pp
  43. 47
      spec/acceptance/nodepool_builder_spec.rb
  44. 47
      spec/acceptance/nodepool_launcher_spec.rb
  45. 11
      spec/acceptance/nodesets/default.yml
  46. 10
      spec/acceptance/nodesets/nodepool-centos7.yml
  47. 10
      spec/acceptance/nodesets/nodepool-trusty.yml
  48. 10
      spec/acceptance/nodesets/nodepool-xenial.yml
  49. 14
      templates/be.certipost.hudson.plugin.SCPRepositoryPublisher.xml.erb
  50. 148
      templates/logs-dev.vhost.erb
  51. 148
      templates/logs.vhost.erb
  52. 16
      templates/os-loganalyze-wsgi.conf.erb
  53. 2
      test-requirements.txt
  54. 15
      tox.ini

8
.gitignore vendored

@ -1,8 +0,0 @@
.eggs
.idea
.tox
puppet_openstackci.egg-info
AUTHORS
ChangeLog
doc/build

@ -1,21 +0,0 @@
============
Contributing
============
If you would like to contribute to the development of OpenStack,
you must follow the steps in this page:
http://docs.openstack.org/infra/manual/developers.html
If you already have a good understanding of how the system works and your
OpenStack accounts are set up, you can skip to the development workflow section
of this documentation to learn how changes to OpenStack should be submitted for
review via the Gerrit tool:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on StoryBoard, not GitHub:
https://storyboard.openstack.org/#!/project/808

@ -1,15 +0,0 @@
source 'https://rubygems.org'

# Prefer a local checkout of the spec helper (laid out by Zuul CI jobs);
# fall back to cloning it from opendev.org when running outside CI.
# NOTE: File.exists? was deprecated in Ruby 2.1 and removed in Ruby 3.2;
# use File.exist? instead.
spec_helper_path = '/home/zuul/src/opendev.org/opendev/puppet-openstack_infra_spec_helper'
if File.exist?(spec_helper_path)
  gem_checkout_method = {:path => spec_helper_path}
else
  gem_checkout_method = {:git => 'https://opendev.org/opendev/puppet-openstack_infra_spec_helper'}
end
# The helper gem is loaded explicitly by the specs, not at bundle time.
gem_checkout_method[:require] = false
group :development, :test, :system_tests do
  gem 'puppet-openstack_infra_spec_helper',
    gem_checkout_method
end
# vim:ft=ruby

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,15 +1,9 @@
# OpenStack Continuous Integration Module
This project is no longer maintained.
## Overview
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
Configures an OpenStack Continuous Integration System
## Developing
If you are adding features to this module, first ask yourself: "Does this logic
belong in the module for the service?"
An example of this is the gearman-logging.conf file needed by the zuul service.
This file should be managed by the zuul module, not managed here. What should go
in this module is high level directives and integrations such as a list of
jenkins plugins to install or a class that instantiates multiple services.
For any further questions, please email
service-discuss@lists.opendev.org or join #opendev on OFTC.

@ -1,8 +0,0 @@
# Rake tasks for this puppet module: spec-helper tasks plus puppet-lint.
require 'rubygems'
require 'puppetlabs_spec_helper/rake_tasks'
require 'puppet-lint/tasks/puppet-lint'

# Treat lint warnings as build failures, but disable the checks that
# conflict with the infra module layout.
PuppetLint.configuration.fail_on_warnings = true
%w[
  80chars
  autoloader_layout
  class_inherits_from_params_class
  class_parameter_defaults
].each do |check|
  PuppetLint.configuration.send("disable_#{check}")
end

@ -1,11 +0,0 @@
# This is a cross-platform list tracking distribution packages needed by tests;
# see http://docs.openstack.org/infra/bindep/ for additional information.
libxml2-devel [test platform:rpm]
libxml2-dev [test platform:dpkg]
libxslt-devel [test platform:rpm]
libxslt1-dev [test platform:dpkg]
ruby-devel [test platform:rpm]
ruby-dev [test platform:dpkg]
zlib1g-dev [test platform:dpkg]
zlib-devel [test platform:rpm]

@ -1,4 +0,0 @@
# OpenStack Third-Party CI
These instructions have been moved to doc/source/third_party_ci.rst
You can also view the [published version here](http://docs.openstack.org/infra/openstackci/)

@ -1,9 +0,0 @@
---
:backends:
- yaml
:logger: console
:hierarchy:
- common
:yaml:
:datadir: /etc/puppet/environments

@ -1,11 +0,0 @@
# See parameter documentation inside ../manifests/single_node_ci.pp
# Fields commented out have reasonable default values
domain: your-domain.example.com
jenkins_ssh_public_key: your-jenkins-public-key-no-whitespace
ara_middleware: false
#swift_authurl:
#swift_user:
#swift_key:
#swift_tenant_name:
#swift_region_name:
#swift_default_container:

@ -1,34 +0,0 @@
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# A sample puppet node configuration that installs and configures a server
# that hosts log files that are viewable in a browser.
# Note that using swift is optional and the defaults provided disable its
# usage.
node default {
class { '::openstackci::logserver':
domain => hiera('domain'),
jenkins_ssh_key => hiera('jenkins_ssh_public_key'),
ara_middleware => hiera('ara_middleware', false),
swift_authurl => hiera('swift_authurl', ''),
swift_user => hiera('swift_user', ''),
swift_key => hiera('swift_key', ''),
swift_tenant_name => hiera('swift_tenant_name', ''),
swift_region_name => hiera('swift_region_name', ''),
swift_default_container => hiera('swift_default_container', ''),
}
}

@ -1,57 +0,0 @@
# See parameter documentation inside ../manifests/single_node_ci.pp
# Fields commented out have reasonable default values
#vhost_name:
project_config_repo: http://your-project-config-repo.example.com/project-config-example.git
#serveradmin:
# Jenkins version 1.651 is the last known version to work out of the box with
# zuul. However, it has numerous security vulnerabilities, and should only
# be used for installations that have other means (e.g. firewalls) that block all
# untrusted access to Jenkins.
# see SECURITY-170 in:
# "https://wiki.jenkins-ci.org/display/SECURITY/Jenkins+Security+Advisory+2016-05-11"
#jenkins_version: present
#jenkins_vhost_name: jenkins
#jenkins_username: jenkins
#jenkins_password:
jenkins_ssh_private_key: |
-----BEGIN RSA PRIVATE KEY-----
Insert jenkins private key here
-----END RSA PRIVATE KEY-----
jenkins_ssh_public_key: your-jenkins-public-key-no-whitespace
jjb_git_revision: 1.6.2
#java_args_override: |
# These are the arguments to pass to Java
# The recommended value:
# "-Xloggc:/var/log/jenkins/gc.log -XX:+PrintGCDetails -Xmx12g -Dorg.kohsuke.stapler.compression.CompressionFilter.disabled=true -Djava.util.logging.config.file=/var/lib/jenkins/logger.conf -Dhudson.model.ParametersAction.keepUndefinedParameters=true"
# Please note that using the parameter: keepUndefinedParameters=true is not secure and exposes a potential jenkins security vulnerability
#jjb_git_url: https://git.openstack.org/openstack-infra/jenkins-job-builder
#gerrit_server: review.openstack.org
#gerrit_ssh_host_key:
gerrit_user: your-gerrit-user
gerrit_user_ssh_private_key: |
-----BEGIN RSA PRIVATE KEY-----
Insert gerrit private key here
-----END RSA PRIVATE KEY-----
gerrit_user_ssh_public_key: gerrit-public-key-no-whitespace
git_email: your-email@example.com
git_name: Your Name
log_server: logs.example.com
#smtp_host:
#smtp_default_from:
#smtp_default_to:
zuul_revision: 2.5.1
#zuul_git_source_repo: https://git.openstack.org/openstack-infra/zuul
oscc_file_contents: |
# Insert OSCC file contents here as explained in the
# documentation so that nodepool is able to
# authenticate to your cloud(s)
#
mysql_root_password: secret_mysql_root_password
mysql_nodepool_password: secret_mysql_nodepool_password
#nodepool_jenkins_target: jenkins1
#jenkins_api_key:
#jenkins_credentials_id:
nodepool_revision: 0.3.1
#nodepool_git_source_repo: https://git.openstack.org/openstack-infra/nodepool

@ -1,72 +0,0 @@
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# A sample puppet node configuration that installs and configures Jenkins,
# Zuul, Nodepool, Jenkins Job Builder, onto a single VM using the
# specified project-config repository and other configurations stored in hiera.
# Zuul status page will be available on port 80
# Jenkins UI will be available on port 8080
# Default values are provided where reasonable options are available assuming
# use of the review.openstack.org Gerrit server and for an unsecured Jenkins.
# All others must be provided by hiera. See the related single_node_ci_data.yaml
# which includes all optional and required parameters.
node default {
# If the fqdn is not resolvable, use its ip address
$vhost_name = hiera('vhost_name', $::fqdn)
class { '::openstackci::single_node_ci':
vhost_name => $vhost_name,
project_config_repo => hiera('project_config_repo'),
serveradmin => hiera('serveradmin', "webmaster@${vhost_name}"),
jenkins_version => hiera('jenkins_version', 'present'),
jenkins_vhost_name => hiera('jenkins_vhost_name', 'jenkins'),
jenkins_username => hiera('jenkins_username', 'jenkins'),
jenkins_password => hiera('jenkins_password', 'XXX'),
jenkins_ssh_private_key => hiera('jenkins_ssh_private_key'),
jenkins_ssh_public_key => hiera('jenkins_ssh_public_key'),
java_args_override => hiera('java_args_override', undef),
gerrit_server => hiera('gerrit_server', 'review.openstack.org'),
gerrit_user => hiera('gerrit_user'),
gerrit_user_ssh_public_key => hiera('gerrit_user_ssh_public_key'),
gerrit_user_ssh_private_key => hiera('gerrit_user_ssh_private_key'),
gerrit_ssh_host_key => hiera('gerrit_ssh_host_key',
'[review.openstack.org]:29418,[104.130.246.91]:29418,[2001:4800:7819:103:be76:4eff:fe05:8525]:29418 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfsIj/jqpI+2CFdjCL6kOiqdORWvxQ2sQbCzSzzmLXic8yVhCCbwarkvEpfUOHG4eyB0vqVZfMffxf0Yy3qjURrsroBCiuJ8GdiAcGdfYwHNfBI0cR6kydBZL537YDasIk0Z3ILzhwf7474LmkVzS7V2tMTb4ZiBS/jUeiHsVp88FZhIBkyhlb/awAGcUxT5U4QBXCAmerYXeB47FPuz9JFOVyF08LzH9JRe9tfXtqaCNhlSdRe/2pPRvn2EIhn5uHWwATACG9MBdrK8xv8LqPOik2w1JkgLWyBj11vDd5I3IjrmREGw8dqImqp0r6MD8rxqADlc1elfDIXYsy+TVH'),
git_email => hiera('git_email'),
git_name => hiera('git_name'),
log_server => hiera('log_server'),
smtp_host => hiera('smtp_host', 'localhost'),
smtp_default_from => hiera('smtp_default_from', "zuul@${vhost_name}"),
smtp_default_to => hiera('smtp_default_to', "zuul.reports@${vhost_name}"),
zuulv2 => hiera('zuulv2', true),
zuul_revision => hiera('zuul_revision', 'master'),
zuul_git_source_repo => hiera('zuul_git_source_repo',
'https://git.openstack.org/openstack-infra/zuul'),
oscc_file_contents => hiera('oscc_file_contents', ''),
mysql_root_password => hiera('mysql_root_password'),
mysql_nodepool_password => hiera('mysql_nodepool_password'),
nodepool_jenkins_target => hiera('nodepool_jenkins_target', 'jenkins1'),
jenkins_api_key => hiera('jenkins_api_key', 'XXX'),
jenkins_credentials_id => hiera('jenkins_credentials_id', 'XXX'),
nodepool_revision => hiera('nodepool_revision', 'master'),
nodepool_git_source_repo => hiera('nodepool_git_source_repo',
'https://git.openstack.org/openstack-infra/nodepool'),
jjb_git_revision => hiera('jjb_git_revision', '1.6.2'),
jjb_git_url => hiera('jjb_git_url',
'https://git.openstack.org/openstack-infra/jenkins-job-builder'),
}
}

@ -1,245 +0,0 @@
# -*- coding: utf-8 -*-
#
# OpenStack CI Puppet Module documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 17 16:04:57 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'oslo.sphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenStack CI Puppet Module'
copyright = u'2015, OpenStack Infrastructure Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStackCIPuppetModuledoc'
# -- Options for LaTeX output --------------------------------------------------

# LaTeX builder tweaks; every key is left at its Sphinx default.
latex_elements = {
    # 'papersize': 'letterpaper',  # paper size ('letterpaper' or 'a4paper')
    # 'pointsize': '10pt',         # base font size ('10pt', '11pt' or '12pt')
    # 'preamble': '',              # extra LaTeX preamble
}

# LaTeX document tree: one tuple per output file, in the form
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (
        'index',
        'OpenStackCIPuppetModule.tex',
        u'OpenStack CI Puppet Module Documentation',
        u'OpenStack Infrastructure Team',
        'manual',
    ),
]

# latex_logo = None            # image placed at the top of the title page
# latex_use_parts = False      # "manual" toplevel headings become parts
# latex_show_pagerefs = False  # show page references after internal links
# latex_show_urls = False      # show URL addresses after external links
# latex_appendices = []        # documents appended to all manuals
# latex_domain_indices = True  # set False to skip the module index

# -- Options for manual page output --------------------------------------------

# Man pages: one tuple per page, in the form
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        'index',
        'openstackcipuppetmodule',
        u'OpenStack CI Puppet Module Documentation',
        [u'OpenStack Infrastructure Team'],
        1,
    ),
]

# man_show_urls = False  # show URL addresses after external links

# -- Options for Texinfo output ------------------------------------------------

# Texinfo documents: one tuple per output file, in the form
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (
        'index',
        'OpenStackCIPuppetModule',
        u'OpenStack CI Puppet Module Documentation',
        u'OpenStack Infrastructure Team',
        'OpenStackCIPuppetModule',
        'One line description of project.',
        'Miscellaneous',
    ),
]

# texinfo_appendices = []         # documents appended to all manuals
# texinfo_domain_indices = True   # set False to skip the module index
# texinfo_show_urls = 'footnote'  # 'footnote', 'no', or 'inline'

@ -1,18 +0,0 @@
Welcome to OpenStack CI Puppet Module's documentation!
======================================================
The purpose of this module is to help others create a
continuous integration (CI) environment as used by the
OpenStack Infrastructure Team.
.. toctree::
:maxdepth: 2
third_party_ci
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

@ -1,615 +0,0 @@
OpenStack Third-Party CI
========================
.. warning::
The documentation here has been maintained by Third Party CI operators.
Unfortunately over time it has grown old and is no longer up to date.
If you are running a Third Party CI system and would like to help produce
an up to date document, this
`spec <https://specs.openstack.org/openstack-infra/infra-specs/specs/zuulv3-3rd-party-ci.html>`_
aims to provide guidance on how to do that.
As an alternative you may try to use Zuul's `documentation
<https://zuul-ci.org/docs/zuul/admin/index.html>`_ to spin up a working
CI system.
These instructions provide a **Third Party Testing** solution using the
same tools and scripts used by the OpenStack Infrastructure 'Jenkins' CI
system.
If you are setting up a similar system for use outside of OpenStack,
many of these steps are still valid, while others can be skipped. These
will be mentioned within each step.
If you are creating a third-party CI system for use within OpenStack,
you'll need to familiarize yourself with the contents of the `third
party
manual <http://docs.openstack.org/infra/system-config/third_party.html>`__,
and in particular you'll need to `create a service account
<http://docs.openstack.org/infra/system-config/third_party.html#creating-a-service-account>`__.
Overview
--------
This CI solution uses a few open-source tools:
- `Jenkins <http://docs.openstack.org/infra/system-config/jenkins.html>`__
- an open-source continuous integration server.
- `Zuul <http://docs.openstack.org/infra/system-config/zuul.html>`__ -
a project gating system
- `Nodepool <http://docs.openstack.org/infra/system-config/nodepool.html>`__ -
a node management system for testing
- `Jenkins Job
Builder <http://docs.openstack.org/infra/system-config/jjb.html>`__ -
a tool to manage jenkins job definitions
- `os-loganalyze <http://git.openstack.org/cgit/openstack-infra/os-loganalyze/>`__
- a tool to facilitate browsing, sharing, and filtering log files by
log level.
The following steps will help you integrate and deploy the first 4 tools
on a single node. An initial system with 8GB RAM, 4CPUs, 80GB HD should
be sufficient, running Ubuntu 14.04.
A second node will be used to store the log files and create a public
log server to host the static log files generated by jenkins jobs. This
log server node is an Apache server serving log files stored on disk or
on a Swift service. It is hosted on a separate node because it usually
needs to be publicly accessible to share job results whereas the rest of
the CI system can be located behind a firewall or within a VPN. At the
end of a Jenkins Job, ``publishers`` will scp log files from the jenkins
slave to the log server node or upload to the Swift Service.
The system requires two external resources:
- A source for Nodepool nodes. This is a service that implements the
OpenStack Nova API to provide virtual machines or bare metal nodes.
Nodepool will use this service to manage a pool of Jenkins slaves
that will run the actual CI jobs. You can use a public or private
OpenStack cloud, or even run your own
`devstack <https://git.openstack.org/cgit/openstack-dev/devstack/>`__
to get started.
- A Gerrit server (for OpenStack users, this is provided to you at
review.openstack.org) Zuul will listen to the Gerrit event stream to
decide which jobs to run when it receives a desired event. Zuul will
also post a comment with results to this Gerrit server with the job
results along with a link to the related log files.
These instructions are for a 'masterless' puppet setup, which is the
simplest version to set up for those not familiar with puppet.
Install and Configure Puppet
----------------------------
On each node, you will need to install and configure puppet. These
scripts assume a dedicated 'clean' node built with a base `ubuntu 14.04
server image <http://www.ubuntu.com/download/server>`__.
Install Puppet
~~~~~~~~~~~~~~
Puppet is a tool to automate the installation of servers by defining the
desired end state. You can quickly install puppet along with basic tools
(such as pip and git) using this script:
::
sudo su -
wget https://git.openstack.org/cgit/openstack-infra/system-config/plain/install_puppet.sh
bash install_puppet.sh
exit
Install Puppet Modules
~~~~~~~~~~~~~~~~~~~~~~
You can get the latest version of the puppet modules needed using this
script.
::
sudo su -
git clone https://git.openstack.org/openstack-infra/system-config
cd system-config
./install_modules.sh
exit
This script will install all the puppet modules used by upstream to
``/etc/puppet/modules``. In many cases, these are git cloned, and
running the ``install_modules.sh`` script again will update them to the
latest version. This script uses ``modules.env`` as its configuration
input.
Configure Masterless Puppet
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The instructions in this section apply to both the single-node CI server
node as well as the log server node.
It is useful to save the history, so set up a git repo as root user:
::
sudo su -
cd /etc/puppet
git init
echo "modules/" >> .gitignore
git add .
git config --global user.email "you@example.com"
git config --global user.name "Your Name"
git commit -m "initial files"
exit
You will be configuring 3 puppet files. The first is ``site.pp`` which
is the top level entry point for puppet to start managing the node. The
second is a ``hiera.yaml`` which configures Puppet Hiera to store local
configurations and secrets such as passwords and private keys, and
finally some ``yaml`` files which store the actual configurations and
secrets.
Set up these 3 files by starting with the samples provided. For each
node, select the corresponding ``single_node_ci*`` or ``log_server*``
files.
Configure Puppet ``hiera.yaml`` so that puppet knows where to look for the
``common.yaml`` file you'll create in the next step.
::
sudo su -
cp /etc/puppet/modules/openstackci/contrib/hiera.yaml /etc/puppet
exit
If setting up the ``single node ci`` node:
::
sudo su -
cp /etc/puppet/modules/openstackci/contrib/single_node_ci_site.pp /etc/puppet/manifests/site.pp
cp /etc/puppet/modules/openstackci/contrib/single_node_ci_data.yaml /etc/puppet/environments/common.yaml
exit
If setting up the ``log server`` node:
::
sudo su -
cp /etc/puppet/modules/openstackci/contrib/log_server_site.pp /etc/puppet/manifests/site.pp
cp /etc/puppet/modules/openstackci/contrib/log_server_data.yaml /etc/puppet/environments/common.yaml
exit
Modify ``/etc/puppet/environments/common.yaml`` as you need using
the parameter documentation described in
`single\_node\_ci.pp <http://git.openstack.org/cgit/openstack-infra/puppet-openstackci/tree/manifests/single_node_ci.pp>`__
or
`logserver.pp <http://git.openstack.org/cgit/openstack-infra/puppet-openstackci/tree/manifests/logserver.pp>`__.
These are the top level puppet class that is used in ``site.pp``.
One parameter called ``project_config_repo`` is necessary to be set
into ``/etc/puppet/environments/common.yaml``.
You need to configure this parameter with the URL of the 'project-config'
repository which you will create in the step
`Create an Initial 'project-config' Repository`_ below.
Once completed, you should commit these 3 files to the ``/etc/puppet``
git repo. Your git workflow may vary a bit, but here is an example:
::
sudo su -
cd /etc/puppet
git checkout -b setup
git add environments/common.yaml
# repeat for other modified files
git commit -a -m 'initial setup'
exit
Set up the log server
=====================
Set up the log server node first as it is simpler to configure. Besides,
its FQDN (or IP address) is needed to set up the CI server node.
While setting up jenkins\_ssh\_public\_key in ``common.yaml`` it is
important that the same ssh key pair is used when setting up the CI
server node in the next step. This is the ssh key that Jenkins will use
to scp files.
At this point you are ready to invoke Puppet for the first time. Puppet
needs to be run as root.
::
sudo puppet apply --verbose /etc/puppet/manifests/site.pp
You can simulate a jenkins file upload using:
::
scp -i $JENKINS_SSH_PRIVATE_KEY_FILE -o StrictHostKeyChecking=no $your-log-file jenkins@<fqdn_or_ip>:/srv/static/logs/
You should now be able to see the file you uploaded at
``http://<fqnd_or_ip>/$your-log-file``
Set up the CI server
====================
Follow the steps above to install and configure puppet on the CI server
node.
Create an Initial 'project-config' Repository
---------------------------------------------
Setting up a CI system consists of two major operational aspects. The
first is system configuration, which focuses on the installation and
deployment of the services, including any ssh keys, credentials,
databases, etc., and ensure all system components are able to interact
together. This portion is performed by a System Administrator.
The second is project configuration, which includes the configuration
files that the services use to perform the desired project-specific
operations.
The instructions provided here are mainly focused on the system
configuration aspect. However, system configuration requires an initial
set of project configurations in order to work. These project
configurations are provided via a git URL to a ``project-config``
repository. Before moving on, create an initial ``project-config``
repository. You can start with this
`project-config-example <https://git.openstack.org/cgit/openstack-infra/project-config-example/>`__
following the instructions provided in its README.md. While tailored for
OpenStack users, the instructions provided will help non-OpenStack users
also start with this repository. After your system is deployed, you can
make further changes to the ``project-config`` repository to
continuously tailor it to your needs.
Add 'jenkins' to your host name
-------------------------------
Add 'jenkins' to your /etc/hosts file so that Apache (which will be
installed by the puppet scripts) is happy. This is needed because the
scripts will install multiple services on a single node. For example:
::
head -n 1 /etc/hosts
127.0.0.1 localhost jenkins
Run masterless Puppet
---------------------
At this point you are ready to invoke Puppet for the first time. Puppet
needs to be run as root.
::
sudo puppet apply --verbose /etc/puppet/manifests/site.pp
Puppet will install nodepool, jenkins, zuul, jenkins jobs builder, etc.
Your ``project-config`` repository will be cloned to
/etc/project-config, and the puppet scripts will use these configuration
files located in this folder. Do not update these files directly.
Instead, you should update them from a clone on a dev host, merge the
changes to master, and push them to the same git remote location. Puppet
will always pull down the latest version of master from the git remote
and use that to update services.
If you get the following error, manually run the failed
``jenkins-jobs update`` command with the arguments specified in the
error message as root. This is caused by a bug in the puppet scripts
where Jenkins is not yet running when Jenkins Job Builder attempts to
load the Jenkins jobs.
::
Notice: /Stage[main]/Jenkins::Job_builder/Exec[jenkins_jobs_update]/returns: jenkins.JenkinsException: Error in request: [Errno 111] Connection refused
Notice: /Stage[main]/Jenkins::Job_builder/Exec[jenkins_jobs_update]/returns: INFO:jenkins_jobs.builder:Cache saved
Error: /Stage[main]/Jenkins::Job_builder/Exec[jenkins_jobs_update]: Failed to call refresh: jenkins-jobs update --delete-old /etc/jenkins_jobs/config returned 1 instead of one of [0]
Error: /Stage[main]/Jenkins::Job_builder/Exec[jenkins_jobs_update]: jenkins-jobs update --delete-old /etc/jenkins_jobs/config returned 1 instead of one of [0]
Restart apache if necessary
---------------------------
There are some known issues with Puppet automation. If you get the
following error:
::
AH00526: Syntax error on line 21 of /etc/apache2/sites-enabled/50-<fqdn/ip>.conf:
Invalid command 'RewriteEngine', perhaps misspelled or defined by a module not included in the server configuration
A simple restart works around the issue:
::
sudo service apache2 restart
Start zuul
----------
We'll start zuul first:
::
sudo service zuul start
sudo service zuul-merger start
You should see 2 zuul-server processes and 1 zuul-merger process
::
ps -ef | grep zuul
zuul 5722 1 2 18:13 ? 00:00:00 /usr/bin/python /usr/local/bin/zuul-server
zuul 5725 5722 0 18:13 ? 00:00:00 /usr/bin/python /usr/local/bin/zuul-server
zuul 5741 1 2 18:13 ? 00:00:00 /usr/bin/python /usr/local/bin/zuul-merger
You can view the log files for any errors:
::
view /var/log/zuul/zuul.log
Most zuul files are located in either of the following directories. They
should not need to be modified directly, but are useful to help identify
root causes:
::
/var/lib/zuul
/etc/zuul
Start nodepool
--------------
The first time starting nodepool, it's recommended to manually build the
image to aid in debugging any issues. To do that, first, initiate the
nodepool-builder service:
::
sudo service nodepool-builder start
The nodepool-builder service is responsible for receiving image building
requests and calling Disk Image Builder to carry on the image creation.
You can see its logs by typing:
::
view /var/log/nodepool/nodepool-builder.log
Next, log into the nodepool user to issue manually the image building:
::
sudo su - nodepool
# Ensure the NODEPOOL_SSH_KEY variable is in the environment
# Otherwise nodepool won't be able to ssh into nodes based
# on the image built manually using these instructions
source /etc/default/nodepool
# In the command below <image-name> references one of the
# images defined in your project-config/nodepool/nodepool.yaml
# file as the 'name' field in the section 'diskimages'.
nodepool image-build <image-name>
You can follow the image creation process by seeing the image creation
log:
::
tail -f /var/log/nodepool/image/image.log
If you run into issues building the image, the `documentation provided
here can help you
debug <https://git.openstack.org/cgit/openstack-infra/project-config/tree/nodepool/elements/README.rst>`__
After you have successfully built an image, manually upload it to the
provider to ensure provider authentication and image uploading work:
::
nodepool image-upload all <image-name>
Once successful, you can start nodepool. (Note that if you don't yet
have an image, this is one of the first actions nodepool will do when it
starts, before creating any nodes):
::
sudo service nodepool start
You should see at least one process running. In particular:
::
ps -ef | grep nodepool
nodepool 5786 1 28 18:14 ? 00:00:01 /usr/bin/python /usr/local/bin/nodepoold -c /etc/nodepool/nodepool.yaml -l /etc/nodepool/logging.conf
After building and uploading the images to the providers, nodepool will
start to build nodes on those providers based on the image and will
register those nodes as jenkins slaves.
If that does not happen, the nodepool log files will help identify the
causes.
::
view /var/log/nodepool/nodepool.log
view /var/log/nodepool/debug.log
Most nodepool configuration files are located in either of the following
directories. They should never be modified directly as puppet will
overwrite any changes, but are useful to help identify root causes:
::
/etc/nodepool
/home/nodepool/.config/openstack/clouds.yaml
Setup Jenkins
-------------
First Restart Jenkins so that plugins will be fully installed:
::
sudo service jenkins restart
Then open the Jenkins UI to finish manual configuration steps.
Enable Gearman, which is the Jenkins plugin zuul uses to queue jobs:
::
http://<host fqdn/ip>:8080/
Manage Jenkins --> Configure System
For "Gearman Server Port" use port number 4730
Under "Gearman Plugin Config" Check the box "Enable Gearman"
Click "Test Connection" It should return success if zuul is running.
The zuul process is running a gearman server on port 4730. To check the status
of gearman: on your zuul node telnet to 127.0.0.1 port 4730, and issue the
command ``status`` to get status information about the jobs registered in
gearman.
::
echo 'status' | nc 127.0.0.1 4730 -w 1
The output of the ``status`` command contains tab separated columns with the
following information.
1. Name: The name of the job.
2. Number in queue: The total number of jobs in the queue including the
currently running ones (next column).
3. Number of jobs running: The total number of jobs currently running.
4. Number of capable workers: A maximum possible count of workers that can run
this job. This number being zero is one reason zuul reports "NOT Registered".
::
build:noop-check-communication 1 0 1
build:dsvm-tempest-full 2 1 1
Enable ZMQ Event Publisher, which is how nodepool is notified of Jenkins
slave status events:
::
http://<host fqdn/ip>:8080/
Manage Jenkins --> Configure System
Under "ZMQ Event Publisher"
Check the box "Enable on all Jobs"
Securing Jenkins (optional)
---------------------------
By default, Jenkins is installed with security disabled. While this is
fine for development environments where external access to Jenkins UI is
restricted, you are strongly encouraged to enable it. You can skip this
step and do it at a later time if you wish:
Create a jenkins 'credentials':
::
http://<host fqdn/ip>:8080/
Manage Jenkins --> Add Credentials --> SSH Username with private key
Username 'jenkins'
Private key --> From a file on Jenkins master
"/var/lib/jenkins/.ssh/id_rsa"
--> Save
Save the credential uuid in your hiera data:
::
sudo su jenkins
cat /var/lib/jenkins/credentials.xml | grep "<id>"
Copy the id to the 'jenkins_credentials_id' value in /etc/puppet/environments/common.yaml
Enable basic Jenkins security:
::
http://<host fqdn/ip>:8080/
Manage Jenkins --> Configure Global Security
Check "Enable Security"
Under "Security Realm"
Select Jenkin's own user database
Uncheck allow users to sign up
Under "Authorization" select "logged-in users can do anything"
Create a user 'jenkins'
Choose a password.
check 'Sign up'
Save the password to the 'jenkins_password' value in /etc/puppet/environments/common.yaml
Get the new 'jenkins' user API token:
::
http://<host fqdn/ip>:8080/
Manage Jenkins --> People --> Select user 'jenkins' --> configure --> Show API Token
Save this token to the 'jenkins_api_key' value in /etc/puppet/environments/common.yaml
Reconfigure your system to use Jenkins security settings stored in
``/etc/puppet/environments/common.yaml``
::
sudo puppet apply --verbose /etc/puppet/manifests/site.pp
Configuring Jenkins Plugins (recommended)
-----------------------------------------
single-use slave:
This plugin will mark nodes as offline when a job completes on them.
This plugin is intended to be used with external tools like Nodepool,
which has the ability to spin up slaves on demand and then reap them when
Jenkins has run a job on them. This plugin is needed because there is a race
condition between when the job completes and when the external tool is able
to reap the node.
Labels can be taken from the project-config/nodepool/nodepool.yaml file
under section "labels".
::
http://<host fqdn/ip>:8080/
Manage Jenkins --> Configure System
Under "Single Use Slaves"
Add comma-separated labels
Updating your masterless puppet hosts
=====================================
Any time you check-in changes to your ``project-config`` repo, make
changes to the hiera data (``/etc/puppet/environments/common.yaml``), or
update the puppet files (in /etc/puppet/modules, either manually or via
the ``install_modules.sh`` script), run the same puppet command to
update the host.
::
sudo puppet apply --verbose /etc/puppet/manifests/site.pp
If you need to change the git url in your ``project-config`` or any
other git urls in your ``common.yaml``, delete the respective
repository, e.g. ``/etc/project-config``, and puppet will reclone it
from the new location when the above ``puppet apply`` command is
reinvoked.
Note that it is safe, and expected, to rerun the above ``puppet apply``
command. Puppet will update the configuration of the host as described
in the puppet classes. This means that if you delete or modify any files
managed by puppet, rerunning the ``puppet apply`` command will restore
those settings back to the specified state (and remove your local
changes for better or worse). You could even run the ``puppet apply``
command as a cron job to enable continuous deployment in your CI system.

@ -1,2 +0,0 @@
User-agent: *
Disallow: /

@ -1,11 +0,0 @@
#!/bin/bash
# Log archive maintenance for /srv/static/logs.
#
# NOTE: the shebang must be bash, not sh: $RANDOM is a bash feature and under
# dash (Ubuntu's /bin/sh) it expands to nothing, so the staggering sleep was
# always 0 seconds.
#
# Pipeline:
#  - sleep a random 0-599s so concurrent cron runs across hosts are staggered
#  - flock -n: skip this run entirely if another instance holds the lock
#  - one depth-first find pass over the log tree that, excluding lost+found,
#    the help/ subtree and robots.txt:
#      * gzips *.txt, *.html and tmp* files older than 10 minutes that are
#        not already compressed (skips names ending in .gz/-gz/_z/.Z etc.)
#      * removes regular files and symlinks older than 30 days
#      * removes empty directories that have been empty for more than a day
sleep $((RANDOM%600)) && \
flock -n /var/run/gziplogs.lock \
find -O3 /srv/static/logs/ -depth -not -name lost+found \
  -not -wholename /srv/static/logs/help/\* \
  -not -wholename /srv/static/logs/robots.txt \( \
  \( -type f -mmin +10 -not -name \*\[.-\]gz -not -name \*\[._-\]\[zZ\] \
    \( -name \*.txt -or -name \*.html -or -name tmp\* \) \
    -exec gzip \{\} \; \) \
  -o \( \( -type f -o -type l \) -mtime +30 -execdir rm \{\} \; \) \
  -o \( -type d -empty -mtime +1 -execdir rmdir {} \; \) \)

@ -1,13 +0,0 @@
# os-loganalyze filter configuration: for a requested file, the first
# filename_pattern that matches decides which filter and view are applied.
conditions:
# Plain-text logs (optionally gzip-compressed) are run through the severity
# filter and rendered as HTML.
- filename_pattern: ^.*\.txt(\.gz)?$
filter: SevFilter
view: HTMLView
# Same treatment for *.log files.
- filename_pattern: ^.*\.log(\.gz)?$
filter: SevFilter
view: HTMLView
# Jenkins console output is also severity-filtered HTML.
- filename_pattern: ^.*console\.html(\.gz)?$
filter: SevFilter
view: HTMLView
# Catch-all: anything else is served unmodified.
- filename_pattern: ^.*$
filter: NoFilter
view: PassthroughView

@ -1,58 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: openstackci::elasticsearch_node
# Elasticsearch server glue class.
#
# Wires together ::logstash::elasticsearch, ::elasticsearch and
# ::logstash::curator with cluster-level settings tuned for a multi-node
# Elasticsearch cluster used for CI log processing.
#
# === Parameters
#
# [*discover_nodes*]
#   Hosts used for zen unicast discovery (multicast discovery is disabled).
# [*es_heap_size*]
#   JVM heap size handed to the elasticsearch class.
# [*es_version*]
#   Elasticsearch version to install.
# [*es_gw_recover_after_nodes*]
#   Nodes that must join before gateway recovery starts.
# [*es_gw_recover_after_time*]
#   Time to wait before recovery once recover_after_nodes is met.
# [*es_gw_expected_nodes*]
#   Number of nodes expected in the cluster.
# [*es_discovery_min_master_nodes*]
#   Minimum master-eligible nodes required to elect a master.
# [*es_indices_cleanup_hour*]
# [*es_indices_cleanup_minute*]
# [*es_indices_cleanup_period*]
#   NOTE(review): these three cleanup parameters are accepted but not
#   referenced in this class body; curator below hardcodes 10 days —
#   presumably they were meant to be passed through. Verify against
#   ::logstash::curator before relying on them.
#
class openstackci::elasticsearch_node (
$discover_nodes = ['localhost'],
$es_heap_size = '30g',
$es_version = '1.7.3',
$es_gw_recover_after_nodes = '5',
$es_gw_recover_after_time = '5m',
$es_gw_expected_nodes = '6',
$es_discovery_min_master_nodes = '4',
$es_indices_cleanup_hour = '2',
$es_indices_cleanup_minute = '0',
$es_indices_cleanup_period = '10 days ago',
) {
class { '::logstash::elasticsearch': }
# Cluster-wide tuning applied via the elasticsearch template config.
class { '::elasticsearch':
es_template_config => {
'index.store.compress.stored' => true,
'index.store.compress.tv' => true,
'indices.memory.index_buffer_size' => '33%',
'indices.breaker.fielddata.limit' => '70%',
# Lock the JVM heap in RAM to avoid swapping.
'bootstrap.mlockall' => true,
'gateway.recover_after_nodes' => $es_gw_recover_after_nodes,
'gateway.recover_after_time' => $es_gw_recover_after_time,
'gateway.expected_nodes' => $es_gw_expected_nodes,
'discovery.zen.minimum_master_nodes' => $es_discovery_min_master_nodes,
# Unicast-only discovery against the explicit node list.
'discovery.zen.ping.multicast.enabled' => false,
'discovery.zen.ping.unicast.hosts' => $discover_nodes,
'http.cors.enabled' => true,
'http.cors.allow-origin' => "'*'", # lint:ignore:double_quoted_strings
},
heap_size => $es_heap_size,
version => $es_version,
}
# Curator prunes indices older than 10 days (hardcoded; see NOTE above).
class { '::logstash::curator':
keep_for_days => '10',
}
}

@ -1,136 +0,0 @@
# == Class: openstackci::jenkins_master
#
class openstackci::jenkins_master (
$serveradmin,
$jenkins_password,
$jenkins_username = 'jenkins',
$vhost_name = $::fqdn,
$logo = '', # Logo must be present in puppet-jenkins/files
$ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem',
$ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key',
$ssl_chain_file = '',
$ssl_cert_file_contents = '',
$ssl_key_file_contents = '',
$ssl_chain_file_contents = '',
$jenkins_ssh_private_key = '',
$jenkins_ssh_public_key = '',
$jenkins_version = 'present',
$manage_jenkins_jobs = false,
$jenkins_url = 'http://localhost:8080',
$java_args_override = undef,
$jjb_update_timeout = 1200,
$jjb_git_url = 'https://git.openstack.org/openstack-infra/jenkins-job-builder',
$jjb_git_revision = 'master',
$project_config_repo = '',
$project_config_base = '',
$log_server = undef,
) {
class { '::jenkins::master':
vhost_name => $vhost_name,
serveradmin => $serveradmin,
logo => $logo,
ssl_cert_file => $ssl_cert_file,
ssl_key_file => $ssl_key_file,
ssl_chain_file => $ssl_chain_file,
ssl_cert_file_contents => $ssl_cert_file_contents,
ssl_key_file_contents => $ssl_key_file_contents,
ssl_chain_file_contents => $ssl_chain_file_contents,
jenkins_ssh_private_key => $jenkins_ssh_private_key,
jenkins_ssh_public_key => $jenkins_ssh_public_key,
jenkins_version => $jenkins_version,
java_args_override => $java_args_override,
}
jenkins::plugin { 'build-timeout':
version => '1.14',
}
jenkins::plugin { 'copyartifact':
version => '1.22',
}
jenkins::plugin { 'dashboard-view':
version => '2.3',
}
jenkins::plugin { 'gearman-plugin':
version => '0.1.1',
}
jenkins::plugin { 'git':
version => '1.1.23',
}
jenkins::plugin { 'greenballs':
version => '1.12',
}
jenkins::plugin { 'extended-read-permission':
version => '1.0',
}
jenkins::plugin { 'zmq-event-publisher':
version => '0.0.3',
}
jenkins::plugin { 'scp':
version => '1.9',