Fork openstack-ci to devstack gate.

Remove everything but the devstack gate scripts.
This commit is contained in:
James E. Blair
2012-03-02 12:55:20 -08:00
parent 63bf7c14e8
commit 9b283c072e
61 changed files with 4 additions and 5366 deletions

View File

@@ -1,32 +0,0 @@
[Tarmac]
rejected_branch_status = Work in progress
[lp:nova]
verify_command=/var/lib/jenkins/test_nova.sh
[lp:~hudson-openstack/nova/milestone-proposed]
verify_command=/var/lib/jenkins/test_nova.sh
[lp:openstack-dashboard]
verify_command=bash run_tests.sh
[lp:glance]
verify_command=python setup.py test
[lp:~hudson-openstack/glance/milestone-proposed]
verify_command=python setup.py test
[lp:swift]
verify_command=python setup.py test
[lp:swift/1.1]
verify_command=python setup.py test
[lp:swift/1.2]
verify_command=python setup.py test
[lp:~hudson-openstack/swift/milestone-proposed]
verify_command=python setup.py test
[lp:burrow]
verify_command=python setup.py test

View File

@@ -1,4 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/openstack-ci.git
project=openstack-ci/devstack-gate.git

View File

@@ -1,11 +1,3 @@
This repo contains scripts used by the OpenStack Jenkins to perform tasks,
the tarmac config used by Jenkins, as well as a few scripts for creating
new Jenkins slaves and additional utility nodes.
launch_node.py will launch a base node and add user accounts for everyone in
the ~openstack-admins team.
launch_slave.py will launch a node with everything it needs to perform basic
jenkins slave tasks.
Devstack-gate is a collection of scripts used by the OpenStack CI team
to test every change to core OpenStack projects by deploying OpenStack
via devstack on a cloud server.

View File

@@ -1,89 +0,0 @@
#!/bin/sh
set -e
if [ -z "$PROJECT" ]
then
echo '$PROJECT not set.'
exit 1
fi
HUDSON=http://localhost:8080/
VERSIONDIR=$HOME/versions
PKGRECORDFILE=$VERSIONDIR/binpkgversions
JENKINS_TARBALL_JOB=${JENKINS_TARBALL_JOB:-$PROJECT-tarball}
BZR_BRANCH=${BZR_BRANCH:-lp:~openstack-ubuntu-packagers/$PROJECT/ubuntu}
PPAS=${PPAS:-ppa:$PROJECT-core/trunk}
PACKAGING_REVNO=${PACKAGING_REVNO:--1}
if [ ! -d "$VERSIONDIR" ]
then
bzr co bzr://jenkins.openstack.org/ "$VERSIONDIR"
else
( cd $VERSIONDIR ; bzr up )
fi
# Clean up after previous build
rm -rf build dist.zip
mkdir build
# Grab the most recently built artifacts
wget $HUDSON/job/${JENKINS_TARBALL_JOB}/lastBuild/artifact/dist/*zip*/dist.zip
# Shove them in build/
unzip dist.zip -d build
cd build
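# Derive the upstream version from the tarball name,
# e.g. dist/nova-2012.1~e4.tar.gz -> 2012.1~e4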
tarball="$(echo dist/$PROJECT*.tar.gz)"
version="${tarball%.tar.gz}"
version="${version#*$PROJECT-}"
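# If EXTRAVERSION is set, splice it in ahead of the ~snapshot suffix,
# e.g. 2012.1~e4 with EXTRAVERSION=+git becomes 2012.1+git~e4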
if [ -n "${EXTRAVERSION}" ]
then
version="${version%~*}${EXTRAVERSION}~${version#*~}"
fi
tar xvzf "${tarball}"
echo ln -s "${tarball}" "${PROJECT}_${version}.orig.tar.gz"
ln -s "${tarball}" "${PROJECT}_${version}.orig.tar.gz"
# Overlay packaging
# (Intentionally using the natty branch. For these PPA builds, we don't need to diverge
# (yet, at least), so it makes the branch management easier this way.)
# Note: Doing a checkout and deleting .bzr afterwards instead of just doing an export,
# because export refuses to overlay over an existing directory, so this was easier.
# (We need to not have the .bzr in there, otherwise vcsversion.py might get overwritten)
bzr checkout -r ${PACKAGING_REVNO} --lightweight $BZR_BRANCH $PROJECT-*
cd $PROJECT-*
PACKAGING_REVNO="$(bzr revno --tree)"
rm -rf .bzr
# Please don't change this. It's the only way I'll get notified
# if an upload fails.
export DEBFULLNAME="Soren Hansen"
export DEBEMAIL="soren@openstack.org"
buildno=1
while true
do
pkgversion="${version}-0ubuntu0ppa1~${buildno}"
if grep "$PROJECT $pkgversion" "$PKGRECORDFILE"
then
echo "We've already built a $pkgversion of $PROJECT. Incrementing build number."
buildno=$(($buildno + 1))
else
echo "$PROJECT $pkgversion" >> "$PKGRECORDFILE"
cat "$PKGRECORDFILE" | sort > "$PKGRECORDFILE"
( cd $VERSIONDIR ;
bzr up ;
bzr commit -m"Added $PROJECT $snapshotversion" )
break
fi
done
# Doing this in here so that we have buildno
server_name=${PROJECT}-`echo ${pkgversion} | sed 's/\~//g'`
echo "Launching a Cloud Server"
python ${HOME}/launch_node.py ${server_name}
cp node.sh ..
dch -b --force-distribution --v "${pkgversion}" "Automated PPA build. Packaging revision: ${PACKAGING_REVNO}." -D maverick
dpkg-buildpackage -rfakeroot -sa -k32EE128C
cd ..

View File

@@ -1,97 +0,0 @@
default namespace = "https://launchpad.net/xmlns/2006/bugs"
start = lpbugs
# Data types
boolean = "True" | "False"
lpname = xsd:string { pattern = "[a-z0-9][a-z0-9\+\.\-]*" }
cvename = xsd:string { pattern = "(19|20)[0-9][0-9]-[0-9][0-9][0-9][0-9]" }
# XXX: jamesh 2006-04-11 bug=105401:
# These status and importance values need to be kept in sync with the
# rest of Launchpad. However, there are not yet any tests for this.
# https://bugs.launchpad.net/bugs/105401
status = (
"NEW" |
"INCOMPLETE" |
"INVALID" |
"WONTFIX" |
"CONFIRMED" |
"TRIAGED" |
"INPROGRESS" |
"FIXCOMMITTED" |
"FIXRELEASED" |
"UNKNOWN")
importance = (
"UNKNOWN" |
"CRITICAL" |
"HIGH" |
"MEDIUM" |
"LOW" |
"WISHLIST" |
"UNDECIDED")
# Content model for a person element. The element content is the
# person's name. For successful bug import, an email address must be
# provided.
person = (
attribute name { lpname }?,
attribute email { text }?,
text)
lpbugs = element launchpad-bugs { bug* }
bug = element bug {
attribute id { xsd:integer } &
element private { boolean }? &
element security_related { boolean }? &
element duplicateof { xsd:integer }? &
element datecreated { xsd:dateTime } &
element nickname { lpname }? &
# The following will likely be renamed summary in a future version.
element title { text } &
element description { text } &
element reporter { person } &
element status { status } &
element importance { importance } &
element milestone { lpname }? &
element assignee { person }? &
element urls {
element url { attribute href { xsd:anyURI }, text }*
}? &
element cves {
element cve { cvename }*
}? &
element tags {
element tag { lpname }*
}? &
element bugwatches {
element bugwatch { attribute href { xsd:anyURI } }*
}? &
element subscriptions {
element subscriber { person }*
}? &
comment+
}
# A bug has one or more comments. The first comment duplicates the
# reporter, datecreated, title, description of the bug.
comment = element comment {
element sender { person } &
element date { xsd:dateTime } &
element title { text }? &
element text { text } &
attachment*
}
# A bug attachment. Attachments are associated with a bug comment.
attachment = element attachment {
attribute href { xsd:anyURI }? &
element type { "PATCH" | "UNSPECIFIED" }? &
element filename { text }? &
# The following will likely be renamed summary in a future version.
element title { text }? &
element mimetype { text }? &
element contents { xsd:base64Binary }
}

View File

@@ -1,64 +0,0 @@
#!/bin/bash
set -e
# This script assumes it's being run inside of a checkout of the packaging
# for a project
PROJECT=`grep Source: debian/control | awk '{print $2}'`
VERSIONDIR=$HOME/versions
PKGRECORDFILE=$VERSIONDIR/pkgversions
PPAS=${PPAS:-ppa:$PROJECT-core/trunk}
PACKAGING_REVNO=${PACKAGING_REVNO:--1}
series=${series:-lucid}
if [ ! -d "$VERSIONDIR" ]
then
bzr co bzr://jenkins.openstack.org/ "$VERSIONDIR"
else
( cd $VERSIONDIR ; bzr up )
fi
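# Derive the upstream version from the tarball in the current directory,
# e.g. nova-2012.1~e4.tar.gz -> 2012.1~e4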
tarball="$(echo $PROJECT*.tar.gz)"
version="${tarball%.tar.gz}"
version="${version#*$PROJECT-}"
base_version=$version
if [ -n "${EXTRAVERSION}" ]
then
version="${version%~*}${EXTRAVERSION}~${version#*~}"
fi
if [ -d .git ]
then
PACKAGING_REVNO="$(git log --oneline | wc -l)"
else
PACKAGING_REVNO="$(bzr revno --tree)"
fi
buildno=1
while true
do
pkgversion="${version}-0ubuntu0ppa1~${series}${buildno}"
if grep "$PROJECT $pkgversion" "$PKGRECORDFILE"
then
echo "We've already built a $pkgversion of $PROJECT. Incrementing build number."
buildno=$(($buildno + 1))
else
echo "$PROJECT $pkgversion" >> "$PKGRECORDFILE"
sort "$PKGRECORDFILE" > "$PKGRECORDFILE".tmp
mv "$PKGRECORDFILE".tmp "$PKGRECORDFILE"
( cd $VERSIONDIR ;
bzr up ;
bzr commit -m"Added $PROJECT $snapshotversion" )
break
fi
done
dch -b --force-distribution --v "${pkgversion}" "Automated PPA build. Packaging revision: ${PACKAGING_REVNO}." -D $series
debcommit
bzr bd -S --builder='debuild -S -sa -rfakeroot' --build-dir=build
if ! [ "$DO_UPLOAD" = "no" ]
then
for ppa in $PPAS
do
dput --force $ppa "../${PROJECT}_${pkgversion}_source.changes"
done
fi

View File

@@ -1,7 +0,0 @@
#!/bin/bash
bzr branch lp:django-nova
python tools/install_venv.py django-nova
cp local/local_settings.py.example local/local_settings.py
tools/with_venv.sh dashboard/manage.py test

View File

@@ -1,216 +0,0 @@
# -*- coding: utf-8 -*-
#
# OpenStack CI documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 18 13:42:23 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenStack CI'
copyright = u'2011, Monty Taylor, James Blair and Andrew Hutchings'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "%d.%02d" % (datetime.datetime.now().year, datetime.datetime.now().month)
# The full version, including alpha/beta/rc tags.
release = "%d.%02d.%02d" % (datetime.datetime.now().year, datetime.datetime.now().month, datetime.datetime.now().day)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStackCIdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OpenStackCI.tex', u'OpenStack CI Documentation',
u'Monty Taylor and James Blair', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openstackci', u'OpenStack CI Documentation',
[u'Monty Taylor, James Blair and Andrew Hutchings'], 1)
]

View File

@@ -1,812 +0,0 @@
:title: Gerrit Installation
Gerrit
######
Objective
*********
A workflow where developers submit changes to gerrit, changes are
peer-reviewed and automatically tested by Jenkins before being
committed to the main repo. The public repo is on github.
References
**********
* http://gerrit.googlecode.com/svn/documentation/2.2.1/install.html
* http://feeding.cloud.geek.nz/2011/04/code-reviews-with-gerrit-and-gitorious.html
* http://feeding.cloud.geek.nz/2011/05/integrating-launchpad-and-gerrit-code.html
* http://www.infoq.com/articles/Gerrit-jenkins-hudson
* https://wiki.jenkins-ci.org/display/JENKINS/Gerrit+Trigger
* https://wiki.mahara.org/index.php/Developer_Area/Developer_Tools
Known Issues
************
* Don't use innodb until at least gerrit 2.2.2 because of:
http://code.google.com/p/gerrit/issues/detail?id=518
Installation
************
Host Installation
=================
Prepare Host
------------
This sets the host up with the standard OpenStack system
administration configuration. Skip this if you're not setting up a
host for use by the OpenStack project.
::
apt-get install bzr puppet emacs23-nox
bzr branch lp:~mordred/+junk/osapuppetconf
cd osapuppetconf/
puppet apply --modulepath=`pwd`/modules manifests/site.pp
This sets up the firewall and installs some dependencies for Gerrit::
apt-get install ufw
ufw enable
ufw allow from any to any port 22
ufw allow from any to any port 29418
ufw allow from any to any port 80
ufw allow from any to any port 443
apt-get install git openjdk-6-jre-headless mysql-server
Install MySQL
-------------
::
mysql -u root -p
CREATE USER 'gerrit2'@'localhost' IDENTIFIED BY 'secret';
CREATE DATABASE reviewdb;
ALTER DATABASE reviewdb charset=latin1;
GRANT ALL ON reviewdb.* TO 'gerrit2'@'localhost';
FLUSH PRIVILEGES;
sudo useradd -r gerrit2
sudo chsh gerrit2 -s /bin/bash
sudo su - gerrit2
Install Gerrit
--------------
::
wget http://gerrit.googlecode.com/files/gerrit-2.2.1.war
mv gerrit-2.2.1.war gerrit.war
java -jar gerrit.war init -d review_site
::
*** Gerrit Code Review 2.2.1
***
Create '/home/gerrit2/review_site' [Y/n]?
*** Git Repositories
***
Location of Git repositories [git]:
*** SQL Database
***
Database server type [H2/?]: ?
Supported options are:
h2
postgresql
mysql
jdbc
Database server type [H2/?]: mysql
Gerrit Code Review is not shipped with MySQL Connector/J 5.1.10
** This library is required for your configuration. **
Download and install it now [Y/n]?
Downloading http://repo2.maven.org/maven2/mysql/mysql-connector-java/5.1.10/mysql-connector-java-5.1.10.jar ... OK
Checksum mysql-connector-java-5.1.10.jar OK
Server hostname [localhost]:
Server port [(MYSQL default)]:
Database name [reviewdb]:
Database username [gerrit2]:
gerrit2's password :
confirm password :
*** User Authentication
***
Authentication method [OPENID/?]:
*** Email Delivery
***
SMTP server hostname [localhost]:
SMTP server port [(default)]:
SMTP encryption [NONE/?]:
SMTP username :
*** Container Process
***
Run as [gerrit2]:
Java runtime [/usr/lib/jvm/java-6-openjdk/jre]:
Copy gerrit.war to /home/gerrit2/review_site/bin/gerrit.war [Y/n]?
Copying gerrit.war to /home/gerrit2/review_site/bin/gerrit.war
*** SSH Daemon
***
Listen on address [*]:
Listen on port [29418]:
Gerrit Code Review is not shipped with Bouncy Castle Crypto v144
If available, Gerrit can take advantage of features
in the library, but will also function without it.
Download and install it now [Y/n]?
Downloading http://www.bouncycastle.org/download/bcprov-jdk16-144.jar ... OK
Checksum bcprov-jdk16-144.jar OK
Generating SSH host key ... rsa... dsa... done
*** HTTP Daemon
***
Behind reverse proxy [y/N]? y
Proxy uses SSL (https://) [y/N]? y
Subdirectory on proxy server [/]:
Listen on address [*]:
Listen on port [8081]:
Canonical URL [https://review.openstack.org/]:
Initialized /home/gerrit2/review_site
Executing /home/gerrit2/review_site/bin/gerrit.sh start
Starting Gerrit Code Review: OK
Waiting for server to start ... OK
Opening browser ...
Please open a browser and go to https://review.openstack.org/#admin,projects
Configure Gerrit
----------------
Update etc/gerrit.config::
[user]
email = review@openstack.org
[auth]
allowedOpenID = ^https?://(login.)?launchpad.net/.*$
[commentlink "launchpad"]
match = "([Bb]ug\\s+#?)(\\d+)"
link = https://code.launchpad.net/bugs/$2
Set Gerrit to start on boot::
ln -snf /home/gerrit2/review_site/bin/gerrit.sh /etc/init.d/gerrit
update-rc.d gerrit defaults 90 10
cat <<EOF >/etc/default/gerritcodereview
GERRIT_SITE=/home/gerrit2/review_site
EOF
Add "Approved" review type to gerrit::
mysql -u root -p
use reviewdb;
insert into approval_categories values ('Approved', 'A', 2, 'MaxNoBlock', 'N', 'APRV');
insert into approval_category_values values ('No score', 'APRV', 0);
insert into approval_category_values values ('Approved', 'APRV', 1);
update approval_category_values set name = "Looks good to me (core reviewer)" where name="Looks good to me, approved";
Install Apache
--------------
::
apt-get install apache2
create: /etc/apache2/sites-available/gerrit
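A minimal vhost sketch, consistent with the answers given to gerrit init
above, is an SSL-terminating proxy to the daemon on port 8081 (the
certificate paths here are assumptions, not the production values)::
<VirtualHost *:443>
ServerName review.openstack.org
SSLEngine on
SSLCertificateFile /etc/ssl/certs/review.openstack.org.pem
SSLCertificateKeyFile /etc/ssl/private/review.openstack.org.key
ProxyPass / http://localhost:8081/
ProxyPassReverse / http://localhost:8081/
</VirtualHost>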
::
a2enmod ssl proxy proxy_http rewrite
a2ensite gerrit
a2dissite default
Install Exim
------------
::
apt-get install exim4
dpkg-reconfigure exim4-config
Choose "internet site", otherwise select defaults
edit: /etc/default/exim4 ::
QUEUEINTERVAL='5m'
GitHub Setup
============
Generate an SSH key for Gerrit for use on GitHub
------------------------------------------------
::
sudo su - gerrit2
gerrit2@gerrit:~$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/home/gerrit2/.ssh/id_rsa):
Created directory '/home/gerrit2/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
GitHub Configuration
--------------------
#. create openstack-gerrit user on github
#. add gerrit2 ssh public key to openstack-gerrit user
#. create gerrit team in openstack org on github with push/pull access
#. add openstack-gerrit to gerrit team in openstack org
#. add public master repo to gerrit team in openstack org
#. save github host key in known_hosts
::
gerrit2@gerrit:~$ ssh git@github.com
The authenticity of host 'github.com (207.97.227.239)' can't be established.
RSA key fingerprint is 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'github.com,207.97.227.239' (RSA) to the list of known hosts.
PTY allocation request failed on channel 0
Gerrit Replication to GitHub
----------------------------
::
cat <<EOF >review_site/etc/replication.config
[remote "github"]
url = git@github.com:\${name}.git
EOF
Jenkins / Gerrit Integration
============================
Create a Jenkins User in Gerrit
-------------------------------
With the jenkins public key, as a gerrit admin user::
cat jenkins.pub | ssh -p29418 review.openstack.org gerrit create-account --ssh-key - --full-name Jenkins jenkins
Create "CI Systems" group in gerrit, make jenkins a member
Create a Gerrit Git Prep Job in Jenkins
---------------------------------------
When gating trunk with Jenkins, we want to test changes as they will
appear once merged by Gerrit, but the gerrit trigger plugin will, by
default, test them as submitted. If HEAD moves on while the change is
under review, it may end up getting merged with HEAD, and we want to
test the result.
To do that, make sure the "Hudson Template Project plugin" is
installed, then set up a new job called "Gerrit Git Prep", and add a
shell command build step (no other configuration)::
#!/bin/sh -x
git checkout $GERRIT_BRANCH
git reset --hard remotes/origin/$GERRIT_BRANCH
git merge FETCH_HEAD
CODE=$?
if [ ${CODE} -ne 0 ]; then
git reset --hard remotes/origin/$GERRIT_BRANCH
exit ${CODE}
fi
Later, we will configure Jenkins jobs that we want to behave this way
to use this build step.
Auto Review Expiry
==================
Puppet automatically installs a daily cron job called ``expire_old_reviews.py``
onto the gerrit servers. This script follows two rules:
#. If the review hasn't been touched in 2 weeks, mark as abandoned.
#. If there is a negative review and it hasn't been touched in 1 week, mark as
abandoned.
If your review is abandoned by either of these rules, it can be restored
(unabandoned) from the gerrit web interface.
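Conceptually, the first rule reduces to a Gerrit ssh query along these
lines (a sketch only; the real logic lives in expire_old_reviews.py in the
openstack-ci repo, and the abandon step is elided)::
ssh -p29418 review.openstack.org gerrit query --format=JSON --current-patch-set status:open age:2w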
Launchpad Sync
==============
The launchpad user sync process consists of two scripts which are in
openstack/openstack-ci on github: sync_launchpad_gerrit.py and
insert_gerrit.py.
Both scripts should be run as gerrit2 on review.openstack.org
sync_launchpad_gerrit.py runs and creates a python pickle file, users.pickle,
with all of the user and group information. This is a long process (about 12
minutes).
insert_gerrit.py reads the pickle file and applies it to the MySQL database.
The gerrit caches must then be flushed.
Depends
-------
::
apt-get install python-mysqldb python-openid python-launchpadlib
Keys
----
The key for the launchpad sync user is in ~/.ssh/launchpad_rsa. Connecting
to Launchpad requires oauth authentication - so the first time
sync_launchpad_gerrit.py is run, it will display a URL. Open this URL in a
browser and log in to launchpad as the hudson-openstack user. Subsequent
runs will run with cached credentials.
Running
-------
::
cd openstack-ci
git pull
python sync_launchpad_gerrit.py
python insert_gerrit.py
ssh -i /home/gerrit2/.ssh/launchpadsync_rsa -p29418 review.openstack.org gerrit flush-caches
Gerrit IRC Bot
==============
Installation
------------
Ensure there is an up-to-date checkout of openstack-ci in ~gerrit2.
::
apt-get install python-irclib python-daemon
cp ~gerrit2/openstack-ci/gerritbot.init /etc/init.d
chmod a+x /etc/init.d/gerritbot
update-rc.d gerritbot defaults
su - gerrit2
ssh-keygen -f /home/gerrit2/.ssh/gerritbot_rsa
As a Gerrit admin, create a user for gerritbot::
cat ~gerrit2/.ssh/gerritbot_rsa.pub | ssh -p29418 gerrit.openstack.org gerrit create-account --ssh-key - --full-name GerritBot gerritbot
Configure gerritbot, including which events should be announced::
cat <<EOF >~gerrit2/gerritbot.config
[ircbot]
nick=NICNAME
pass=PASSWORD
server=irc.freenode.net
channel=openstack-dev
port=6667
[gerrit]
user=gerritbot
key=/home/gerrit2/.ssh/gerritbot_rsa
host=review.openstack.org
port=29418
events=patchset-created, change-merged, x-vrif-minus-1, x-crvw-minus-2
EOF
Register an account with NickServ on FreeNode, and put the account and
password in the config file.
::
sudo /etc/init.d/gerritbot start
Launchpad Bug Integration
=========================
In addition to the hyperlinks provided by the regex in gerrit.config,
we use a Gerrit hook to update Launchpad bugs when changes referencing
them are applied.
Installation
------------
Ensure an up-to-date checkout of openstack-ci is in ~gerrit2.
::
apt-get install python-pyme
cp ~gerrit2/gerrit-hooks/change-merged ~gerrit2/review_site/hooks/
Create a GPG key and register it with Launchpad::
gerrit2@gerrit:~$ gpg --gen-key
gpg (GnuPG) 1.4.11; Copyright (C) 2010 Free Software Foundation, Inc.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Please select what kind of key you want:
(1) RSA and RSA (default)
(2) DSA and Elgamal
(3) DSA (sign only)
(4) RSA (sign only)
Your selection?
RSA keys may be between 1024 and 4096 bits long.
What keysize do you want? (2048)
Requested keysize is 2048 bits
Please specify how long the key should be valid.
0 = key does not expire
<n> = key expires in n days
<n>w = key expires in n weeks
<n>m = key expires in n months
<n>y = key expires in n years
Key is valid for? (0)
Key does not expire at all
Is this correct? (y/N) y
You need a user ID to identify your key; the software constructs the user ID
from the Real Name, Comment and Email Address in this form:
"Heinrich Heine (Der Dichter) <heinrichh@duesseldorf.de>"
Real name: Openstack Gerrit
Email address: review@openstack.org
Comment:
You selected this USER-ID:
"Openstack Gerrit <review@openstack.org>"
Change (N)ame, (C)omment, (E)mail or (O)kay/(Q)uit? o
You need a Passphrase to protect your secret key.
gpg: gpg-agent is not available in this session
You don't want a passphrase - this is probably a *bad* idea!
I will do it anyway. You can change your passphrase at any time,
using this program with the option "--edit-key".
We need to generate a lot of random bytes. It is a good idea to perform
some other action (type on the keyboard, move the mouse, utilize the
disks) during the prime generation; this gives the random number
generator a better chance to gain enough entropy.
gpg: /home/gerrit2/.gnupg/trustdb.gpg: trustdb created
gpg: key 382ACA7F marked as ultimately trusted
public and secret key created and signed.
gpg: checking the trustdb
gpg: 3 marginal(s) needed, 1 complete(s) needed, PGP trust model
gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u
pub 2048R/382ACA7F 2011-07-26
Key fingerprint = 21EF 7F30 C281 F61F 44CD EC48 7424 9762 382A CA7F
uid Openstack Gerrit <review@openstack.org>
sub 2048R/95F6FA4A 2011-07-26
gerrit2@gerrit:~$ gpg --send-keys --keyserver keyserver.ubuntu.com 382ACA7F
gpg: sending key 382ACA7F to hkp server keyserver.ubuntu.com
Log into the Launchpad account and add the GPG key to the account.
Adding New Projects
*******************
Creating a Project in Gerrit
============================
Using ssh key of a gerrit admin (you)::
ssh -p 29418 review.openstack.org gerrit create-project --name openstack/PROJECT
If the project is an API project (eg, image-api), we want it to share
some extra permissions that are common to all API projects (eg, the
OpenStack documentation coordinators can approve changes, see
:ref:`acl`). Run the following command to reparent the project if it
is an API project::
ssh -p 29418 gerrit.openstack.org gerrit set-project-parent --parent API-Projects openstack/PROJECT
Add yourself to the "Project Bootstrappers" group in Gerrit which will
give you permissions to push to the repo bypassing code review.
Do the initial push of the project with::
git push ssh://USERNAME@review.openstack.org:29418/openstack/PROJECT.git HEAD:refs/heads/master
git push --tags ssh://USERNAME@review.openstack.org:29418/openstack/PROJECT.git
Remove yourself from the "Project Bootstrappers" group, and then set
the access controls as specified in :ref:`acl`.
Have Jenkins Monitor a Gerrit Project
=====================================
In jenkins, under source code management:
* select git
* url: ssh://jenkins@review.openstack.org:29418/openstack/project.git
* click "advanced"
* refspec: $GERRIT_REFSPEC
* branches: origin/$GERRIT_BRANCH
* click "advanced"
* choosing strategy: gerrit trigger
* select gerrit event under build triggers:
* Trigger on Comment Added
* Approval Category: APRV
* Approval Value: 1
* plain openstack/project
* path **
* Select "Add build step" under "Build"
* select "Use builders from another project"
* Template Project: "Gerrit Git Prep"
* make sure this build step is the first in the sequence
Create a Project in GitHub
==========================
As a github openstack admin:
* Visit https://github.com/organizations/openstack
* Click New Repository
* Visit the gerrit team admin page
* Add the new repository to the gerrit team
Pull requests cannot be disabled for a project in GitHub, so instead
we have a script that runs from cron to close any open pull requests
with instructions to use Gerrit.
* Edit openstack/openstack-ci-puppet:manifests/site.pp
and add the project to the list of github projects in the gerrit class
for the gerrit.openstack.org node.
Migrating a Project from bzr
============================
Add the bzr PPA and install bzr-fastimport::
add-apt-repository ppa:bzr/ppa
apt-get update
apt-get install bzr-fastimport
Doing this from the bzr PPA is important to ensure at least version 0.10 of
bzr-fastimport.
Clone git-bzr-ng from termie::
git clone https://github.com/termie/git-bzr-ng.git
In git-bzr-ng, you'll find a script, git-bzr. Put it somewhere in your path.
Then, to get a git repo which contains the migrated bzr branch, run::
git bzr clone lp:${BRANCHNAME} ${LOCATION}
So, for instance, to do glance, you would do::
git bzr clone lp:glance glance
And you will then have a git repo of glance in the glance dir. This git repo
is now suitable for uploading into gerrit to become the new master repo.
Project Config
==============
There are a few options which need to be enabled on the project in the Admin
interface.
* Merge Strategy should be set to "Merge If Necessary"
* "Automatically resolve conflicts" should be enabled
* "Require Change-Id in commit message" should be enabled
* "Require a valid contributor agreement to upload" should be enabled
Optionally, if the PTL agrees to it:
* "Require the first line of the commit to be 50 characters or less" should
be enabled.
.. _acl:
Access Controls
***************
High level goals:
#. Anonymous users can read all projects.
#. All registered users can perform informational code review (+/-1)
on any project.
#. Jenkins can perform verification (blocking or approving: +/-1).
#. All registered users can create changes.
#. The OpenStack Release Manager and Jenkins can tag releases (push
annotated tags).
#. Members of $PROJECT-core group can perform full code review
(blocking or approving: +/- 2), and submit changes to be merged.
#. Members of openstack-release (Release Manager and PTLs), and
$PROJECT-drivers (PTL and release minded people) exclusively can
perform full code review (blocking or approving: +/- 2), and submit
changes to be merged on milestone-proposed branches.
#. Full code review (+/- 2) of API projects should be available to the
-core group of the corresponding implementation project as well as to
the OpenStack Documentation Coordinators.
#. Full code review of stable branches should be available to the
-core group of the project as well as the openstack-stable-maint
group.
To manage API project permissions collectively across projects, API
projects are reparented to the "API-Projects" meta-project instead of
"All-Projects". This causes them to inherit permissions from the
API-Projects project (which, in turn, inherits from All-Projects).
These permissions try to achieve the high level goals::
All Projects (metaproject):
refs/*
read: anonymous
push annotated tag: release managers, ci tools, project bootstrappers
forge author identity: registered users
forge committer identity: project bootstrappers
push (w/ force push): project bootstrappers
create reference: project bootstrappers, release managers
push merge commit: project bootstrappers
refs/for/refs/*
push: registered users
refs/heads/*
label code review:
-1/+1: registered users
-2/+2: project bootstrappers
label verified:
-1/+1: ci tools
-1/+1: project bootstrappers
label approved 0/+1: project bootstrappers
submit: ci tools
submit: project bootstrappers
refs/heads/milestone-proposed
label code review (exclusive):
-2/+2 openstack-release
-1/+1 registered users
label approved (exclusive): 0/+1: openstack-release
owner: openstack-release
refs/heads/stable/*
label code review (exclusive):
-2/+2 openstack-stable-maint
-1/+1 registered users
label approved (exclusive): 0/+1: openstack-stable-maint
refs/meta/config
read: project owners
API Projects (metaproject):
refs/*
owner: Administrators
refs/heads/*
label code review -2/+2: openstack-doc-core
label approved 0/+1: openstack-doc-core
project foo:
refs/*
owner: Administrators
refs/heads/*
label code review -2/+2: foo-core
label approved 0/+1: foo-core
refs/heads/milestone-proposed
label code review -2/+2: foo-drivers
label approved 0/+1: foo-drivers
Renaming a Project
******************
Renaming a project is not automated and is disruptive to developers,
so it should be avoided. Allow for an hour of downtime for the
project in question, and about 10 minutes of downtime for all of
Gerrit. All Gerrit changes, merged and open, will carry over, so
in-progress changes do not need to be merged before the move.
To rename a project:
#. Make it inaccessible by editing the Access pane. Add a "read" ACL
for "Administrators", and mark it "exclusive". Be sure to save
changes.
#. Update the database::
update account_project_watches
set project_name = "openstack/NEW"
where project_name = "openstack/OLD";
update changes
set dest_project_name = "openstack/NEW"
where dest_project_name = "openstack/OLD";
#. Wait for Jenkins to be idle (or take it offline)
#. Stop Gerrit and move the Git repository::
/etc/init.d/gerrit stop
cd /home/gerrit2/review_site/git/openstack/
mv OLD.git/ NEW.git
/etc/init.d/gerrit start
#. (Bring Jenkins online if need be)
#. Rename the project in GitHub
#. Update Jenkins jobs to reference the new name. Rename the jobs
themselves as appropriate
#. Remove the read access ACL you set on the project in the first step
#. Submit a change that updates .gitreview with the new location of the
project
Developers will either need to re-clone a new copy of the repository,
or manually update their remotes.
Adding A New Project On The Command Line
****************************************
All of the steps involved in adding a new project to Gerrit can be
accomplished via the commandline, with the exception of creating a new repo
on github and adding the jenkins jobs.
First of all, add the .gitreview file to the repo that will be added. Then,
assuming an ssh config alias of `review` for the gerrit instance, as a person
in the Project Bootstrappers group::
ssh review gerrit create-project --name openstack/$PROJECT
git review -s
git push gerrit HEAD:refs/heads/master
git push --tags gerrit
At this point, the branch contents will be in gerrit, and the project config
settings and ACLs need to be set. These are maintained in a special branch
inside of git in gerrit. Check out the branch from git::
git fetch gerrit +refs/meta/*:refs/remotes/gerrit-meta/*
git checkout -b config remotes/gerrit-meta/config
There will be two interesting files, `groups` and `project.config`. `groups`
contains UUIDs and names of groups that will be referenced in
`project.config`. There is a helper script in the openstack-ci repo called
`get_group_uuid.py` which will fetch the UUID for a given group. For
$PROJECT-core and $PROJECT-drivers::
openstack-ci/gerrit/get_group_uuid.py $GROUP_NAME
And make entries in `groups` for each one of them. Next, edit
`project.config` to look like::
[access "refs/*"]
owner = group Administrators
[receive]
requireChangeId = true
requireContributorAgreement = true
[submit]
mergeContent = true
[access "refs/heads/*"]
label-Code-Review = -2..+2 group $PROJECT-core
label-Approved = +0..+1 group $PROJECT-core
[access "refs/heads/milestone-proposed"]
label-Code-Review = -2..+2 group $PROJECT-drivers
label-Approved = +0..+1 group $PROJECT-drivers
Replace $PROJECT with the name of the project.
Finally, commit the changes and push the config back up to Gerrit::
git commit -m "Initial project config"
git push gerrit HEAD:refs/meta/config

View File

@@ -1,36 +0,0 @@
.. OpenStack CI documentation master file, created by
sphinx-quickstart on Mon Jul 18 13:42:23 2011.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
OpenStack Continuous Integration
================================
This documentation covers the installation and maintenance of the
Continuous Integration (CI) infrastructure used by OpenStack. It
may be of interest to people who may want to help develop this
infrastructure or integrate their tools into it. Some instructions
may be useful to other projects that want to set up similar CI
systems.
OpenStack developers or users do not need to read this documentation.
Instead, see http://wiki.openstack.org/ to learn how to contribute to or
use OpenStack.
Contents:
.. toctree::
:maxdepth: 2
systems
jenkins
gerrit
puppet
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@@ -1,340 +0,0 @@
:title: Jenkins Configuration
Jenkins
#######
Overview
********
Jenkins is a Continuous Integration system and the central control
system for the orchestration of both pre-merge testing and post-merge
actions such as packaging and publishing of documentation.
The overall design that Jenkins helps implement is that all code should
be reviewed and tested before being merged into trunk, and that as many
tasks around review, testing, merging and release as can be automated
should be.
Jenkins is essentially a job queuing system, and everything that is done
through Jenkins can be thought of as having a few discrete components:
* Triggers - What causes a job to be run
* Location - Where do we run a job
* Steps - What actions are taken when the job runs
* Results - What is the outcome of the job
The OpenStack Jenkins can be found at http://jenkins.openstack.org
OpenStack uses :doc:`gerrit` to manage code reviews, which in turn calls
Jenkins to test those reviews.
Authorization
*************
Jenkins is set up to use OpenID in a Single Sign On mode with Launchpad.
This means that all of the user and group information is managed via
Launchpad users and teams. In the Jenkins Security Matrix, a Launchpad team
name can be specified and any members of that team will be granted those
permissions. However, because of the way the information is processed, a
user will need to re-log in upon changing either team membership on
Launchpad, or changing that team's authorization in Jenkins for the new
privileges to take effect.
Integration Testing
*******************
TODO: How others can get involved in testing and integrating with
OpenStack Jenkins.
Rackspace Bare-Metal Testing Cluster
====================================
The CI team maintains a cluster of machines supplied by Rackspace to
perform bare-metal deployment and testing of OpenStack as a whole.
This installation is intended as a reference implementation of just
one of many possible testing platforms, all of which can be integrated
with the OpenStack Jenkins system. This is a cluster of several
physical machines meaning the test environment has access to all of
the native processor features, and real-world networking, including
tagged VLANs.
Each time the trunk repo is updated, a Jenkins job will deploy an
OpenStack cluster using devstack and then run the openstack-test-rax
test suite against the cluster.
Deployment and Testing Process
------------------------------
The cluster deployment is divided into two phases: base operating
system installation, and OpenStack installation. Because the
operating system install takes considerable time (15 to 30 minutes),
has external network resource dependencies (the distribution mirror),
and has no bearing on the outcome of the OpenStack tests themselves,
the process used here effectively snapshots the machines immediately
after the base OS install and before OpenStack is installed. LVM
snapshots and kexec are used to immediately return the cluster to a
newly installed state without incurring the additional time it would
take to install from scratch. The Jenkins testing job invokes the
process starting at :ref:`rax_openstack_install`.
Installation Server Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The CI team runs the Ubuntu Orchestra server (based on cobbler) on our
Jenkins slave node to manage the OS installation on the test machines.
The configuration for the Orchestra server is kept in the CI team's
puppet modules. If you want to set up your own system, Orchestra is
not required; any system capable of performing the following steps is
suitable. However, if you want to stand up a test system as quickly
and simply as possible, you may find it easiest to base your system on
the one the CI team uses. You may use the puppet modules yourself, or
follow the instructions below.
The CI team's Orchestra configuration module is at:
https://github.com/openstack/openstack-ci-puppet/tree/master/modules/orchestra
Install Orchestra
"""""""""""""""""
Install Ubuntu 11.10 (Oneiric) and Orchestra::
sudo apt-get install ubuntu-orchestra-server ipmitool
The install process will prompt you to enter a password for Cobbler.
Have one ready and keep it in a safe place. The procedure here will
not use it, but if you later want to use the Cobbler web interface,
you will need it.
Configure Orchestra
"""""""""""""""""""
Install the following files on the Orchestra server so that it deploys
machines with our LVM/kexec test framework.
We update the dnsmasq.conf cobbler template to add
"dhcp-ignore=tag:!known", and some site-specific network
configuration::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/dnsmasq.template \
-O /etc/cobbler/dnsmasq.template
Our servers need a kernel module blacklisted in order to boot
correctly. If you don't need to blacklist any modules, you should
either create an empty file here, or remove the reference to this file
from the preseed file later::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/openstack_module_blacklist \
-O /var/lib/cobbler/snippets/openstack_module_blacklist
This cobbler snippet uses cloud-init to set up the LVM/kexec
environment and configures TCP syslogging to the installation
server/Jenkins slave::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/openstack_cloud_init \
-O /var/lib/cobbler/snippets/openstack_cloud_init
This snippet holds the mysql root password that will be configured at
install time. It's currently a static string, but you could
dynamically write this file, or simply replace it with something more
secure::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/openstack_mysql_password \
-O /var/lib/cobbler/snippets/openstack_mysql_password
This preseed file manages the OS install on the test nodes. It
includes the snippets installed above::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/openstack-test.preseed \
-O /var/lib/cobbler/kickstarts/openstack-test.preseed
The following sudoers configuration is needed to allow Jenkins to
control cobbler, remove syslog files from the test hosts before
starting new tests, and restart rsyslog::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/orchestra-jenkins-sudoers -O /etc/sudoers.d/orchestra-jenkins
Replace the Orchestra rsyslog config file with a simpler one that logs
all information from remote hosts in one file per host::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/99-orchestra.conf -O /etc/rsyslog.d/99-orchestra.conf
Make sure the syslog directories exist and restart rsyslog::
mkdir -p /var/log/orchestra/rsyslog/
chown -R syslog.syslog /var/log/orchestra/
restart rsyslog
Add an "OpenStack Test" system profile to cobbler that uses the
preseed file above::
cobbler profile add \
--name=natty-x86_64-ostest \
--parent=natty-x86_64 \
--kickstart=/var/lib/cobbler/kickstarts/openstack-test.preseed \
--kopts="priority=critical locale=en_US"
Add each of your systems to cobbler with a command similar to this
(you may need different kernel options)::
cobbler system add \
--name=baremetal1 \
--hostname=baremetal1 \
--profile=natty-x86_64-ostest \
--mac=00:11:22:33:44:55 \
--power-type=ipmitool \
--power-user=IPMI_USERNAME \
--power-pass=IPMI_PASS \
--power-address=IPMI_IP_ADDR \
--ip-address=SYSTEM_IP_ADDRESS \
--subnet=SYSTEM_SUBNET \
--kopts="netcfg/choose_interface=auto netcfg/dhcp_timeout=60 auto=true priority=critical"
When complete, have cobbler write out its configuration files::
cobbler sync
Set Up Jenkins Jobs
"""""""""""""""""""
We have Jenkins jobs to handle all of the tasks after the initial
Orchestra configuration so that we can easily run them at any time.
This includes the OS installation on the test nodes; even though we
don't run that often (the state is preserved in an LVM snapshot), we
may want to change the configuration used and make a new snapshot. In
that case we just need to trigger the Jenkins job again.
The Jenkins job that kicks off the operating system installation calls
the "baremetal-os-install.sh" script from the openstack-ci repo:
https://github.com/openstack/openstack-ci/blob/master/slave_scripts/baremetal-os-install.sh
That script instructs cobbler to install the OS on each of the test
nodes.
To speed up the devstack installation and avoid excessive traffic to
the PyPI server, we build a PIP package cache on the installation
server. That is also an infrequent task that we configure as a
Jenkins job, which calls:
https://github.com/openstack/openstack-ci/blob/master/slave_scripts/update-pip-cache.sh
That builds a PIP package cache that the test script later copies to
the test servers for use by devstack.
Run those two jobs, and once complete, the test nodes are ready to go.
This is the end of the operating system installation, and the system
is currently in the pristine state that will be used by the test
procedure (which is stored in the LVM volume "orig_root").
.. _rax_openstack_install:
OpenStack Installation
~~~~~~~~~~~~~~~~~~~~~~
When the deployment and integration test job runs, it does the
following, each time starting from the pristine state reached at the
end of the previous section.
Reset the Test Nodes
""""""""""""""""""""
The Jenkins deployment and test job first runs the deployment script:
https://github.com/openstack/openstack-ci/blob/master/slave_scripts/baremetal-deploy.sh
Which invokes the following script on each host to reset it to the
pristine state:
https://github.com/openstack/openstack-ci/blob/master/slave_scripts/lvm-kexec-reset.sh
Because kexec is in use, resetting the environment and rebooting into
the pristine state takes only about 3 seconds.
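On each host the reset amounts to roughly the following (a sketch only;
the volume group name and kernel arguments are assumptions, see
lvm-kexec-reset.sh for the real logic)::
# Drop the snapshot dirtied by the last run and take a fresh one of
# the pristine install preserved in orig_root.
lvremove -f /dev/main/root
lvcreate -s -n root -L 20G /dev/main/orig_root
# Load the installed kernel and jump straight into it, skipping the BIOS.
kexec -l /vmlinuz --initrd=/initrd.img --append="root=/dev/main/root ro"
kexec -e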
The deployment script then removes the syslog files from the previous
run and restarts rsyslog to re-open them. Once the first test host
finishes booting and brings up its network, OpenStack installation
starts.
Run devstack on the Test Nodes
""""""""""""""""""""""""""""""
Devstack's build_bm_multi script is run, which invokes devstack on
each of the test nodes: first on the "head" node, which runs all of
the OpenStack services for the remaining "compute" nodes, and then on
the compute nodes themselves.
Run Test Suite
""""""""""""""
Once devstack is complete, the test suite is run. All logs from the
test nodes should be sent via syslog to the Jenkins slave, and at the
end of the test, the logs are archived with the Job for developers to
inspect in case of problems.
Cluster Configuration
---------------------
Here are the configuration parameters of the CI team's test cluster.
The cluster is currently divided into three mini-clusters so that
independent Jenkins jobs can run in parallel on the different
clusters.
VLANs
~~~~~
+----+--------------------------------+
|VLAN| Description |
+====+================================+
|90 | Native VLAN |
+----+--------------------------------+
|91 | Internal cluster communication |
| | network: 192.168.91.0/24 |
+----+--------------------------------+
|92 | Public Internet (fake) |
| | network: 192.168.92.0/24 |
+----+--------------------------------+
Servers
~~~~~~~
The servers are located on the Rackspace network, only accessible via
VPN.
+-----------+--------------+---------------+
| Server | Primary IP | Management IP |
+===========+==============+===============+
|deploy-rax | 10.14.247.36 | 10.14.247.46 |
+-----------+--------------+---------------+
|baremetal1 | 10.14.247.37 | 10.14.247.47 |
+-----------+--------------+---------------+
|baremetal2 | 10.14.247.38 | 10.14.247.48 |
+-----------+--------------+---------------+
|baremetal3 | 10.14.247.39 | 10.14.247.49 |
+-----------+--------------+---------------+
|baremetal4 | 10.14.247.40 | 10.14.247.50 |
+-----------+--------------+---------------+
|baremetal5 | 10.14.247.41 | 10.14.247.51 |
+-----------+--------------+---------------+
|baremetal6 | 10.14.247.42 | 10.14.247.52 |
+-----------+--------------+---------------+
|baremetal7 | 10.14.247.43 | 10.14.247.53 |
+-----------+--------------+---------------+
|baremetal8 | 10.14.247.44 | 10.14.247.54 |
+-----------+--------------+---------------+
|baremetal9 | 10.14.247.45 | 10.14.247.55 |
+-----------+--------------+---------------+
deploy-rax
The deployment server and Jenkins slave. It deploys the servers
using Orchestra and Devstack, and runs the test framework. It
should not run any OpenStack components, but we can install
libraries or anything else needed to run tests.
baremetal1, baremetal4, baremetal7
Configured as "head" nodes to run nova, mysql, and glance. Each one
is the head node of a three node cluster including the two compute
nodes following it.
baremetal2-3, baremetal5-6, baremetal8-9
Configured as compute nodes for each of the three mini-clusters.

View File

@@ -1,123 +0,0 @@
Puppet Modules
==============
Overview
--------
Much of the OpenStack project infrastructure is deployed and managed using
puppet.
The OpenStack CI team manage a number of custom puppet modules outlined in this
document.
Doc Server
----------
The doc_server module configures nginx [4]_ to serve the documentation for
several specified OpenStack projects. At the moment, to add a site you need
to edit ``modules/doc_server/manifests/init.pp`` and add a line as
follows:
.. code-block:: ruby
:linenos:
doc_server::site { "swift": }
In this example nginx will be configured to serve ``swift.openstack.org``
from ``/srv/docs/swift`` and ``swift.openstack.org/tarballs/`` from
``/srv/tarballs/swift``
Lodgeit
-------
The lodgeit module installs and configures lodgeit [1]_ on required servers to
be used as paste installations. For OpenStack we use a fork of this maintained
by dcolish [2]_ which contains bug fixes necessary for us to use it.
Puppet will configure lodgeit to use drizzle [3]_ as a database backend,
nginx [4]_ as a front-end proxy and upstart scripts to run the lodgeit
instances. It will store and maintain a local branch of the mercurial
repository for lodgeit in ``/tmp/lodgeit-main``.
To use this module you need to add something similar to the following in the
main ``site.pp`` manifest:
.. code-block:: ruby
:linenos:
node "paste.openstack.org" {
include openstack_server
include lodgeit
lodgeit::site { "openstack":
port => "5000",
image => "header-bg2.png"
}
lodgeit::site { "drizzle":
port => "5001"
}
}
In this example we include the lodgeit module, which will install all the
prerequisites for Lodgeit as well as create a checkout ready for use.
The ``lodgeit::site`` calls create the individual paste sites.
The name in the ``lodgeit::site`` call will be used to determine the URL, path
and name of the site. So "openstack" will create ``paste.openstack.org``,
place it in ``/srv/lodgeit/openstack`` and give it an upstart script called
``openstack-paste``. It will also change the h1 tag to say "Openstack".
The port number given needs to be a unique port which the lodgeit service will
run on. The puppet script will then configure nginx to proxy to that port.
Finally, if an image is given, it will be used instead of text inside the h1
tag of the site. The images need to be stored in the ``modules/lodgeit/files``
directory.
Lodgeit Backups
^^^^^^^^^^^^^^^
The lodgeit module will automatically create a git repository in
``/var/backups/lodgeit_db``. Inside this, every site will have its own SQL
file; for example "openstack" will have a file called ``openstack.sql``.
Every day a cron job will update the SQL file (one job per file) and commit
it to the git repository.
.. note::
Ideally the SQL files would have a row on every line to keep the diffs stored
in git small, but ``drizzledump`` does not yet support this.
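Each per-site cron job amounts to roughly the following (a sketch; the
database name is an assumption derived from the site name)::
cd /var/backups/lodgeit_db
drizzledump lodgeit_openstack > openstack.sql
git add openstack.sql
git commit -m "Daily backup of the openstack paste database"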
Planet
------
The planet module installs Planet Venus [5]_ along with required dependencies
on a server. It also configures specified planets based on options given.
Planet Venus works by having a cron job which creates static files. In this
module the static files are served using nginx [4]_.
To use this module you need to add something similar to the following into the
main ``site.pp`` manifest:
.. code-block:: ruby
:linenos:
node "planet.openstack.org" {
include planet
planet::site { "openstack":
git_url => "https://github.com/openstack/openstack-planet.git"
}
}
In this example the name "openstack" is used to create the site
``planet.openstack.org``. The site will be served from
``/srv/planet/openstack/`` and the checkout of the ``git_url`` supplied will
be maintained in ``/var/lib/planet/openstack/``.
This module will also create a cron job to pull new feed data at 3 minutes past each hour.
The ``git_url`` parameter needs to point to a git repository which stores the
planet.ini configuration for the planet (which stores a list of feeds) and any required theme data. This will be pulled every time puppet is run.
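The generated cron entry looks roughly like this (a sketch; the exact
Planet Venus invocation is an assumption, and paths follow the layout
above)::
3 * * * * cd /var/lib/planet/openstack && python planet.py planet.ini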
.. rubric:: Footnotes
.. [1] `Lodgeit homepage <http://www.pocoo.org/projects/lodgeit/>`_
.. [2] `dcolish's Lodgeit fork <https://bitbucket.org/dcolish/lodgeit-main>`_
.. [3] `Drizzle homepage <http://www.drizzle.org/>`_
.. [4] `nginx homepage <http://nginx.org/en/>`_
.. [5] `Planet Venus <http://intertwingly.net/code/venus/docs/index.html>`_

View File

@@ -1,77 +0,0 @@
:title: Infrastructure Systems
Infrastructure Systems
######################
The OpenStack CI team maintains a number of systems that are critical
to the operation of the OpenStack project. At the time of writing,
these include:
* Gerrit (review.openstack.org)
* Jenkins (jenkins.openstack.org)
* community.openstack.org
Additionally the team maintains the project sites on Launchpad and
GitHub. The following policies have been adopted to ensure the
continued and secure operation of the project.
SSH Access
**********
For any of the systems managed by the CI team, the following practices
must be observed for SSH access:
* SSH access is only permitted with SSH public/private key
authentication.
* Users must use a strong passphrase to protect their private key. A
passphrase of several words, at least one of which is not in a
dictionary, is advised; alternatively, use a random string of at least
16 characters.
* To mitigate the inconvenience of using a long passphrase, users may
want to use an SSH agent so that the passphrase is only requested
once per desktop session.
* Users' private keys must never be stored anywhere except their own
workstation(s). In particular, they must never be stored on any
remote server.
* If users need to 'hop' from a server or bastion host to another
machine, they must not copy a private key to the intermediate
machine (see above). Instead SSH agent forwarding may be used.
However, because a compromised intermediate machine could ask the
agent to sign requests without the user's knowledge, only an SSH
agent that interactively prompts the user each time a signing
request is received (i.e., ssh-agent, but not gnome-keyring) should
be used in this case, and the SSH keys should be added with the
confirmation constraint ('ssh-add -c').
* The number of SSH keys that are configured to permit access to
OpenStack machines should be kept to a minimum.
* OpenStack CI machines must use puppet to centrally manage and
configure user accounts, and the SSH authorized_keys files from the
openstack-ci-puppet repository.
* SSH keys should be periodically rotated (at least once per year).
During rotation, a new key can be added to puppet for a time, and
then the old one removed.
GitHub Access
*************
To ensure that code review and testing are not bypassed in the public
Git repositories, only Gerrit will be permitted to commit code to
OpenStack repositories. Because GitHub always allows project
administrators to commit code, accounts that have access to manage the
GitHub projects necessarily will have commit access to the
repositories. Therefore, to avoid inadvertent commits to the public
repositories, unique administrative-only accounts must be used to
manage the OpenStack GitHub organization and projects. These accounts
will not be used to check out or commit code for any project.
Launchpad Teams
***************
Each OpenStack project should have the following teams on Launchpad:
* foo -- contributors to project 'foo'
* foo-core -- core developers
* foo-bugs -- people interested in receiving bug reports
* foo-drivers -- people who may approve and target blueprints
The openstack-admins team should be a member of each of those teams.

View File

@@ -1,93 +0,0 @@
import os
import sys
import uuid
import subprocess
from datetime import datetime
import StringIO
import ConfigParser
import MySQLdb
GERRIT_USER = os.environ.get('GERRIT_USER', 'launchpadsync')
GERRIT_CONFIG = os.environ.get('GERRIT_CONFIG',
'/home/gerrit2/review_site/etc/gerrit.config')
GERRIT_SECURE_CONFIG = os.environ.get('GERRIT_SECURE_CONFIG',
'/home/gerrit2/review_site/etc/secure.config')
GERRIT_SSH_KEY = os.environ.get('GERRIT_SSH_KEY',
'/home/gerrit2/.ssh/launchpadsync_rsa')
GERRIT_CACHE_DIR = os.path.expanduser(os.environ.get('GERRIT_CACHE_DIR',
'~/.launchpadlib/cache'))
GERRIT_CREDENTIALS = os.path.expanduser(os.environ.get('GERRIT_CREDENTIALS',
'~/.launchpadlib/creds'))
GERRIT_BACKUP_PATH = os.environ.get('GERRIT_BACKUP_PATH',
'/home/gerrit2/dbupdates')
for check_path in (os.path.dirname(GERRIT_CACHE_DIR),
os.path.dirname(GERRIT_CREDENTIALS),
GERRIT_BACKUP_PATH):
if not os.path.exists(check_path):
os.makedirs(check_path)
def get_broken_config(filename):
""" gerrit config ini files are broken and have leading tabs """
text = ""
with open(filename,"r") as conf:
for line in conf.readlines():
text = "%s%s" % (text, line.lstrip())
fp = StringIO.StringIO(text)
c=ConfigParser.ConfigParser()
c.readfp(fp)
return c
def get_type(in_type):
if in_type == "RSA":
return "ssh-rsa"
else:
return "ssh-dsa"
gerrit_config = get_broken_config(GERRIT_CONFIG)
secure_config = get_broken_config(GERRIT_SECURE_CONFIG)
DB_USER = gerrit_config.get("database", "username")
DB_PASS = secure_config.get("database","password")
DB_DB = gerrit_config.get("database","database")
db_backup_file = "%s.%s.sql" % (DB_DB, datetime.isoformat(datetime.now()))
db_backup_path = os.path.join(GERRIT_BACKUP_PATH, db_backup_file)
retval = os.system("mysqldump --opt -u%s -p%s %s > %s" %
(DB_USER, DB_PASS, DB_DB, db_backup_path))
if retval != 0:
print "Problem taking a db dump, aborting db update"
sys.exit(retval)
conn = MySQLdb.connect(user = DB_USER, passwd = DB_PASS, db = DB_DB)
cur = conn.cursor()
cur.execute("""select account_id, email_address, external_id
from account_external_ids""")
accounts = {}
for (account_id, email_address, external_id) in cur.fetchall():
account = accounts.get(account_id, {})
if email_address is not None:
if not account.has_key('email_address'):
account['email_address'] = email_address
if external_id[:7] == "mailto:":
if not account.has_key('external_id'):
account['external_id'] = external_id
accounts[account_id] = account
for (account_id, account) in accounts.items():
if not account.has_key('external_id') and account.has_key('email_address'):
external_id = 'mailto:%s' % account['email_address']
cur.execute("""insert into account_external_ids
(account_id, email_address, external_id)
values (%s, %s, %s)""",
(account_id, account['email_address'], external_id))
os.system("ssh -i %s -p29418 %s@localhost gerrit flush-caches" %
(GERRIT_SSH_KEY, GERRIT_USER))

View File

@@ -1,78 +0,0 @@
#! /usr/bin/env python
# Copyright (C) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Close Github pull requests with instructions to use Gerrit for
# code review. The list of projects is found in github.config
# and should look like:
# [project "GITHUB_PROJECT"]
# close_pull = true
# Github authentication information is read from github.secure.config,
# which should look like:
# [github]
# username = GITHUB_USERNAME
# api_token = GITHUB_API_TOKEN
import github2.client
import os
import StringIO
import ConfigParser
import logging
import re
logging.basicConfig(level=logging.ERROR)
GITHUB_CONFIG = os.environ.get('GITHUB_CONFIG',
'/home/gerrit2/github.config')
GITHUB_SECURE_CONFIG = os.environ.get('GITHUB_SECURE_CONFIG',
'/home/gerrit2/github.secure.config')
MESSAGE = """Thank you for contributing to OpenStack!
%(project)s uses Gerrit for code review.
Please visit http://wiki.openstack.org/GerritWorkflow and follow the instructions there to upload your change to Gerrit.
"""
PROJECT_RE = re.compile(r'^project\s+"(.*)"$')
secure_config = ConfigParser.ConfigParser()
secure_config.read(GITHUB_SECURE_CONFIG)
config = ConfigParser.ConfigParser()
config.read(GITHUB_CONFIG)
github = github2.client.Github(requests_per_second=1.0,
username=secure_config.get("github", "username"),
api_token=secure_config.get("github", "api_token"))
for section in config.sections():
# Each section looks like [project "openstack/project"]
m = PROJECT_RE.match(section)
if not m: continue
project = m.group(1)
# Make sure we're supposed to close pull requests for this project:
if not (config.has_option(section, "close_pull") and
config.get(section, "close_pull").lower() == 'true'):
continue
# Close each pull request
pull_requests = github.pull_requests.list(project)
for req in pull_requests:
vars = dict(project=project)
github.issues.comment(project, req.number, MESSAGE%vars)
github.issues.close(project, req.number)

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is designed to expire old code reviews that have not been
# touched, according to the following rules:
# 1. if open and no activity in 2 weeks, expire
# 2. if negative comment and no activity in 1 week, expire
import os
import paramiko
import json
import logging
GERRIT_USER = os.environ.get('GERRIT_USER', 'launchpadsync')
GERRIT_SSH_KEY = os.environ.get('GERRIT_SSH_KEY',
'/home/gerrit2/.ssh/launchpadsync_rsa')
logging.basicConfig(format='%(asctime)-6s: %(name)s - %(levelname)s - %(message)s', filename='/var/log/gerrit/expire_reviews.log')
logger= logging.getLogger('expire_reviews')
logger.setLevel(logging.INFO)
logger.info('Starting expire reviews')
logger.info('Connecting to Gerrit')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('localhost', username=GERRIT_USER, key_filename=GERRIT_SSH_KEY, port=29418)
def expire_patch_set(patch_id, patch_subject, has_negative):
if has_negative:
message= 'code review expired after 1 week of no activity after a negative review'
else:
message= 'code review expired after 2 weeks of no activity'
command='gerrit review --abandon --message="{0}" {1}'.format(message, patch_id)
logger.info('Expiring: %s - %s: %s', patch_id, patch_subject, message)
stdin, stdout, stderr = ssh.exec_command(command)
if stdout.channel.recv_exit_status() != 0:
logger.error(stderr.read())
# Query all open with no activity for 2 weeks
logger.info('Searching no activity for 2 weeks')
stdin, stdout, stderr = ssh.exec_command('gerrit query --current-patch-set --format JSON status:open age:2w')
for line in stdout:
row= json.loads(line)
if not row.has_key('rowCount'):
expire_patch_set(row['currentPatchSet']['revision'], row['subject'], False)
# Query all reviewed with no activity for 1 week
logger.info('Searching no activity on negative review for 1 week')
stdin, stdout, stderr = ssh.exec_command('gerrit query --current-patch-set --all-approvals --format JSON status:reviewed age:1w')
for line in stdout:
row= json.loads(line)
if not row.has_key('rowCount'):
# Search for negative approvals
for approval in row['currentPatchSet']['approvals']:
if approval['value'] == '-1':
expire_patch_set(row['currentPatchSet']['revision'], row['subject'], True)
break
logger.info('End expire review')

View File

@@ -1,29 +0,0 @@
import argparse
import paramiko
import json
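# Print the Gerrit group UUID for the named group by running a
# "gerrit gsql" query over SSH.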
parser = argparse.ArgumentParser()
parser.add_argument("--host", dest="host", default="review.openstack.org",
help="gerrit host to connect to")
parser.add_argument("--port", dest="port", action='store', type=int,
default=29418, help="gerrit port to connect to")
parser.add_argument("groups", nargs=1)
options = parser.parse_args()
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.connect(options.host, port=options.port)
group = options.groups[0]
query = "select group_uuid from account_groups where name = '%s'" % group
command = 'gerrit gsql --format JSON -c "%s"' % query
stdin, stdout, stderr = client.exec_command(command)
for line in stdout:
row = json.loads(line)
if row['type'] == 'row':
print row['columns']['group_uuid']
ret = stdout.channel.recv_exit_status()

View File

@@ -1,139 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is designed to be called by a gerrit hook. It searches new
# patchsets for strings like "blueprint FOO" or "bp FOO" and updates
# corresponding Launchpad blueprints with links back to the change.
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
import os
import argparse
import re
import subprocess
import StringIO
import ConfigParser
import MySQLdb
BASE_DIR = '/home/gerrit2/review_site'
GERRIT_CACHE_DIR = os.path.expanduser(os.environ.get('GERRIT_CACHE_DIR',
'~/.launchpadlib/cache'))
GERRIT_CREDENTIALS = os.path.expanduser(os.environ.get('GERRIT_CREDENTIALS',
'~/.launchpadlib/creds'))
GERRIT_CONFIG = os.environ.get('GERRIT_CONFIG',
'/home/gerrit2/review_site/etc/gerrit.config')
GERRIT_SECURE_CONFIG = os.environ.get('GERRIT_SECURE_CONFIG',
'/home/gerrit2/review_site/etc/secure.config')
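# SPEC_RE matches references such as "blueprint foo-bar" or "bp foo-bar"
# (case-insensitive, with an optional "#" or ":" separator); the second
# group captures the blueprint name.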
SPEC_RE = re.compile(r'(blueprint|bp)\s*[#:]?\s*(\S+)', re.I)
BODY_RE = re.compile(r'^\s+.*$')
def get_broken_config(filename):
""" gerrit config ini files are broken and have leading tabs """
text = ""
with open(filename,"r") as conf:
for line in conf.readlines():
text = "%s%s" % (text, line.lstrip())
fp = StringIO.StringIO(text)
c=ConfigParser.ConfigParser()
c.readfp(fp)
return c
GERRIT_CONFIG = get_broken_config(GERRIT_CONFIG)
SECURE_CONFIG = get_broken_config(GERRIT_SECURE_CONFIG)
DB_USER = GERRIT_CONFIG.get("database", "username")
DB_PASS = SECURE_CONFIG.get("database","password")
DB_DB = GERRIT_CONFIG.get("database","database")
def update_spec(launchpad, project, name, subject, link, topic=None):
# For testing, if a project doesn't match openstack/foo, use
# the openstack-ci project instead.
group, project = project.split('/')
if group != 'openstack':
project = 'openstack-ci'
spec = launchpad.projects[project].getSpecification(name=name)
if not spec: return
if spec.whiteboard:
wb = spec.whiteboard.strip()
else:
wb = ''
changed = False
if topic:
topiclink = '%s/#q,topic:%s,n,z' % (link[:link.find('/',8)],
topic)
if topiclink not in wb:
wb += "\n\n\nGerrit topic: %(link)s" % dict(link=topiclink)
changed = True
if link not in wb:
wb += "\n\n\nAddressed by: %(link)s\n %(subject)s\n" % dict(subject=subject,
link=link)
changed = True
if changed:
spec.whiteboard = wb
spec.lp_save()
def find_specs(launchpad, dbconn, args):
git_log = subprocess.Popen(['git',
'--git-dir=' + BASE_DIR + '/git/' + args.project + '.git',
'log', '--no-merges',
args.commit + '^1..' + args.commit],
stdout=subprocess.PIPE).communicate()[0]
cur = dbconn.cursor()
cur.execute("select subject, topic from changes where change_key=%s", args.change)
subject, topic = cur.fetchone()
specs = set([m.group(2) for m in SPEC_RE.finditer(git_log)])
if topic:
topicspec = topic.split('/')[-1]
specs |= set([topicspec])
for spec in specs:
update_spec(launchpad, args.project, spec, subject,
args.change_url, topic)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('hook')
#common
parser.add_argument('--change', default=None)
parser.add_argument('--change-url', default=None)
parser.add_argument('--project', default=None)
parser.add_argument('--branch', default=None)
parser.add_argument('--commit', default=None)
#change-merged
parser.add_argument('--submitter', default=None)
# patchset-created
parser.add_argument('--uploader', default=None)
parser.add_argument('--patchset', default=None)
args = parser.parse_args()
launchpad = Launchpad.login_with('Gerrit User Sync', LPNET_SERVICE_ROOT,
GERRIT_CACHE_DIR,
credentials_file = GERRIT_CREDENTIALS,
version='devel')
conn = MySQLdb.connect(user = DB_USER, passwd = DB_PASS, db = DB_DB)
find_specs(launchpad, conn, args)
if __name__ == '__main__':
main()

View File

@@ -1,199 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is designed to be called by a gerrit hook. It searches new
# patchsets for strings like "bug FOO" and updates the corresponding
# Launchpad bugs' statuses.
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
import os
import argparse
import re
import subprocess
BASE_DIR = '/home/gerrit2/review_site'
GERRIT_CACHE_DIR = os.path.expanduser(os.environ.get('GERRIT_CACHE_DIR',
'~/.launchpadlib/cache'))
GERRIT_CREDENTIALS = os.path.expanduser(os.environ.get('GERRIT_CREDENTIALS',
'~/.launchpadlib/creds'))
def add_change_proposed_message(bugtask, change_url, project, branch):
subject = 'Fix proposed to %s (%s)' % (short_project(project), branch)
body = 'Fix proposed to branch: %s\nReview: %s' % (branch, change_url)
bugtask.bug.newMessage(subject=subject, content=body)
def add_change_merged_message(bugtask, change_url, project, commit,
submitter, branch, git_log):
subject = 'Fix merged to %s (%s)' % (short_project(project), branch)
git_url = 'http://github.com/%s/commit/%s' % (project, commit)
body = '''Reviewed: %s
Committed: %s
Submitter: %s
Branch: %s\n''' % (change_url, git_url, submitter, branch)
body = body + '\n' + git_log
bugtask.bug.newMessage(subject=subject, content=body)
def set_in_progress(bugtask, launchpad, uploader, change_url):
"""Set bug In progress with assignee being the uploader"""
# Retrieve uploader from Launchpad. Use email as search key if
# provided, and only set if there is a clear match.
try:
searchkey = uploader[uploader.rindex("(") + 1:-1]
except ValueError:
searchkey = uploader
persons = launchpad.people.findPerson(text=searchkey)
if len(persons) == 1:
bugtask.assignee = persons[0]
bugtask.status = "In Progress"
bugtask.lp_save()
def set_fix_committed(bugtask):
"""Set bug fix committed"""
bugtask.status = "Fix Committed"
bugtask.lp_save()
def release_fixcommitted(bugtask):
"""Set bug FixReleased if it was FixCommitted"""
if bugtask.status == u'Fix Committed':
bugtask.status = "Fix Released"
bugtask.lp_save()
def tag_in_branchname(bugtask, branch):
"""Tag bug with in-branch-name tag (if name is appropriate)"""
lp_bug = bugtask.bug
branch_name = branch.replace('/', '-')
if branch_name.replace('-', '').isalnum():
lp_bug.tags = lp_bug.tags + ["in-%s" % branch_name]
lp_bug.lp_save()
def short_project(full_project_name):
"""Return the project part of the git repository name"""
return full_project_name.split('/')[-1]
def git2lp(full_project_name):
"""Convert Git repo name to Launchpad project"""
project_map = {
'openstack/python-glanceclient': 'glance',
'openstack/python-keystoneclient': 'keystone',
'openstack/python-melangeclient': 'melange',
'openstack/python-novaclient': 'nova',
'openstack/python-quantumclient': 'quantum',
'openstack/openstack-ci-puppet': 'openstack-ci',
}
return project_map.get(full_project_name, short_project(full_project_name))
def process_bugtask(launchpad, bugtask, git_log, args):
"""Apply changes to bugtask, based on hook / branch..."""
if args.hook == "change-merged":
if args.branch == 'master':
set_fix_committed(bugtask)
elif args.branch == 'milestone-proposed':
release_fixcommitted(bugtask)
else:
tag_in_branchname(bugtask, args.branch)
add_change_merged_message(bugtask, args.change_url, args.project,
args.commit, args.submitter, args.branch,
git_log)
if args.hook == "patchset-created":
if args.branch == 'master':
set_in_progress(bugtask, launchpad, args.uploader, args.change_url)
if args.patchset == '1':
add_change_proposed_message(bugtask, args.change_url,
args.project, args.branch)
def find_bugs(launchpad, git_log, args):
"""Find bugs referenced in the git log and return related bugtasks"""
bug_regexp = r'([Bb]ug|[Ll][Pp])[\s#:]*(\d+)'
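    # e.g. "bug 12345", "Bug #12345" and "lp: 12345" all capture "12345"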
tokens = re.split(bug_regexp, git_log)
# Extract unique bug tasks
bugtasks = {}
for token in tokens:
if re.match('^\d+$', token) and (token not in bugtasks):
try:
lp_bug = launchpad.bugs[token]
for lp_task in lp_bug.bug_tasks:
if lp_task.bug_target_name == git2lp(args.project):
bugtasks[token] = lp_task
break
except KeyError:
# Unknown bug
pass
return bugtasks.values()
def extract_git_log(args):
"""Extract git log of all merged commits"""
cmd = ['git',
'--git-dir=' + BASE_DIR + '/git/' + args.project + '.git',
'log', '--no-merges', args.commit + '^1..' + args.commit]
return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('hook')
#common
parser.add_argument('--change', default=None)
parser.add_argument('--change-url', default=None)
parser.add_argument('--project', default=None)
parser.add_argument('--branch', default=None)
parser.add_argument('--commit', default=None)
#change-merged
parser.add_argument('--submitter', default=None)
#patchset-created
parser.add_argument('--uploader', default=None)
parser.add_argument('--patchset', default=None)
args = parser.parse_args()
# Connect to Launchpad
launchpad = Launchpad.login_with('Gerrit User Sync', LPNET_SERVICE_ROOT,
GERRIT_CACHE_DIR,
credentials_file=GERRIT_CREDENTIALS,
version='devel')
# Get git log
git_log = extract_git_log(args)
# Process bugtasks found in git log
for bugtask in find_bugs(launchpad, git_log, args):
process_bugtask(launchpad, bugtask, git_log, args)
if __name__ == '__main__':
main()

View File

@@ -1,77 +0,0 @@
#! /usr/bin/env python
# Copyright (C) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Add launchpad ids listed in the wiki CLA page to the CLA group in LP.
import os
import sys
import uuid
import urllib
import re
import StringIO
import ConfigParser
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
DEBUG = False
LP_CACHE_DIR = '~/.launchpadlib/cache'
LP_CREDENTIALS = '~/.launchpadlib/creds'
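# Each contributor row in the wiki table looks like
#   || Full Name || launchpad-id || translation || ...
# and the launchpad-id cell may itself be a wiki link: [[url| name]]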
CONTRIBUTOR_RE = re.compile(r'.*?\|\|\s*(?P<name>.*?)\s*\|\|\s*(?P<login>.*?)\s*\|\|\s*(?P<trans>.*?)\s*\|\|.*?')
LINK_RE = re.compile(r'\[\[.*\|\s*(?P<name>.*)\s*\]\]')
for check_path in (os.path.dirname(LP_CACHE_DIR),
os.path.dirname(LP_CREDENTIALS)):
if not os.path.exists(check_path):
os.makedirs(check_path)
wiki_members = []
for line in urllib.urlopen('http://wiki.openstack.org/Contributors?action=raw'):
m = CONTRIBUTOR_RE.match(line)
if m and m.group('login') and m.group('trans'):
login = m.group('login')
if login=="<#c0c0c0>'''Launchpad ID'''": continue
l = LINK_RE.match(login)
if l:
login = l.group('name')
wiki_members.append(login)
launchpad = Launchpad.login_with('CLA Team Sync', LPNET_SERVICE_ROOT,
LP_CACHE_DIR,
credentials_file = LP_CREDENTIALS)
lp_members = []
team = launchpad.people['openstack-cla']
for detail in team.members_details:
user = None
# detail.self_link ==
# 'https://api.launchpad.net/1.0/~team/+member/${username}'
login = detail.self_link.split('/')[-1]
status = detail.status
lp_members.append(login)
for wm in wiki_members:
if wm not in lp_members:
print "Need to add %s to LP" % (wm)
try:
person = launchpad.people[wm]
except:
print 'Unable to find %s on LP'%wm
continue
status = team.addMember(person=person, status="Approved")

View File

@@ -1,394 +0,0 @@
#! /usr/bin/env python
# Copyright (C) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Synchronize Gerrit users from Launchpad.
import os
import sys
import uuid
import subprocess
from datetime import datetime
import StringIO
import ConfigParser
import MySQLdb
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
from openid.consumer import consumer
from openid.cryptutil import randomString
DEBUG = False
GERRIT_USER = os.environ.get('GERRIT_USER', 'launchpadsync')
GERRIT_CONFIG = os.environ.get('GERRIT_CONFIG',
'/home/gerrit2/review_site/etc/gerrit.config')
GERRIT_SECURE_CONFIG = os.environ.get('GERRIT_SECURE_CONFIG',
'/home/gerrit2/review_site/etc/secure.config')
GERRIT_SSH_KEY = os.environ.get('GERRIT_SSH_KEY',
'/home/gerrit2/.ssh/launchpadsync_rsa')
GERRIT_CACHE_DIR = os.path.expanduser(os.environ.get('GERRIT_CACHE_DIR',
'~/.launchpadlib/cache'))
GERRIT_CREDENTIALS = os.path.expanduser(os.environ.get('GERRIT_CREDENTIALS',
'~/.launchpadlib/creds'))
GERRIT_BACKUP_PATH = os.environ.get('GERRIT_BACKUP_PATH',
'/home/gerrit2/dbupdates')
for check_path in (os.path.dirname(GERRIT_CACHE_DIR),
os.path.dirname(GERRIT_CREDENTIALS),
GERRIT_BACKUP_PATH):
if not os.path.exists(check_path):
os.makedirs(check_path)
def get_broken_config(filename):
""" gerrit config ini files are broken and have leading tabs """
text = ""
with open(filename,"r") as conf:
for line in conf.readlines():
text = "%s%s" % (text, line.lstrip())
fp = StringIO.StringIO(text)
c=ConfigParser.ConfigParser()
c.readfp(fp)
return c
def get_type(in_type):
if in_type == "RSA":
return "ssh-rsa"
else:
return "ssh-dsa"
gerrit_config = get_broken_config(GERRIT_CONFIG)
secure_config = get_broken_config(GERRIT_SECURE_CONFIG)
DB_USER = gerrit_config.get("database", "username")
DB_PASS = secure_config.get("database","password")
DB_DB = gerrit_config.get("database","database")
db_backup_file = "%s.%s.sql" % (DB_DB, datetime.isoformat(datetime.now()))
db_backup_path = os.path.join(GERRIT_BACKUP_PATH, db_backup_file)
retval = os.system("mysqldump --opt -u%s -p%s %s > %s" %
(DB_USER, DB_PASS, DB_DB, db_backup_path))
if retval != 0:
print "Problem taking a db dump, aborting db update"
sys.exit(retval)
conn = MySQLdb.connect(user = DB_USER, passwd = DB_PASS, db = DB_DB)
cur = conn.cursor()
launchpad = Launchpad.login_with('Gerrit User Sync', LPNET_SERVICE_ROOT,
GERRIT_CACHE_DIR,
credentials_file = GERRIT_CREDENTIALS)
def get_sub_teams(team, have_teams):
for sub_team in launchpad.people[team].sub_teams:
if sub_team.name not in have_teams:
have_teams = get_sub_teams(sub_team.name, have_teams)
have_teams.append(team)
return have_teams
teams_todo = get_sub_teams('openstack', [])
users={}
groups={}
groups_in_groups={}
group_implies_groups={}
group_ids={}
projects = subprocess.check_output(['/usr/bin/ssh', '-p', '29418',
'-i', GERRIT_SSH_KEY,
'-l', GERRIT_USER, 'localhost',
'gerrit', 'ls-projects']).split('\n')
for team_todo in teams_todo:
team = launchpad.people[team_todo]
groups[team.name] = team.display_name
# Attempt to get nested group memberships. ~nova-core, for instance, is a
# member of ~nova, so membership in ~nova-core should imply membership in
# ~nova
group_in_group = groups_in_groups.get(team.name, {})
for subgroup in team.sub_teams:
group_in_group[subgroup.name] = 1
# We should now have a dictionary of the form {'nova': {'nova-core': 1}}
groups_in_groups[team.name] = group_in_group
for detail in team.members_details:
user = None
# detail.self_link ==
# 'https://api.launchpad.net/1.0/~team/+member/${username}'
login = detail.self_link.split('/')[-1]
if users.has_key(login):
user = users[login]
else:
user = dict(add_groups=[])
status = detail.status
if (status == "Approved" or status == "Administrator"):
user['add_groups'].append(team.name)
users[login] = user
# If we picked up subgroups that were not in our original list of groups
# make sure they get added
for (supergroup, subgroups) in groups_in_groups.items():
for group in subgroups.keys():
if group not in groups.keys():
groups[group] = None
# account_groups
# groups is a dict of team name to team display name
# here, for every group we have in that dict, we're building another dict of
# group_name to group_id - and if the database doesn't already have the
# group, we're adding it
for (group_name, group_display_name) in groups.items():
if cur.execute("select group_id from account_groups where name = %s",
group_name):
group_ids[group_name] = cur.fetchall()[0][0]
else:
cur.execute("""insert into account_group_id (s) values (NULL)""");
cur.execute("select max(s) from account_group_id")
group_id = cur.fetchall()[0][0]
# Match the 40-char 'uuid' that java is producing
group_uuid = uuid.uuid4()
second_uuid = uuid.uuid4()
full_uuid = "%s%s" % (group_uuid.hex, second_uuid.hex[:8])
cur.execute("""insert into account_groups
(group_id, group_type, owner_group_id,
name, description, group_uuid)
values
(%s, 'INTERNAL', 1, %s, %s, %s)""",
(group_id, group_name, group_display_name, full_uuid))
cur.execute("""insert into account_group_names (group_id, name) values
(%s, %s)""",
(group_id, group_name))
group_ids[group_name] = group_id
# account_group_includes
# groups_in_groups should be a dict of dicts, where the key is the larger
# group and the inner dict is a list of groups that are members of the
# larger group. So {'nova': {'nova-core': 1}}
for (group_name, subgroups) in groups_in_groups.items():
for subgroup_name in subgroups.keys():
try:
cur.execute("""insert into account_group_includes
(group_id, include_id)
values (%s, %s)""",
(group_ids[group_name], group_ids[subgroup_name]))
except MySQLdb.IntegrityError:
pass
# Make a list of implied group membership
# building a list which is the opposite of groups_in_group. Here
# group_implies_groups is a dict keyed by group_id containing a list of
# group_ids of implied membership. SO: if nova is 1 and nova-core is 2:
# {'2': [1]}
for group_id in group_ids.values():
total_groups = []
groups_todo = [group_id]
while len(groups_todo) > 0:
current_group = groups_todo.pop()
total_groups.append(current_group)
cur.execute("""select group_id from account_group_includes
where include_id = %s""", (current_group))
for row in cur.fetchall():
if row[0] != 1 and row[0] not in total_groups:
groups_todo.append(row[0])
group_implies_groups[group_id] = total_groups
if DEBUG:
def get_group_name(in_group_id):
for (group_name, group_id) in group_ids.items():
if group_id == in_group_id:
return group_name
print "groups in groups"
for (k,v) in groups_in_groups.items():
print k, v
print "group_imples_groups"
for (k, v) in group_implies_groups.items():
print get_group_name(k)
new_groups=[]
for val in v:
new_groups.append(get_group_name(val))
print "\t", new_groups
for (username, user_details) in users.items():
# accounts
account_id = None
if cur.execute("""select account_id from account_external_ids where
external_id in (%s)""", ("username:%s" % username)):
account_id = cur.fetchall()[0][0]
# We have this bad boy - all we need to do is update his group membership
else:
# We need details
member = launchpad.people[username]
if not member.is_team:
openid_consumer = consumer.Consumer(dict(id=randomString(16, '0123456789abcdef')), None)
openid_request = openid_consumer.begin("https://launchpad.net/~%s" % member.name)
user_details['openid_external_id'] = openid_request.endpoint.getLocalID()
# Handle username change
if cur.execute("""select account_id from account_external_ids where
external_id in (%s)""", user_details['openid_external_id']):
account_id = cur.fetchall()[0][0]
cur.execute("""update account_external_ids
set external_id=%s
where external_id like 'username%%'
and account_id = %s""",
('username:%s' % username, account_id))
else:
user_details['ssh_keys'] = ["%s %s %s" % (get_type(key.keytype), key.keytext, key.comment) for key in member.sshkeys]
email = None
try:
email = member.preferred_email_address.email
except ValueError:
pass
user_details['email'] = email
cur.execute("""insert into account_id (s) values (NULL)""");
cur.execute("select max(s) from account_id")
account_id = cur.fetchall()[0][0]
cur.execute("""insert into accounts (account_id, full_name, preferred_email) values
(%s, %s, %s)""", (account_id, username, user_details['email']))
# account_ssh_keys
for key in user_details['ssh_keys']:
cur.execute("""select ssh_public_key from account_ssh_keys where
account_id = %s""", account_id)
db_keys = [r[0].strip() for r in cur.fetchall()]
if key.strip() not in db_keys:
cur.execute("""select max(seq)+1 from account_ssh_keys
where account_id = %s""", account_id)
seq = cur.fetchall()[0][0]
if seq is None:
seq = 1
cur.execute("""insert into account_ssh_keys
(ssh_public_key, valid, account_id, seq)
values
(%s, 'Y', %s, %s)""",
(key.strip(), account_id, seq))
# account_external_ids
## external_id
if not cur.execute("""select account_id from account_external_ids
where account_id = %s and external_id = %s""",
(account_id, user_details['openid_external_id'])):
cur.execute("""insert into account_external_ids
(account_id, email_address, external_id)
values (%s, %s, %s)""",
(account_id, user_details['email'], user_details['openid_external_id']))
if not cur.execute("""select account_id from account_external_ids
where account_id = %s and external_id = %s""",
(account_id, "username:%s" % username)):
cur.execute("""insert into account_external_ids
(account_id, external_id) values (%s, %s)""",
(account_id, "username:%s" % username))
if user_details.get('email', None) is not None:
if not cur.execute("""select account_id from account_external_ids
where account_id = %s and external_id = %s""",
(account_id, "mailto:%s" % user_details['email'])):
cur.execute("""insert into account_external_ids
(account_id, email_address, external_id)
values (%s, %s, %s)""",
(account_id, user_details['email'], "mailto:%s" %
user_details['email']))
if account_id is not None:
# account_group_members
# user_details['add_groups'] is a list of group names for which the
# user is either "Approved" or "Administrator"
groups_to_add = []
groups_to_watch = {}
groups_to_rm = {}
for group in user_details['add_groups']:
# if you are in the group nova-core, that should also put you in nova
        # group_implies_groups already contains the group itself, so no
        # extra append is needed (appending would mutate the shared list)
        add_groups = group_implies_groups[group_ids[group]]
for add_group in add_groups:
if add_group not in groups_to_add:
groups_to_add.append(add_group)
# We only want to add watches for direct project membership groups
groups_to_watch[group_ids[group]] = group
# groups_to_add is now the full list of all groups we think the user
# should belong to. we want to limit the users groups to this list
for group in groups:
if group_ids[group] not in groups_to_add:
if group not in groups_to_rm.values():
groups_to_rm[group_ids[group]] = group
for group_id in groups_to_add:
if not cur.execute("""select account_id from account_group_members
where account_id = %s and group_id = %s""",
(account_id, group_id)):
# The current user does not exist in the group. Add it.
cur.execute("""insert into account_group_members
(account_id, group_id)
values (%s, %s)""", (account_id, group_id))
os_project_name = groups_to_watch.get(group_id, None)
if os_project_name is not None:
if os_project_name.endswith("-core"):
os_project_name = os_project_name[:-5]
os_project_name = "openstack/%s" % os_project_name
if os_project_name in projects:
if not cur.execute("""select account_id
from account_project_watches
where account_id = %s
and project_name = %s""",
(account_id, os_project_name)):
cur.execute("""insert into account_project_watches
VALUES
("Y", "N", "N", %s, %s, "*")""",
(account_id, os_project_name))
for (group_id, group_name) in groups_to_rm.items():
cur.execute("""delete from account_group_members
where account_id = %s and group_id = %s""",
(account_id, group_id))
os_project_name = "openstack/%s" % group_name
if os_project_name in projects:
cur.execute("""delete from account_project_watches
where account_id=%s and project_name=%s""",
(account_id, os_project_name))
os.system("ssh -i %s -p29418 %s@localhost gerrit flush-caches" %
(GERRIT_SSH_KEY, GERRIT_USER))

View File

@@ -1,430 +0,0 @@
#! /usr/bin/env python
# Copyright (C) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Synchronize Gerrit users from Launchpad.
# TODO items:
# 1. add a temporary (instance level) object store for the launchpad class
# 2. split out the two classes into separate files to be used as a library
import os
import sys
import ConfigParser
import StringIO
import paramiko
import json
import logging
import uuid
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
from datetime import datetime
from openid.consumer import consumer
from openid.cryptutil import randomString
GERRIT_USER = os.environ.get('GERRIT_USER', 'launchpadsync')
GERRIT_CONFIG = os.environ.get('GERRIT_CONFIG',
'/home/gerrit2/review_site/etc/gerrit.config')
GERRIT_SECURE_CONFIG = os.environ.get('GERRIT_SECURE_CONFIG',
'/home/gerrit2/review_site/etc/secure.config')
GERRIT_SSH_KEY = os.environ.get('GERRIT_SSH_KEY',
'/home/gerrit2/.ssh/launchpadsync_rsa')
GERRIT_CACHE_DIR = os.path.expanduser(os.environ.get('GERRIT_CACHE_DIR',
'~/.launchpadlib/cache'))
GERRIT_CREDENTIALS = os.path.expanduser(os.environ.get('GERRIT_CREDENTIALS',
'~/.launchpadlib/creds'))
GERRIT_BACKUP_PATH = os.environ.get('GERRIT_BACKUP_PATH',
'/home/gerrit2/dbupdates')
logging.basicConfig(format='%(asctime)-6s: %(name)s - %(levelname)s - %(message)s', filename='/var/log/gerrit/update_users.log')
logger= logging.getLogger('update_users')
logger.setLevel(logging.INFO)
for check_path in (os.path.dirname(GERRIT_CACHE_DIR),
os.path.dirname(GERRIT_CREDENTIALS),
GERRIT_BACKUP_PATH):
if not os.path.exists(check_path):
os.makedirs(check_path)
def get_broken_config(filename):
""" gerrit config ini files are broken and have leading tabs """
text = ""
with open(filename,"r") as conf:
for line in conf.readlines():
text = "%s%s" % (text, line.lstrip())
fp = StringIO.StringIO(text)
c=ConfigParser.ConfigParser()
c.readfp(fp)
return c
gerrit_config = get_broken_config(GERRIT_CONFIG)
secure_config = get_broken_config(GERRIT_SECURE_CONFIG)
DB_USER = gerrit_config.get("database", "username")
DB_PASS = secure_config.get("database","password")
DB_DB = gerrit_config.get("database","database")
def make_db_backup():
db_backup_file = "%s.%s.sql" % (DB_DB, datetime.isoformat(datetime.now()))
db_backup_path = os.path.join(GERRIT_BACKUP_PATH, db_backup_file)
retval = os.system("mysqldump --opt -u%s -p%s %s > %s" %
(DB_USER, DB_PASS, DB_DB, db_backup_path))
if retval != 0:
logger.error("Problem taking a db dump, aborting db update")
sys.exit(retval)
class LaunchpadAction(object):
def __init__(self):
logger.info('Connecting to Launchpad')
self.launchpad= Launchpad.login_with('Gerrit User Sync', LPNET_SERVICE_ROOT,
GERRIT_CACHE_DIR,
credentials_file = GERRIT_CREDENTIALS)
logger.info('Getting Launchpad teams')
self.lp_teams= self.get_all_sub_teams('openstack', [])
def get_all_sub_teams(self, team, have_teams):
for sub_team in self.launchpad.people[team].sub_teams:
if sub_team.name not in have_teams:
have_teams = self.get_all_sub_teams(sub_team.name, have_teams)
have_teams.append(team)
return have_teams
def get_sub_teams(self, team):
sub_teams= []
for sub_team in self.launchpad.people[team].sub_teams:
sub_teams.append(sub_team.name)
return sub_teams
def get_teams(self):
return self.lp_teams
def get_all_users(self):
logger.info('Getting Launchpad users')
users= []
for team in self.lp_teams:
for detail in self.launchpad.people[team].members_details:
if (detail.status == 'Approved' or detail.status == 'Administrator'):
name= detail.self_link.split('/')[-1]
if ((users.count(name) == 0) and (name not in self.lp_teams)):
users.append(name)
return users
def get_user_data(self, user):
return self.launchpad.people[user]
def get_team_members(self, team, gerrit):
users= []
for detail in self.launchpad.people[team].members_details:
if (detail.status == 'Approved' or detail.status == 'Administrator'):
name= detail.self_link.split('/')[-1]
# if we found a subteam
if name in self.lp_teams:
# check subteam for implied subteams
for implied_group in gerrit.get_implied_groups(name):
if implied_group in self.lp_teams:
users.extend(self.get_team_members(implied_group, gerrit))
users.extend(self.get_team_members(name, gerrit))
continue
users.append(name)
# check team for implied teams
for implied_group in gerrit.get_implied_groups(team):
if implied_group in self.lp_teams:
users.extend(self.get_team_members(implied_group, gerrit))
# filter out dupes
users= list(set(users))
return users
def get_team_watches(self, team):
users= []
for detail in self.launchpad.people[team].members_details:
if (detail.status == 'Approved' or detail.status == 'Administrator'):
name= detail.self_link.split('/')[-1]
if name in self.lp_teams:
continue
if users.count(name) == 0:
users.append(name)
return users
def get_team_display_name(self, team):
team_data = self.launchpad.people[team]
return team_data.display_name
class GerritAction(object):
def __init__(self):
logger.info('Connecting to Gerrit')
self.ssh= paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect('localhost', username=GERRIT_USER, port=29418, key_filename=GERRIT_SSH_KEY)
def cleanup(self):
logger.info('Closing connection to Gerrit')
self.ssh.close()
def run_query(self, query):
command= 'gerrit gsql --format JSON -c "{0}"'.format(query)
stdin, stdout, stderr= self.ssh.exec_command(command)
# trying to get stdout return code or stderr can hang with large result sets
# for line in stderr:
# logger.error(line)
return stdout
def get_groups(self):
logger.info('Getting Gerrit groups')
groups= []
query= "select name from account_groups"
stdout= self.run_query(query)
for line in stdout:
row= json.loads(line)
if row['type'] == 'row':
group= row['columns']['name']
groups.append(group)
return groups
def get_users(self):
logger.info('Getting Gerrit users')
users= []
query= "select external_id from account_external_ids"
stdout= self.run_query(query)
for line in stdout:
row= json.loads(line)
if row['type'] == 'row':
user= row['columns']['external_id'].replace('username:','')
users.append(user)
return users
def get_group_id(self, group_name):
query= "select group_id from account_groups where name='{0}'".format(group_name)
stdout= self.run_query(query)
line= stdout.readline()
row= json.loads(line)
if row['type'] == 'row':
return row['columns']['group_id']
else:
return 0
def get_user_id(self, user_name):
query= "select account_id from account_external_ids where external_id='username:{0}'".format(user_name)
stdout= self.run_query(query)
line= stdout.readline()
row= json.loads(line)
return row['columns']['account_id']
def get_users_from_group(self, group_name):
logger.info('Getting Gerrit users from group %s', group_name)
users= []
gid= self.get_group_id(group_name)
query= "select external_id from account_external_ids join account_group_members on account_group_members.account_id=account_external_ids.account_id where account_group_members.group_id={0} and external_id like 'username%%'".format(gid)
stdout= self.run_query(query)
for line in stdout:
row= json.loads(line)
if row['type'] == 'row':
user= row['columns']['external_id'].replace('username:','')
users.append(user)
return users
def get_users_from_watches(self, group_name):
logger.info('Getting Gerrit users from watch list %s', group_name)
users= []
if group_name.endswith("-core"):
group_name = group_name[:-5]
group_name = "openstack/{0}".format(group_name)
query= "select external_id from account_external_ids join account_project_watches on account_project_watches.account_id=account_external_ids.account_id where account_project_watches.project_name like '{0}' and external_id like 'username%%'".format(group_name)
stdout= self.run_query(query)
for line in stdout:
row= json.loads(line)
if row['type'] == 'row':
user= row['columns']['external_id'].replace('username:','')
users.append(user)
return users
def get_implied_groups(self, group_name):
gid= self.get_group_id(group_name)
groups= []
query= "select name from account_groups join account_group_includes on account_group_includes.include_id=account_groups.group_id where account_group_includes.group_id={0}".format(gid)
stdout= self.run_query(query)
for line in stdout:
row= json.loads(line)
if row['type'] == 'row':
group= row['columns']['name']
groups.append(group)
return groups
def add_group(self, group_name, group_display_name):
logger.info('New group %s (%s)', group_display_name, group_name)
query= "insert into account_group_id (s) values (NULL)"
stdout= self.run_query(query)
row= json.loads(stdout.readline())
if row['rowCount'] != 1:
    raise Exception("Could not get a new account group ID")
query= "select max(s) from account_group_id"
stdout= self.run_query(query)
row= json.loads(stdout.readline())
gid= row['columns']['max(s)']
full_uuid= "{0}{1}".format(uuid.uuid4().hex, uuid.uuid4().hex[:8])
query= "insert into account_groups (group_id, group_type, owner_group_id, name, description, group_uuid) values ({0}, 'INTERNAL', 1, '{1}', '{2}', '{3}')". format(gid, group_name, group_display_name, full_uuid)
self.run_query(query)
query= "insert into account_group_names (group_id, name) values ({0}, '{1}')".format(gid, group_name)
self.run_query(query)
def add_user(self, user_name, user_data):
logger.info("Adding Gerrit user %s", user_name)
openid_consumer = consumer.Consumer(dict(id=randomString(16, '0123456789abcdef')), None)
openid_request = openid_consumer.begin("https://launchpad.net/~%s" % user_data.name)
user_openid_external_id = openid_request.endpoint.getLocalID()
query= "select account_id from account_external_ids where external_id in ('{0}')".format(user_openid_external_id)
stdout= self.run_query(query)
row= json.loads(stdout.readline())
if row['type'] == 'row':
# we have a result so this is an updated user name
account_id= row['columns']['account_id']
query= "update account_external_ids set external_id='{0}' where external_id like 'username%%' and account_id = {1}".format('username:%s' % user_name, account_id)
self.run_query(query)
else:
# we really do have a new user
user_ssh_keys= ["%s %s %s" % ('ssh-%s' % key.keytype.lower(), key.keytext, key.comment) for key in user_data.sshkeys]
user_email= None
try:
    user_email = user_data.preferred_email_address.email
except ValueError:
    pass
query= "insert into account_id (s) values (NULL)"
self.run_query(query)
query= "select max(s) from account_id"
stdout= self.run_query(query)
row= json.loads(stdout.readline())
uid= row['columns']['max(s)']
query= "insert into accounts (account_id, full_name, preferred_email) values ({0}, '{1}', '{2}')".format(uid, user_name, user_email)
self.run_query(query)
keyno= 1
for key in user_ssh_keys:
query= "insert into account_ssh_keys (ssh_public_key, valid, account_id, seq) values ('{0}', 'Y', {1}, {2})".format(key.strip(), uid, keyno)
self.run_query(query)
keyno = keyno + 1
query= "insert into account_external_ids (account_id, email_address, external_id) values ({0}, '{1}', '{2}')".format(uid, user_email, user_openid_external_id)
self.run_query(query)
query= "insert into account_external_ids (account_id, external_id) values ({0}, '{1}')".format(uid, "username:%s" % user_name)
self.run_query(query)
if user_email is not None:
query= "insert into account_external_ids (account_id, email_address, external_id) values ({0}. '{1}', '{2}')".format(uid, user_email, "mailto:%s" % user_email)
return None
def add_user_to_group(self, user_name, group_name):
logger.info("Adding Gerrit user %s to group %s", user_name, group_name)
uid= self.get_user_id(user_name)
gid= self.get_group_id(group_name)
if gid == 0:
    raise Exception("Trying to add user {0} to non-existent group {1}".format(user_name, group_name))
query= "insert into account_group_members (account_id, group_id) values ({0}, {1})".format(uid, gid)
self.run_query(query)
def add_user_to_watch(self, user_name, group_name):
logger.info("Adding Gerrit user %s to watch group %s", user_name, group_name)
uid= self.get_user_id(user_name)
if group_name.endswith("-core"):
group_name = group_name[:-5]
group_name = "openstack/{0}".format(group_name)
query= "insert into account_project_watches VALUES ('Y', 'N', 'N', {0}, '{1}', '*')". format(uid, group_name)
self.run_query(query)
def del_user_from_group(self, user_name, group_name):
logger.info("Deleting Gerrit user %s from group %s", user_name, group_name)
uid= self.get_user_id(user_name)
gid= self.get_group_id(group_name)
query= "delete from account_group_members where account_id = {0} and group_id = {1}".format(uid, gid)
self.run_query(query)
if group_name.endswith("-core"):
group_name = group_name[:-5]
group_name= "openstack/{0}".format(group_name)
query= "delete from account_project_watches where account_id = {0} and project_name= '{1}'".format(uid, group_name)
self.run_query(query)
def rebuild_sub_groups(self, group, sub_groups):
gid= self.get_group_id(group)
for sub_group in sub_groups:
sgid= self.get_group_id(sub_group)
query= "select group_id from account_group_includes where group_id={0} and include_id={1}".format(gid, sgid)
stdout= self.run_query(query)
row= json.loads(stdout.readline())
if row['type'] != 'row':
logger.info('Adding implied group %s to group %s', group, sub_group)
query= "insert into account_group_includes (group_id, include_id) values ({0}, {1})".format(gid, sgid)
self.run_query(query)
# Actual work starts here!
lp= LaunchpadAction()
gerrit= GerritAction()
logger.info('Making DB backup')
make_db_backup()
logger.info('Starting group reconcile')
lp_groups= lp.get_teams()
gerrit_groups= gerrit.get_groups()
group_diff= filter(lambda a: a not in gerrit_groups, lp_groups)
for group in group_diff:
group_display_name= lp.get_team_display_name(group)
gerrit.add_group(group, group_display_name)
for group in lp_groups:
sub_group= lp.get_sub_teams(group)
if sub_group:
gerrit.rebuild_sub_groups(group, sub_group)
logger.info('End group reconcile')
logger.info('Starting user reconcile')
lp_users= lp.get_all_users()
gerrit_users= gerrit.get_users()
user_diff= filter(lambda a: a not in gerrit_users, lp_users)
for user in user_diff:
gerrit.add_user(user, lp.get_user_data(user))
logger.info('End user reconcile')
logger.info('Starting user to group reconcile')
lp_groups= lp.get_teams()
for group in lp_groups:
# First find users to attach to groups
gerrit_group_users= gerrit.get_users_from_group(group)
lp_group_users= lp.get_team_members(group, gerrit)
group_diff= filter(lambda a: a not in gerrit_group_users, lp_group_users)
for user in group_diff:
gerrit.add_user_to_group(user, group)
# Second find users to attach to watches
lp_group_watches= lp.get_team_watches(group)
gerrit_group_watches= gerrit.get_users_from_watches(group)
group_diff= filter(lambda a: a not in gerrit_group_watches, lp_group_watches)
for user in group_diff:
gerrit.add_user_to_watch(user, group)
# Third find users to remove from groups/watches
group_diff= filter(lambda a: a not in lp_group_users, gerrit_group_users)
for user in group_diff:
gerrit.del_user_from_group(user, group)
logger.info('Ending user to group reconcile')
gerrit.cleanup()

gerritbot
View File

@@ -1,221 +0,0 @@
#! /usr/bin/env python
# The configuration file should look like:
"""
[ircbot]
nick=NICKNAME
pass=PASSWORD
channel=CHANNEL
server=irc.freenode.net
port=6667
[gerrit]
user=gerrit2
key=/path/to/id_rsa
host=review.example.com
port=29418
events=patchset-created, change-merged
branches=master
"""
import ircbot
import time
import subprocess
import threading
import select
import json
import sys
import ConfigParser
import daemon, daemon.pidlockfile
import traceback
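# GerritBot handles the IRC side; the Gerrit thread below consumes
# "gerrit stream-events" over SSH and forwards matching events to IRC.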
class GerritBot(ircbot.SingleServerIRCBot):
def __init__(self, channel, nickname, password, server, port=6667):
if channel[0] != '#': channel = '#'+channel
ircbot.SingleServerIRCBot.__init__(self,
[(server, port)],
nickname, nickname)
self.channel = channel
self.nickname = nickname
self.password = password
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
c.privmsg("nickserv", "identify %s " % self.password)
c.privmsg("nickserv", "ghost %s %s" % (self.nickname, self.password))
c.privmsg("nickserv", "release %s %s" % (self.nickname, self.password))
time.sleep(1)
c.nick(self.nickname)
def on_welcome(self, c, e):
c.privmsg("nickserv", "identify %s "% self.password)
c.join(self.channel)
def send(self, msg):
self.connection.privmsg(self.channel, msg)
time.sleep(0.5)
class Gerrit(threading.Thread):
def __init__(self, ircbot, events, branches,
username, keyfile, server, port=29418):
threading.Thread.__init__(self)
self.ircbot = ircbot
self.events = events
self.branches = branches
self.username = username
self.keyfile = keyfile
self.server = server
self.port = port
self.proc = None
self.poll = select.poll()
def _open(self):
self.proc = subprocess.Popen(['/usr/bin/ssh', '-p', str(self.port),
'-i', self.keyfile,
'-l', self.username, self.server,
'gerrit', 'stream-events'],
bufsize=1,
stdin=None,
stdout=subprocess.PIPE,
stderr=None,
)
self.poll.register(self.proc.stdout)
def _close(self):
try:
self.poll.unregister(self.proc.stdout)
except:
pass
try:
self.proc.kill()
except:
pass
self.proc = None
def patchset_created(self, data):
if 'patchset-created' in self.events:
msg = '%s proposed a change to %s: %s %s' % (
data['patchSet']['uploader']['name'],
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(msg)
def comment_added(self, data):
if 'comment-added' in self.events:
msg = 'A comment has been added to a proposed change to %s: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(msg)
for approval in data.get('approvals', []):
if (approval['type'] == 'VRIF' and approval['value'] == '-1' and
'x-vrif-minus-1' in self.events):
msg = 'Verification of a change to %s failed: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(msg)
if (approval['type'] == 'VRIF' and approval['value'] == '1' and
'x-vrif-plus-1' in self.events):
msg = 'Verification of a change to %s succeeded: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(msg)
if (approval['type'] == 'CRVW' and approval['value'] == '-2' and
'x-crvw-minus-2' in self.events):
msg = 'A change to %s has been rejected: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(msg)
if (approval['type'] == 'CRVW' and approval['value'] == '2' and
'x-crvw-plus-2' in self.events):
msg = 'A change to %s has been approved: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(msg)
def change_merged(self, data):
if 'change-merged' in self.events:
msg = 'A change was merged to %s: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(msg)
def _read(self):
l = self.proc.stdout.readline()
data = json.loads(l)
# If branches is specified, ignore notifications for other branches
if self.branches and data['change']['branch'] not in self.branches:
return
if data['type'] == 'comment-added':
self.comment_added(data)
elif data['type'] == 'patchset-created':
self.patchset_created(data)
elif data['type'] == 'change-merged':
self.change_merged(data)
def _listen(self):
while True:
ret = self.poll.poll()
for (fd, event) in ret:
if fd == self.proc.stdout.fileno():
if event == select.POLLIN:
self._read()
else:
raise Exception("event on ssh connection")
def _run(self):
try:
if not self.proc:
self._open()
self._listen()
except:
traceback.print_exc()
self._close()
time.sleep(5)
def run(self):
time.sleep(5)
while True:
self._run()
def _main():
config=ConfigParser.ConfigParser()
config.read(sys.argv[1])
bot = GerritBot(config.get('ircbot', 'channel'),
config.get('ircbot', 'nick'),
config.get('ircbot', 'pass'),
config.get('ircbot', 'server'),
config.getint('ircbot', 'port'))
g = Gerrit(bot,
config.get('gerrit', 'events'),
config.get('gerrit', 'branches'),
config.get('gerrit', 'user'),
config.get('gerrit', 'key'),
config.get('gerrit', 'host'),
config.getint('gerrit', 'port'))
g.start()
bot.start()
def main():
if len(sys.argv) != 2:
print "Usage: %s CONFIGFILE" % sys.argv[0]
sys.exit(1)
pid = daemon.pidlockfile.TimeoutPIDLockFile("/var/run/gerritbot/gerritbot.pid", 10)
with daemon.DaemonContext(pidfile=pid):
_main()
if __name__ == "__main__":
main()

View File

@@ -1,149 +0,0 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: gerritbot
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Gerrit IRC Bot
# Description: Announces Gerrit events to IRC
### END INIT INFO
# Author: James Blair <james.blair@rackspace.com>
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="GerritBot"
NAME=gerritbot
DAEMON=/home/gerrit2/$NAME
DAEMON_ARGS="/home/gerrit2/gerritbot.config"
PIDFILE=/var/run/$NAME/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
USER=gerrit2
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
# Return
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
mkdir -p /var/run/$NAME
chown $USER /var/run/$NAME
start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON --test > /dev/null \
|| return 1
start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
# Add code here, if necessary, that waits for the process to be ready
# to handle requests from services started subsequently which depend
# on this one. As a last resort, sleep for some time.
}
#
# Function that stops the daemon/service
#
do_stop()
{
# Return
# 0 if daemon has been stopped
# 1 if daemon was already stopped
# 2 if daemon could not be stopped
# other if a failure occurred
start-stop-daemon --stop --signal 9 --pidfile $PIDFILE
RETVAL="$?"
[ "$RETVAL" = 2 ] && return 2
rm -f /var/run/$NAME/*
return "$RETVAL"
}
#
# Function that sends a SIGHUP to the daemon/service
#
do_reload() {
#
# If the daemon can reload its configuration without
# restarting (for example, when it is sent a SIGHUP),
# then implement that here.
#
start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
return 0
}
case "$1" in
start)
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
stop)
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
status)
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
;;
#reload|force-reload)
#
# If do_reload() is not implemented then leave this commented out
# and leave 'force-reload' as an alias for 'restart'.
#
#log_daemon_msg "Reloading $DESC" "$NAME"
#do_reload
#log_end_msg $?
#;;
restart|force-reload)
#
# If the "reload" option is implemented then remove the
# 'force-reload' alias
#
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
#echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac
:

View File

@@ -1,116 +0,0 @@
from xml.sax.saxutils import escape
from contextlib import closing
import codecs
import simplejson
import urllib2
import os
import sys
import time
if len(sys.argv) != 3:
print "A team/user and a project/repo are required arguments"
sys.exit(1)
team = sys.argv[1]
project = sys.argv[2]
def fix_bad_time(bad_time):
# This is stupid, but time.strptime doesn't support %z in 2.6
#return "%s-%s-%sT%sZ%s:%s" % (bad_time[:4], bad_time[5:7], bad_time[8:10],
# bad_time[11:19], bad_time[20:23], bad_time[23:])
return "%s-%s-%sT%sZ" % (bad_time[:4], bad_time[5:7], bad_time[8:10],
bad_time[11:19])
# TODO: Fetch the files from the internets
issues = []
for issue_state in ("open", "closed"):
full_url = "http://github.com/api/v2/json/issues/list/%s/%s/%s" % (team,
project, issue_state)
with closing(urllib2.urlopen(full_url)) as issue_json:
these_issues = simplejson.load(issue_json)
issues.extend(these_issues['issues'])
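# Map GitHub usernames to Launchpad IDs where a matching LP account is known.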
users = {}
with open("gh_to_lp_users.json", "r") as users_json:
users = simplejson.load(users_json)
outfile_name = "%s_%s_lp_bugs.xml" % (team, project)
bugs_outfile = codecs.open(outfile_name, "w", "utf-8-sig")
bugs_outfile.write("""<?xml version="1.0"?>
<launchpad-bugs xmlns="https://launchpad.net/xmlns/2006/bugs">
""")
for issue in issues:
issue['body'] = escape(issue['body'])
issue['title'] = escape(issue['title'])
issue['lower_user'] = users.get(issue['user'], issue['user'].lower())
if issue['state'] == "open":
issue['status'] = "CONFIRMED"
else:
issue['status'] = "FIXRELEASED"
for bad_time in ('updated_at', 'created_at'):
issue[bad_time] = fix_bad_time(issue[bad_time])
bugs_outfile.write("""
<bug xmlns="https://launchpad.net/xmlns/2006/bugs" id="%(number)s">
<datecreated>%(created_at)s</datecreated>
<title>%(title)s</title>
<description>%(body)s</description>
<reporter name="%(lower_user)s" email="noreply@openstack.org">%(user)s</reporter>
<status>%(status)s</status>
<importance>HIGH</importance>
""" % issue)
bugs_outfile.write("<tags>\n")
bugs_outfile.write("<tag>%s</tag>\n" % project)
for label in issue['labels']:
bugs_outfile.write("<tag>%s</tag>\n" % label.lower())
bugs_outfile.write("</tags>\n")
bugs_outfile.write("""
<comment>
<sender name="%(lower_user)s" email="noreply@openstack.org">%(user)s</sender>
<date>%(created_at)s</date>
<title>%(title)s</title>
<text>%(body)s</text>
</comment>
""" % issue)
issue['comments'] = []
full_url = "http://github.com/api/v2/json/issues/comments/%s/%s/%s" % \
(team, project, issue['number'])
# github ratelimits v2 api to 60 calls per minute
time.sleep(1)
print full_url
with closing(urllib2.urlopen(full_url)) as comments_json:
try:
comments = simplejson.load(comments_json)
issue['comments'] = comments['comments']
except:
issue['comments'] = []
for comment in issue['comments']:
for bad_time in ('updated_at', 'created_at'):
comment[bad_time] = fix_bad_time(comment[bad_time])
comment['body'] = escape(comment['body'])
comment['lower_user'] = users.get(comment['user'],
comment['user'].lower())
try:
bugs_outfile.write("""
<comment>
<sender name="%(lower_user)s" email="noreply@openstack.org">%(user)s</sender>
<date>%(created_at)s</date>
<text>%(body)s</text>
</comment>""" % comment)
except:
print comment
sys.exit(1)
bugs_outfile.write("\n</bug>\n")
bugs_outfile.write("\n</launchpad-bugs>\n")
bugs_outfile.close()
os.system("rnv bug-export.rnc %s" % outfile_name)

View File

@@ -1,20 +0,0 @@
{
"YorikSar": "yorik-sar",
"ziadsawalha": "ziad-sawalha",
"anotherjesse": "anotherjess",
"jakedahn": "jakedahn",
"heckj": "heckj",
"jaypipes": "jaypipes",
"anark": "anark",
"xtoddx": "xtoddx",
"dolph": "dolph",
"dtroyer": "dtroyer",
"yogirackspace": "yogesh-srikrishnan",
"robin-norwood": "robin-norwood",
"gholt": "gholt",
"mgius": "mgius",
"sleepsonthefloor": "sleepsonthefloor",
"klmitch": "klmitch",
"emonty": "mordred",
"jeblair": "jeblair"
}

View File

@@ -1,145 +0,0 @@
import MySQLdb
import pickle
import uuid
import os
import StringIO
import ConfigParser
GERRIT_CONFIG = os.environ.get('GERRIT_CONFIG','/home/gerrit2/review_site/etc/gerrit.config')
GERRIT_SECURE_CONFIG = os.environ.get('GERRIT_SECURE_CONFIG','/home/gerrit2/review_site/etc/secure.config')
def get_broken_config(filename):
""" gerrit config ini files are broken and have leading tabs """
text = ""
with open(filename,"r") as conf:
for line in conf.readlines():
text = "%s%s" % (text, line.lstrip())
fp = StringIO.StringIO(text)
c=ConfigParser.ConfigParser()
c.readfp(fp)
return c
gerrit_config = get_broken_config(GERRIT_CONFIG)
secure_config = get_broken_config(GERRIT_SECURE_CONFIG)
conn = MySQLdb.connect(user=gerrit_config.get("database","username"),
passwd=secure_config.get("database","password"),
db=gerrit_config.get("database","database"))
cur = conn.cursor()
users={}
groups={}
groups_in_groups={}
group_ids={}
with open("users.pickle","r") as users_file:
(users, groups, groups_in_groups) = pickle.load(users_file)
# squish in unknown groups
for (k,v) in groups_in_groups.items():
for g in v:
if g not in groups.keys():
groups[g] = None
# account_groups
for (k,v) in groups.items():
if cur.execute("select group_id from account_groups where name = %s", k):
group_ids[k] = cur.fetchall()[0][0]
else:
cur.execute("""insert into account_group_id (s) values (NULL)""");
cur.execute("select max(s) from account_group_id")
group_id = cur.fetchall()[0][0]
# Match the 40-char 'uuid' that java is producing
group_uuid = uuid.uuid4()
second_uuid = uuid.uuid4()
full_uuid = "%s%s" % (group_uuid.hex, second_uuid.hex[:8])
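        # 32 hex chars from the first uuid plus the first 8 of the second = 40.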
cur.execute("""insert into account_groups
(group_id, group_type, owner_group_id,
name, description, group_uuid)
values
(%s, 'INTERNAL', 1, %s, %s, %s)""",
(group_id, k,v, full_uuid))
cur.execute("""insert into account_group_names (group_id, name) values
(%s, %s)""",
(group_id, k))
group_ids[k] = group_id
# account_group_includes
for (k,v) in groups_in_groups.items():
for g in v:
try:
cur.execute("""insert into account_group_includes
(group_id, include_id)
values (%s, %s)""",
(group_ids[k], group_ids[g]))
except MySQLdb.IntegrityError:
pass
for (k,v) in users.items():
# accounts
account_id = None
if cur.execute("""select account_id from account_external_ids where
external_id in (%s, %s)""", (v['openid_external_id'], "username:%s" % k)):
account_id = cur.fetchall()[0][0]
else:
cur.execute("""insert into account_id (s) values (NULL)""");
cur.execute("select max(s) from account_id")
account_id = cur.fetchall()[0][0]
cur.execute("""insert into accounts (account_id, full_name, preferred_email) values
(%s, %s, %s)""", (account_id, v['name'],v['email']))
# account_ssh_keys
for key in v['ssh_keys']:
cur.execute("""select ssh_public_key from account_ssh_keys where
account_id = %s""", account_id)
db_keys = [r[0].strip() for r in cur.fetchall()]
if key.strip() not in db_keys:
cur.execute("""select max(seq)+1 from account_ssh_keys
where account_id = %s""", account_id)
seq = cur.fetchall()[0][0]
if seq is None:
seq = 1
cur.execute("""insert into account_ssh_keys
(ssh_public_key, valid, account_id, seq)
values
(%s, 'Y', %s, %s)""",
(key.strip(), account_id, seq))
# account_external_ids
## external_id
if not cur.execute("""select account_id from account_external_ids
where account_id = %s and external_id = %s""",
(account_id, v['openid_external_id'])):
cur.execute("""insert into account_external_ids
(account_id, email_address, external_id)
values (%s, %s, %s)""",
(account_id, v['email'], v['openid_external_id']))
if not cur.execute("""select account_id from account_external_ids
where account_id = %s and external_id = %s""",
(account_id, "username:%s" % k)):
cur.execute("""insert into account_external_ids
(account_id, external_id) values (%s, %s)""",
(account_id, "username:%s" % k))
# account_group_members
for group in v['add_groups']:
if not cur.execute("""select account_id from account_group_members
where account_id = %s and group_id = %s""",
(account_id, group_ids[group])):
cur.execute("""insert into account_group_members
(account_id, group_id)
values (%s, %s)""", (account_id, group_ids[group]))
for group in v['rm_groups']:
cur.execute("""delete from account_group_members
where account_id = %s and group_id = %s""",
(account_id, group_ids[group]))

View File

@@ -1,161 +0,0 @@
#!/usr/bin/env python
# Turn over a devstack configured machine to the developer who
# proposed the change that is being tested.
# Copyright (C) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
from libcloud.dns.types import Provider as DnsProvider
from libcloud.dns.types import RecordType
from libcloud.dns.providers import get_driver as dns_get_driver
import os, sys
import getopt
import paramiko
CLOUD_SERVERS_DRIVER = os.environ.get('CLOUD_SERVERS_DRIVER','rackspace')
CLOUD_SERVERS_USERNAME = os.environ['CLOUD_SERVERS_USERNAME']
CLOUD_SERVERS_API_KEY = os.environ['CLOUD_SERVERS_API_KEY']
CLOUD_SERVERS_HOST = os.environ.get('CLOUD_SERVERS_HOST', None)
CLOUD_SERVERS_PATH = os.environ.get('CLOUD_SERVERS_PATH', None)
DNS_CLOUD_SERVERS_USERNAME = os.environ.get('DNS_CLOUD_SERVERS_USERNAME',
CLOUD_SERVERS_USERNAME)
DNS_CLOUD_SERVERS_API_KEY = os.environ.get('DNS_CLOUD_SERVERS_API_KEY',
CLOUD_SERVERS_API_KEY)
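# Run a command on the node via the global paramiko client, echoing its
# output as it arrives; raises if the command exits nonzero.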
def ssh(action, x):
stdin, stdout, stderr = client.exec_command(x)
print x
output = ''
for x in stdout:
output += x
sys.stdout.write(x)
ret = stdout.channel.recv_exit_status()
print stderr.read()
if ret:
raise Exception("Unable to %s" % action)
return output
def scp(source, dest):
print 'copy', source, dest
ftp = client.open_sftp()
ftp.put(source, dest)
ftp.close()
(option_pairs, args) = getopt.getopt(sys.argv[1:], '', ["image=", "nodns"])
DNS = True
for o,v in option_pairs:
if o=='--nodns':
DNS = False
if len(args) == 0:
print "Node Name required!"
sys.exit(1)
host_name = args[0]
node_name = "%s.openstack.org" % host_name
node_size = '3'
image_name = 'Ubuntu 11.10'
if CLOUD_SERVERS_DRIVER == 'rackspace':
for (name, value) in option_pairs:
if name == "--image":
image_name = value
Driver = get_driver(Provider.RACKSPACE)
conn = Driver(CLOUD_SERVERS_USERNAME, CLOUD_SERVERS_API_KEY)
images = conn.list_images()
size = [sz for sz in conn.list_sizes() if sz.id == node_size][0]
image = [img for img in conn.list_images() if img.name == image_name][0]
elif CLOUD_SERVERS_DRIVER == 'eucalyptus':
node_type = 'ami-000004da'
node_size = 'standard.small'
Driver = get_driver(Provider.EUCALYPTUS)
conn = Driver(CLOUD_SERVERS_USERNAME, CLOUD_SERVERS_API_KEY,
host=CLOUD_SERVERS_HOST, path=CLOUD_SERVERS_PATH)
image = NodeImage(id=node_type, name="", driver="")
size = NodeSize(id=node_size, name="", ram=None, disk=None,
bandwidth=None, price=None, driver="")
# a task that first installs the ssh key, and then runs the script
if CLOUD_SERVERS_DRIVER == 'rackspace':
# read your public key in
keypath = os.path.expanduser("~/.ssh/id_rsa.pub")
if not os.path.exists(keypath):
keypath = os.path.expanduser("~/.ssh/authorized_keys")
sd = SSHKeyDeployment(open(keypath).read())
else:
private_key_path = os.path.expanduser("~/.ssh/%s.pem" % node_name)
if not os.path.exists(private_key_path):
resp = conn.ex_create_keypair(name=node_name)
key_material = resp.get('keyMaterial')
if not key_material:
print "Couldn't create keypair"
sys.exit(1)
with open(private_key_path, 'w') as private_key:
private_key.write(key_material + '\n')
os.chmod(private_key_path, 0600)
# deploy_node takes the same base keyword arguments as create_node.
if CLOUD_SERVERS_DRIVER == 'rackspace':
print "Deploying %s" % node_name
node = conn.deploy_node(name=node_name, image=image, size=size, deploy=sd)
else:
node = conn.create_node(name=node_name, image=image, size=size,
ex_keyname=node_name, ex_userdata=launch_script)
if DNS:
dns_provider = dns_get_driver(DnsProvider.RACKSPACE_US)
dns_ctx = dns_provider(DNS_CLOUD_SERVERS_USERNAME,
DNS_CLOUD_SERVERS_API_KEY)
host_shortname= host_name
domain = [z for z in dns_ctx.list_zones() if z.domain == 'openstack.org'][0]
records = [z for z in domain.list_records() if z.name == host_shortname]
if len(records) == 0:
domain.create_record(host_shortname, RecordType.A, node.public_ip[0])
else:
records[0].update(data=node.public_ip[0])
with open("%s.node.sh" % node_name,"w") as node_file:
node_file.write("ipAddr=%s\n" % node.public_ip[0])
node_file.write("nodeId=%s\n" % node.id)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(node.public_ip[0])
if CLOUD_SERVERS_DRIVER == 'eucalyptus':
ssh("set hostname", "hostname %s" % node_name)
ssh("update apt cache", "apt-get update")
ssh("upgrading system packages", "apt-get -y --force-yes upgrade")
ssh("install git and puppet", "apt-get install -y --force-yes git puppet")
ssh("clone puppret repo",
"git clone https://review.openstack.org/p/openstack/openstack-ci-puppet.git /root/openstack-ci-puppet")
ssh("run puppet", "puppet apply --modulepath=/root/openstack-ci-puppet/modules /root/openstack-ci-puppet/manifests/site.pp")
client.close()

View File

@@ -1,42 +0,0 @@
import os, sys, time
import getopt
import clouddns
import openstack.compute
USERNAME = os.environ['CLOUD_SERVERS_USERNAME']
API_KEY = os.environ['CLOUD_SERVERS_API_KEY']
compute = openstack.compute.Compute(username=USERNAME, apikey=API_KEY, cloud_api='RACKSPACE')
(option_pairs, args) = getopt.getopt(sys.argv[1:], [], ["distro="])
if len(args) == 0:
print "Node Name required!"
sys.exit(1)
node_name = args[0]
node = compute.servers.create(name=node_name, image=15330720, flavor=3)
while node.status != u'ACTIVE':
print "sleep"
time.sleep(1)
node = compute.servers.get(node.id)
dns_ctx = clouddns.connection.Connection(USERNAME,
API_KEY)
domain_name = ".".join(node_name.split(".")[-2:])
domain = dns_ctx.get_domain(name=domain_name)
try:
record = domain.get_record(name=node_name)
except:
record = None
if record is None:
domain.create_record(node_name, node.public_ip, "A")
else:
record.update(data=node.public_ip)
print "node ip", node.public_ip
with open("%s.node.sh" % node_name,"w") as node_file:
node_file.write("ipAddr=%s\n" % node.public_ip)
node_file.write("nodeId=%s\n" % node.id)

View File

@@ -1,164 +0,0 @@
#!/usr/bin/env python
# Turn over a devstack configured machine to the developer who
# proposed the change that is being tested.
# Copyright (C) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
from libcloud.dns.types import Provider as DnsProvider
from libcloud.dns.types import RecordType
from libcloud.dns.providers import get_driver as dns_get_driver
import os, sys
import getopt
import paramiko
CLOUD_SERVERS_DRIVER = os.environ.get('CLOUD_SERVERS_DRIVER','rackspace')
CLOUD_SERVERS_USERNAME = os.environ['CLOUD_SERVERS_USERNAME']
CLOUD_SERVERS_API_KEY = os.environ['CLOUD_SERVERS_API_KEY']
CLOUD_SERVERS_HOST = os.environ.get('CLOUD_SERVERS_HOST', None)
CLOUD_SERVERS_PATH = os.environ.get('CLOUD_SERVERS_PATH', None)
DNS_CLOUD_SERVERS_USERNAME = os.environ.get('DNS_CLOUD_SERVERS_USERNAME',
CLOUD_SERVERS_USERNAME)
DNS_CLOUD_SERVERS_API_KEY = os.environ.get('DNS_CLOUD_SERVERS_API_KEY',
CLOUD_SERVERS_API_KEY)
def ssh(action, x):
stdin, stdout, stderr = client.exec_command(x)
print x
output = ''
for x in stdout:
output += x
sys.stdout.write(x)
ret = stdout.channel.recv_exit_status()
print stderr.read()
if ret:
raise Exception("Unable to %s" % action)
return output
def scp(source, dest):
print 'copy', source, dest
ftp = client.open_sftp()
ftp.put(source, dest)
ftp.close()
(option_pairs, args) = getopt.getopt(sys.argv[1:], '', ["image=", "nodns"])
DNS = True
for o,v in option_pairs:
if o=='--nodns':
DNS = False
if len(args) == 0:
print "Node Name required!"
sys.exit(1)
host_name = args[0]
node_name = "%s.slave.openstack.org" % host_name
node_size = '3'
image_name = 'Ubuntu 11.10'
if CLOUD_SERVERS_DRIVER == 'rackspace':
for (name, value) in option_pairs:
if name == "--image":
image_name = value
Driver = get_driver(Provider.RACKSPACE)
conn = Driver(CLOUD_SERVERS_USERNAME, CLOUD_SERVERS_API_KEY)
images = conn.list_images()
size = [sz for sz in conn.list_sizes() if sz.id == node_size][0]
image = [img for img in conn.list_images() if img.name == image_name][0]
elif CLOUD_SERVERS_DRIVER == 'eucalyptus':
node_type = 'ami-000004da'
node_size = 'standard.small'
Driver = get_driver(Provider.EUCALYPTUS)
conn = Driver(CLOUD_SERVERS_USERNAME, CLOUD_SERVERS_API_KEY,
host=CLOUD_SERVERS_HOST, path=CLOUD_SERVERS_PATH)
image = NodeImage(id=node_type, name="", driver="")
size = NodeSize(id=node_size, name="", ram=None, disk=None,
bandwidth=None, price=None, driver="")
# a task that first installs the ssh key, and then runs the script
if CLOUD_SERVERS_DRIVER == 'rackspace':
# read your public key in
keypath = os.path.expanduser("~/.ssh/id_rsa.pub")
if not os.path.exists(keypath):
keypath = os.path.expanduser("~/.ssh/authorized_keys")
sd = SSHKeyDeployment(open(keypath).read())
else:
private_key_path = os.path.expanduser("~/.ssh/%s.pem" % node_name)
if not os.path.exists(private_key_path):
resp = conn.ex_create_keypair(name=node_name)
key_material = resp.get('keyMaterial')
if not key_material:
print "Couldn't create keypair"
sys.exit(1)
with open(private_key_path, 'w') as private_key:
private_key.write(key_material + '\n')
os.chmod(private_key_path, 0600)
# deploy_node takes the same base keyword arguments as create_node.
if CLOUD_SERVERS_DRIVER == 'rackspace':
print "Deploying %s" % node_name
node = conn.deploy_node(name=node_name, image=image, size=size, deploy=sd)
else:
node = conn.create_node(name=node_name, image=image, size=size,
ex_keyname=node_name, ex_userdata=launch_script)
if DNS:
dns_provider = dns_get_driver(DnsProvider.RACKSPACE_US)
dns_ctx = dns_provider(DNS_CLOUD_SERVERS_USERNAME,
DNS_CLOUD_SERVERS_API_KEY)
host_shortname= "%s.slave" % host_name
domain = [z for z in dns_ctx.list_zones() if z.domain == 'openstack.org'][0]
records = [z for z in domain.list_records() if z.name == host_shortname]
if len(records) == 0:
domain.create_record(host_shortname, RecordType.A, node.public_ip[0])
else:
records[0].update(data=node.public_ip[0])
with open("%s.node.sh" % node_name,"w") as node_file:
node_file.write("ipAddr=%s\n" % node.public_ip[0])
node_file.write("nodeId=%s\n" % node.id)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(node.public_ip[0])
if CLOUD_SERVERS_DRIVER == 'eucalyptus':
ssh("set hostname", "hostname %s" % node_name)
ssh("update apt cache", "apt-get update")
ssh("upgrading system packages", "apt-get -y --force-yes upgrade")
ssh("install git and puppet", "apt-get install -y --force-yes git puppet")
ssh("clone puppret repo",
"git clone https://review.openstack.org/p/openstack/openstack-ci-puppet.git /root/openstack-ci-puppet")
for key in ("slave_private_key", "slave_gpg_key", "slave_tarmac_key",
"glance_s3.conf", "glance_swift.conf"):
scp(key, "/root/openstack-ci-puppet/modules/jenkins_slave/files/%s" % key)
ssh("run puppet", "puppet apply --modulepath=/root/openstack-ci-puppet/modules /root/openstack-ci-puppet/manifests/site.pp")
client.close()

View File

@@ -1,2 +0,0 @@
nova: add-apt-repository ppa:nova-core/trunk ; apt-get update ; apt-get
build-dep nova

View File

@@ -1,38 +0,0 @@
#!/bin/sh
set -e
if [ -z "$PROJECT" ]
then
echo '$PROJECT not set.'
exit 1
fi
VERSIONDIR="$HOME/versions"
RECORDFILE="$VERSIONDIR/tarballversions"
if [ ! -d "$VERSIONDIR" ]
then
bzr co bzr://jenkins.openstack.org/ "$VERSIONDIR"
else
( cd $VERSIONDIR ; bzr up )
fi
SEPARATOR=${SEPARATOR:-'~'}
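# In dpkg version comparison '~' sorts before the release it precedes, so
# snapshot tarballs are superseded by the eventual release.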
revno=$(bzr revno)
datestamp="$(date +%Y%m%d)"
if grep "^$PROJECT $revno$" "$RECORDFILE";
then
echo "Tarball already built. Not rebuilding."
exit 0
fi
python setup.py sdist
tarball=$(echo dist/*.tar.gz)
mv "$tarball" "dist/$(basename $tarball .tar.gz)${SEPARATOR}bzr${revno}.tar.gz"
echo "$PROJECT $revno" >> "$RECORDFILE"
sort "$RECORDFILE" > "$RECORDFILE".tmp
mv "$RECORDFILE".tmp "$RECORDFILE"
( cd $VERSIONDIR ; bzr up ; bzr commit -m"Added $PROJECT $revno" )

View File

@@ -1,93 +0,0 @@
#!/bin/sh
set -e
if [ -z "$PROJECT" ]
then
echo '$PROJECT not set.'
exit 1
fi
HUDSON=http://localhost:8080/
VERSIONDIR=$HOME/versions
PKGRECORDFILE=$VERSIONDIR/pkgversions
# We keep packaging for openstack trunk in lp:~o-u-p/$project/ubuntu
# For a release (diablo, essex), it's in lp:~o-u-p/$project/$release
OPENSTACK_RELEASE=${OPENSTACK_RELEASE:-ubuntu}
BZR_BRANCH=${BZR_BRANCH:-lp:~openstack-ubuntu-packagers/$PROJECT/${OPENSTACK_RELEASE}}
PPAS=${PPAS:-ppa:$PROJECT-core/trunk}
PACKAGING_REVNO=${PACKAGING_REVNO:--1}
series=${series:-lucid}
if [ ! -d "$VERSIONDIR" ]
then
bzr co bzr://jenkins.openstack.org/ "$VERSIONDIR"
else
( cd $VERSIONDIR ; bzr up )
fi
cd build
tarball="$(echo dist/$PROJECT*.tar.gz)"
version="${tarball%.tar.gz}"
version="${version#*$PROJECT-}"
base_version=$version
if [ -n "${EXTRAVERSION}" ]
then
version="${version%~*}${EXTRAVERSION}~${version#*~}"
fi
tar xvzf "${tarball}"
echo ln -s "${tarball}" "${PROJECT}_${version}.orig.tar.gz"
ln -s "${tarball}" "${PROJECT}_${version}.orig.tar.gz"
# Overlay packaging
# We intentionally use the natty branch: these PPA builds don't need to
# diverge (yet, at least), which keeps the branch management simpler.
# Note: we do a checkout and delete the VCS metadata afterwards instead of
# an export, because export refuses to overlay onto an existing directory.
# (The metadata has to go, otherwise vcsversion.py might get overwritten.)
echo bzr checkout -r ${PACKAGING_REVNO} --lightweight $BZR_BRANCH $PROJECT-*
bzr checkout -r ${PACKAGING_REVNO} --lightweight $BZR_BRANCH $PROJECT-*
cd $PROJECT-*
if [ -d .git ]
then
PACKAGING_REVNO="$(git log --oneline | wc -l)"
rm -rf .git
else
PACKAGING_REVNO="$(bzr revno --tree)"
rm -rf .bzr
fi
# Please don't change this. It's the only way I'll get notified
# if an upload fails.
export DEBFULLNAME="Soren Hansen"
export DEBEMAIL="soren@openstack.org"
buildno=1
while true
do
pkgversion="${version}-0ubuntu0ppa1~${series}${buildno}"
if grep "$PROJECT $pkgversion" "$PKGRECORDFILE"
then
echo "We've already built a $pkgversion of $PROJECT. Incrementing build number."
buildno=$(($buildno + 1))
else
echo "$PROJECT $pkgversion" >> "$PKGRECORDFILE"
sort "$PKGRECORDFILE" > "$PKGRECORDFILE".tmp
mv "$PKGRECORDFILE".tmp "$PKGRECORDFILE"
( cd $VERSIONDIR ;
bzr up ;
bzr commit -m"Added $PROJECT $snapshotversion" )
break
fi
done
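# Write the changelog entry, build a signed source package, and upload it
# to each configured PPA.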
dch -b --force-distribution --v "${pkgversion}" "Automated PPA build. Packaging revision: ${PACKAGING_REVNO}." -D $series
dpkg-buildpackage -rfakeroot -S -sa -nc -k32EE128C
if ! [ "$DO_UPLOAD" = "no" ]
then
for ppa in $PPAS
do
dput --force $ppa "../${PROJECT}_${pkgversion}_source.changes"
done
fi
cd ..

View File

@@ -1,18 +0,0 @@
for slave in \
burrow \
burrow-java \
dashboard \
glance \
keystone \
libburrow \
nova \
manuals \
openstack-ci \
swift
do
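  # Delete the old slave, launch a replacement, and report its new IP.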
source ${slave}.slave.openstack.org.node.sh
cloudservers delete ${nodeId}
python launch_slave.py ${slave}.slave.openstack.org `echo ${slave} | tr - _`
source ${slave}.slave.openstack.org.node.sh
echo ${slave} IP: ${ipAddr}
done

View File

@@ -1,107 +0,0 @@
import os
import sys
import uuid
import os
import subprocess
from datetime import datetime
import StringIO
import ConfigParser
import MySQLdb
GERRIT_USER = os.environ.get('GERRIT_USER', 'launchpadsync')
GERRIT_CONFIG = os.environ.get('GERRIT_CONFIG',
'/home/gerrit2/review_site/etc/gerrit.config')
GERRIT_SECURE_CONFIG = os.environ.get('GERRIT_SECURE_CONFIG',
'/home/gerrit2/review_site/etc/secure.config')
GERRIT_SSH_KEY = os.environ.get('GERRIT_SSH_KEY',
'/home/gerrit2/.ssh/launchpadsync_rsa')
GERRIT_CACHE_DIR = os.path.expanduser(os.environ.get('GERRIT_CACHE_DIR',
'~/.launchpadlib/cache'))
GERRIT_CREDENTIALS = os.path.expanduser(os.environ.get('GERRIT_CREDENTIALS',
'~/.launchpadlib/creds'))
GERRIT_BACKUP_PATH = os.environ.get('GERRIT_BACKUP_PATH',
'/home/gerrit2/dbupdates')
for check_path in (os.path.dirname(GERRIT_CACHE_DIR),
os.path.dirname(GERRIT_CREDENTIALS),
GERRIT_BACKUP_PATH):
if not os.path.exists(check_path):
os.makedirs(check_path)
def get_broken_config(filename):
""" gerrit config ini files are broken and have leading tabs """
text = ""
with open(filename,"r") as conf:
for line in conf.readlines():
text = "%s%s" % (text, line.lstrip())
fp = StringIO.StringIO(text)
c=ConfigParser.ConfigParser()
c.readfp(fp)
return c
def get_type(in_type):
if in_type == "RSA":
return "ssh-rsa"
else:
return "ssh-dsa"
gerrit_config = get_broken_config(GERRIT_CONFIG)
secure_config = get_broken_config(GERRIT_SECURE_CONFIG)
DB_USER = gerrit_config.get("database", "username")
DB_PASS = secure_config.get("database","password")
DB_DB = gerrit_config.get("database","database")
db_backup_file = "%s.%s.sql" % (DB_DB, datetime.isoformat(datetime.now()))
db_backup_path = os.path.join(GERRIT_BACKUP_PATH, db_backup_file)
retval = os.system("mysqldump --opt -u%s -p%s %s > %s" %
(DB_USER, DB_PASS, DB_DB, db_backup_path))
if retval != 0:
print "Problem taking a db dump, aborting db update"
sys.exit(retval)
projects = None
if len(sys.argv) > 1:
projects = ["openstack/%s" % sys.argv[1]]
else:
projects = subprocess.check_output(['/usr/bin/ssh', '-p', '29418',
'-i', GERRIT_SSH_KEY,
'-l', GERRIT_USER, 'localhost',
'gerrit', 'ls-projects']).split('\n')
conn = MySQLdb.connect(user = DB_USER, passwd = DB_PASS, db = DB_DB)
cur = conn.cursor()
cur.execute("select name, group_id from account_groups")
for (group_name, group_id) in cur.fetchall():
os_project_name = 'openstack/%s' % group_name
if os_project_name in projects:
# Grab all of the groups that are included in this group too.
total_groups = []
groups_todo = [group_id]
while len(groups_todo) > 0:
current_group = groups_todo.pop()
total_groups.append(current_group)
cur.execute("""select include_id from account_group_includes
where group_id = %s""", (current_group))
for row in cur.fetchall():
if row[0] != 1 and row[0] not in total_groups:
groups_todo.append(row[0])
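        # Subscribe every member of those groups to watch the project,
        # skipping accounts that already have a watch entry.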
for current_group in total_groups:
cur.execute("""insert into account_project_watches
select "Y", "N", "N", g.account_id, %s, "*"
from account_group_members g
where g.group_id = %s and g.account_id not in
(select w.account_id from
account_project_watches w
where g.account_id = w.account_id and
w.project_name = %s)""",
(os_project_name, current_group, os_project_name))
os.system("ssh -i %s -p29418 %s@localhost gerrit flush-caches" %
(GERRIT_SSH_KEY, GERRIT_USER))

View File

@@ -1,21 +0,0 @@
import datetime
from setuptools import setup
from sphinx.setup_command import BuildDoc
ci_cmdclass={}
class local_BuildDoc(BuildDoc):
def run(self):
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
BuildDoc.run(self)
ci_cmdclass['build_sphinx'] = local_BuildDoc
setup(name='nova',
version="%d.%02d" % (datetime.datetime.now().year, datetime.datetime.now().month),
description="OpenStack Continuous Integration Scripts",
author="OpenStack CI Team",
author_email="openstack-ci@lists.launchpad.net",
url="http://launchpad.net/openstack-ci",
cmdclass=ci_cmdclass)

View File

@@ -1,5 +0,0 @@
#!/bin/bash
for host in $HEAD_HOST ${COMPUTE_HOSTS//,/ }; do
cp /var/log/orchestra/rsyslog/$host/syslog $WORKSPACE/logs/$host-syslog.txt
done

View File

@@ -1,44 +0,0 @@
#!/bin/bash -x
WORKSPACE=`pwd`
mkdir -p logs
rm -f logs/*
cd `dirname "$0"`
echo "Jenkins: resetting hosts..."
for host in $HEAD_HOST ${COMPUTE_HOSTS//,/ }; do
scp lvm-kexec-reset.sh root@$host:/var/tmp/
ssh root@$host /var/tmp/lvm-kexec-reset.sh
sudo rm -f /var/log/orchestra/rsyslog/$host/syslog
done
# Have rsyslog reopen log files we rm'd from under it
sudo restart rsyslog
# wait for the host to come up (a ping response or timeout after 5 minutes)
echo "Jenkins: Waiting for head host to return after reset..."
sleep 10
if ! timeout 300 ./ping.py $HEAD_HOST; then
echo "Jenkins: ERROR: Head node did not come back up after reset"
exit 1
fi
echo "Jenkins: Pre-populating PIP cache"
for host in $HEAD_HOST ${COMPUTE_HOSTS//,/ }; do
scp -r ~/cache/pip root@$host:/var/cache/pip
done
echo "Jenkins: Caching images."
cd ~/devstack
source stackrc
for image_url in ${IMAGE_URLS//,/ }; do
# Download the image (uec ami+aki style) into devstack's files/ cache if it
# is not already there.
IMAGE_FNAME=`echo "$image_url" | python -c "import sys; print sys.stdin.read().split('/')[-1]"`
IMAGE_NAME=`echo "$IMAGE_FNAME" | python -c "import sys; print sys.stdin.read().split('.tar.gz')[0].split('.tgz')[0]"`
if [ ! -f files/$IMAGE_FNAME ]; then
wget -c $image_url -O files/$IMAGE_FNAME
fi
done
echo "Jenkins: Executing build_bm_multi.sh."
./tools/build_bm_multi.sh

View File

@@ -1,22 +0,0 @@
#!/bin/bash
set -x
sudo cobbler sync
for n in 1 2 3 4 5 6 7 8 9; do
    sudo cobbler system edit --netboot-enabled=Y --name=baremetal$n
done
for n in 1 2 3 4 5 6 7 8 9; do
    sudo cobbler system reboot --name=baremetal$n
done

View File

@@ -1,31 +0,0 @@
#!/bin/bash -xe
# Make sure there is a location on this builder to cache pip downloads
mkdir -p ~/cache/pip
export PIP_DOWNLOAD_CACHE=~/cache/pip
# Start with a clean slate
rm -fr jenkins_venvs
mkdir -p jenkins_venvs
# Update the list of remote refs to pick up new branches
git remote update
# Build a venv for every known branch
for branch in `git branch -r |grep "origin/"|grep -v HEAD|sed "s/origin\///"`
do
echo "Building venv for $branch"
git checkout $branch
mkdir -p jenkins_venvs/$branch
python tools/install_venv.py
virtualenv --relocatable .venv
if [ -e tools/test-requires ]
then
pip bundle .cache.bundle -r tools/pip-requires -r tools/test-requires
else
pip bundle .cache.bundle -r tools/pip-requires
fi
tar cvfz jenkins_venvs/$branch/venv.tgz .venv .cache.bundle
rm -fr .venv .cache.bundle
done
git checkout master

View File

@@ -1,26 +0,0 @@
#!/bin/bash -xe
# Support jobs, such as nova-docs, which are not yet triggered by gerrit
if [ "x$GERRIT_BRANCH" = "x" ] ; then
GERRIT_BRANCH=master
fi
mv jenkins_venvs/$GERRIT_BRANCH/venv.tgz .
rm -fr jenkins_venvs
tar xfz venv.tgz
rm venv.tgz
if [ -f .cache.bundle ] ; then
rm -rf .venv
virtualenv --no-site-packages .venv
pip install --upgrade -E .venv file://`pwd`/.cache.bundle
rm .cache.bundle
else
perl -MCwd -ple '$CWD=Cwd::abs_path();s,^VIRTUAL_ENV=.*,VIRTUAL_ENV="$CWD/.venv",' -i .venv/bin/activate
# This gets easy-install.pth as well as $PROJECT.pth. Examples for glance:
# easy-install.pth looks like: /home/jenkins/workspace/glance-venv/.venv/src/keystone
# $PROJECT.pth looks like: /home/jenkins/workspace/glance-venv
perl -MCwd -ple '$CWD=Cwd::abs_path();s,^/.*/workspace/[^/]+(/.*)$,$CWD$1,' -i .venv/lib/python2.7/site-packages/*.pth
# This is needed for the keystone install for glance
perl -MCwd -ple '$CWD=Cwd::abs_path();s,^/.*/.venv/src/(.*),$CWD/.venv/src/$1,' -i .venv/lib/python2.7/site-packages/*.egg-link
# This is needed for the keystone install for glance
perl -MCwd -ple '$CWD=Cwd::abs_path();s,/.*/.venv/src/(.*),$CWD/.venv/src/$1,' -i .venv/bin/*
fi

View File

@@ -1,7 +0,0 @@
#!/bin/bash -x
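# Roll the machine back to a pristine image: drop the previous snapshot,
# keep the current root around as last_root for debugging, snapshot a fresh
# root from orig_root, then kexec straight into it without a full reboot.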
lvremove -f /dev/main/last_root
lvrename /dev/main/root last_root
lvcreate -L20G -s -n root /dev/main/orig_root
APPEND="`cat /proc/cmdline`"
kexec -l /vmlinuz --initrd=/initrd.img --append="$APPEND"
nohup bash -c "sleep 2; kexec -e" </dev/null >/dev/null 2>&1 &

View File

@@ -1,11 +0,0 @@
#!/usr/bin/env python
import sys
from subprocess import *
p = Popen(["ping", sys.argv[1]], stdout=PIPE)
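# Exit 0 as soon as the host answers a single echo request; the caller is
# expected to wrap this script in a timeout.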
while True:
line = p.stdout.readline().strip()
if 'bytes from' in line:
p.terminate()
sys.exit(0)

View File

@@ -1,10 +0,0 @@
#!/bin/bash
mkdir -p ~/cache/pip
VENV=`mktemp -d`
virtualenv --no-site-packages $VENV
cd $VENV
. bin/activate
PIP_DOWNLOAD_CACHE=~/cache/pip pip install `cat ~/devstack/files/pips/*`
cd
rm -fr $VENV

View File

@@ -1,12 +0,0 @@
#!/bin/bash
URL=$1
echo "Jenkins: Waiting for Nova to start on infrastructure node"
RET=7
while [ $RET != 0 ]; do
curl -s $URL >/dev/null
RET=$?
sleep 1
done
echo "Jenkins: Nova is running."

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# wait_for_puppet.sh LOGFILE HOSTNAME [HOSTNAME...]
# Search LOGFILE for puppet completion on each host
FINISH_RE="puppet-agent\[.*\]: Finished catalog run in .* seconds"
LOGFILE=$1
shift
HOSTS=$@
echo "Jenkins: Waiting for puppet to complete on all nodes"
DONE=0
while [ $DONE != 1 ]; do
DONE=1
for hostname in $HOSTS
do
if !(grep "$hostname $FINISH_RE" $LOGFILE >/dev/null); then DONE=0; fi
done
sleep 5
done
echo "Jenkins: Puppet is complete."

View File

@@ -1,101 +0,0 @@
import os, sys, subprocess
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
from openid.consumer import consumer
from openid.cryptutil import randomString
import pickle
cachedir="~/.launchpadlib/cache"
credentials="~/.launchpadlib/creds"
if not os.path.exists("~/.launchpadlib"):
os.makedirs("~/.launchpadlib")
launchpad = Launchpad.login_with('Gerrit User Sync', LPNET_SERVICE_ROOT,
cachedir, credentials_file=credentials)
def get_type(in_type):
if in_type == "RSA":
return "ssh-rsa"
else:
return "ssh-dsa"
teams_todo = [
"burrow",
"burrow-core",
"glance",
"glance-core",
"keystone",
"keystone-core",
"openstack",
"openstack-admins",
"openstack-ci",
"lunr-core",
"nova",
"nova-core",
"swift",
"swift-core",
]
users={}
groups={}
groups_in_groups={}
for team_todo in teams_todo:
team = launchpad.people[team_todo]
details = [detail for detail in team.members_details]
groups[team.name] = team.display_name
for detail in details:
user = None
member = detail.member
if member.is_team:
group_in_group = groups_in_groups.get(team.name, [])
group_in_group.append(member.name)
groups_in_groups[team.name] = group_in_group
else:
status = detail.status
login = member.name
if users.has_key(login):
user = users[login]
else:
full_name = member.display_name
ssh_keys = ["%s %s %s" % (get_type(key.keytype), key.keytext, key.comment) for key in member.sshkeys]
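                # Resolve the member's Launchpad OpenID endpoint; the
                # identity URL it yields is what Gerrit stores as the
                # account's external id.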
openid_consumer = consumer.Consumer(dict(id=randomString(16, '0123456789abcdef')), None)
openid_request = openid_consumer.begin("https://launchpad.net/~%s" % member.name)
openid_external_id = openid_request.endpoint.getLocalID()
email = None
try:
email = member.preferred_email_address.email
except ValueError:
pass
user = dict(name=full_name,
ssh_keys=ssh_keys,
openid_external_id=openid_external_id,
email=email,
add_groups=[],
rm_groups=[])
if (status == "Approved" or status == "Administrator") and member.is_valid:
user['add_groups'].append(team.name)
else:
user['rm_groups'].append(team.name)
users[login] = user
with open("users.pickle", "w") as user_file:
pickle.dump([users, groups, groups_in_groups], user_file)

View File

@@ -1,66 +0,0 @@
#!/bin/sh
retval=0
STATEPATH=${HOME}/versions
BNT=in_bzr_but_not_in_tarball.txt
TNB=in_tarball_but_not_in_bzr.txt
BNTSAVED=$STATEPATH/$BNT.saved
TNBSAVED=$STATEPATH/$TNB.saved
if [ ! -d "$STATEPATH" ]
then
bzr co bzr://jenkins.openstack.org/ "$STATEPATH"
else
(cd $STATEPATH ; bzr up)
fi
bzr ls -R . --versioned | sort > bzr.lst
tar tzf nova-*.tar.gz | cut -f2- -d/ | grep -v ^$ | sort -g > tarball.lst
rm -rf dist dist.zip
diff -u bzr.lst tarball.lst | grep -v ^--- | grep -v ^+++ > diff
grep ^- diff | sed -e s/^.// > $BNT
grep ^+ diff | sed -e s/^.// > $TNB
if [ "$1" = "ack" ]
then
cp $BNT $BNTSAVED
cp $TNB $TNBSAVED
( cd $STATEPATH ; bzr commit "Ran ack" )
exit 0
fi
> report.txt
if ! diff -Nq $BNTSAVED $BNT > /dev/null
then
retval=1
echo "The list of files in bzr, but not in the tarball changed." >> report.txt
echo "Lines beginning with - denote files that were either removed from bzr or recently included in the tarball." >> report.txt
echo "Lines beginning with + denote files that were either got added to bzr recently or got removed from the tarball." >> report.txt
diff -uN $BNTSAVED $BNT >> report.txt
fi
if ! diff -qN $TNBSAVED $TNB > /dev/null
then
retval=1
echo "The list of files in the tarball, but not in bzr changed." >> report.txt
echo "Lines beginning with - denote files that were removed from the tarball, but is still in bzr." >> report.txt
echo "Lines beginning with + denote files that were either got added to the tarball recently or which disappeared from bzr, but stayed in the tarball." >> report.txt
diff -uN $TNBSAVED $TNB >> report.txt
fi
mkdir -p html/
echo '<html><title>Tarball vs bzr delta changes</title><body><pre>' > html/report.html
cat report.txt >> html/report.html
echo '</pre>' >> html/report.html
if [ $retval = 1 ]
then
echo "<p>If these differences are ok, <a href="http://hudson.openstack.org/job/nova-tarball-bzr-delta/build">run the job again</a> and check the 'ack' box.</p>" >> report.txt
fi
echo '</body></html>' >> html/report.html
( cd $STATEPATH ; bzr commit "Finished bzr diff" )
exit $retval

View File

@@ -1,147 +0,0 @@
#!/bin/sh
set -e
find_next_version() {
datestamp="${datestamp:-$(date +%Y%m%d)}"
index=1
MILESTONEDIR="${MILESTONEDIR:-$HOME/versions/milestone}"
BRANCH=${BRANCH:-trunk}
milestonefile="${MILESTONEDIR}/${PROJECT}-${BRANCH}"
if [ ! -e "${milestonefile}" ]
then
if [ "$NOMILESTONE" = "true" ]
then
milestonever=""
else
echo "Milestone file ${milestonefile} not found. Bailing out." >&2
exit 1
fi
else
milestonever="$(cat ${milestonefile})"
fi
version="$milestonever"
if [ -n "$version" ]
then
version="${version}~"
fi
if [ -d .git ]
then
revno="${revno:-$(git log --oneline | wc -l)}"
else
revno="${revno:-$(bzr revno)}"
fi
version="$(printf %s%s.%s%d "$version" "$datestamp" "$REVNOPREFIX" "$revno")"
if grep -q "^$PROJECT $version$" "$RECORDFILE"
then
echo "$version of $PROJECT already exists. Bailing out." >&2
exit 1
fi
printf "%s" "$version"
}
if [ "$1" = "test" ]
then
PROJECT="testproj"
datestamp="12345678"
RECORDFILE=$(mktemp)
MILESTONEDIR=$(mktemp -d)
BRANCH=foo
revno="99923"
REVNOPREFIX="r"
# Verify that we skip already built versions
echo "d2" > "${MILESTONEDIR}/$PROJECT-${BRANCH}"
echo "$PROJECT d2~$datestamp.001" > $RECORDFILE
expected_version="d2~12345678.r99923"
actual_version="$(find_next_version)"
test "${actual_version}" = "${expected_version}" || (echo Got ${actual_version}, expected ${expected_version} ; exit 1)
echo "For a milestoned project, we'd get: ${expected_version}"
PROJECT="testproj2"
NOMILESTONE=true
expected_version="12345678.r99923"
actual_version="$(find_next_version)"
test "${actual_version}" = "${expected_version}" || (echo Got ${actual_version}, expected ${expected_version} ; exit 1)
echo "For a non-milestoned project, we'd get: ${expected_version}"
echo All tests passed
exit 0
fi
if [ -z "$PROJECT" ]
then
echo '$PROJECT not set.'
exit 1
fi
# If there is a bundle file, build a virtualenv from the
# bundle we use for tox
if [ -e ".cache.bundle" ]
then
mv .cache.bundle .cache.pybundle
virtualenv --no-site-packages .venv
.venv/bin/pip install .cache.pybundle
rm .cache.pybundle
fi
VERSIONDIR="$HOME/versions"
RECORDFILE="$VERSIONDIR/tarballversions"
if [ ! -d "$VERSIONDIR" ]
then
bzr co bzr://jenkins.openstack.org/ "$VERSIONDIR"
else
( cd $VERSIONDIR ; bzr up ; bzr revert)
fi
snapshotversion=$(find_next_version)
# Should be ~ if tarball version is the one we're working *toward*. (By far preferred!)
# Should be + if tarball version is already released and we're moving forward after it.
SEPARATOR=${SEPARATOR:-'~'}
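# For example (hypothetical values): foo-2012.1~20120302.r123.tar.gz works
# toward the 2012.1 release, while foo-2011.3+20120302.r123.tar.gz follows
# the already-released 2011.3.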
rm -f dist/*.tar.gz
if [ -f setup.py ] ; then
# swift has no virtualenv information in its tree.
if [ -d .venv -o -f tools/with_venv.sh ] ; then
tools/with_venv.sh python setup.py sdist
else
python setup.py sdist
fi
# There should only be one, so this should be safe.
tarball=$(echo dist/*.tar.gz)
echo mv "$tarball" "dist/$(basename $tarball .tar.gz)${SEPARATOR}${snapshotversion}.tar.gz"
mv "$tarball" "dist/$(basename $tarball .tar.gz)${SEPARATOR}${snapshotversion}.tar.gz"
else
# This handles the horizon case until we get it refactored
upcoming_version=`cat ${VERSIONDIR}/upcoming_version`
projectversion=${PROJECT}-${upcoming_version}
mkdir ${projectversion}
for f in * .??* ; do
if [ "${f}" != "${projectversion}" ] ; then
mv "$f" ${projectversion}
fi
done
if [ -d ${projectversion}/.git ] ; then
mv ${projectversion}/.git .
fi
mkdir dist
tar cvfz dist/${projectversion}${SEPARATOR}${snapshotversion}.tar.gz ${projectversion}
fi
(cd $VERSIONDIR; bzr up)
echo "$PROJECT ${snapshotversion}" >> "$RECORDFILE"
sort "$RECORDFILE" > "$RECORDFILE".tmp
mv "$RECORDFILE".tmp "$RECORDFILE"
(cd $VERSIONDIR; bzr commit -m"Added $PROJECT ${snapshotversion}")

View File

@@ -1,3 +0,0 @@
#!/bin/sh
bash run_tests.sh -N && python setup.py sdist && pep8 --repeat nova

View File

@@ -1,145 +0,0 @@
#!/bin/sh -e
# Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
# This initial version of this file was taken from the source tree
# of GlusterFS. It was not directly attributed, but is assumed to be
# Copyright (c) 2010-2011 Gluster, Inc and release GPLv3
# Subsequent modifications are Copyright (c) 2011 OpenStack, LLC.
#
# GlusterFS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# GlusterFS is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
branch="master";
set_hooks_commit_msg()
{
top_dir=`git rev-parse --show-toplevel`
f="${top_dir}/.git/hooks/commit-msg";
u="https://review.openstack.org/tools/hooks/commit-msg";
if [ -x "$f" ]; then
return;
fi
curl -o $f $u || wget -O $f $u;
chmod +x $f;
GIT_EDITOR=true git commit --amend
}
add_remote()
{
username=$1
project=$2
echo "No remote set, testing ssh://$username@review.openstack.org:29418"
if project_list=`ssh -p29418 -o StrictHostKeyChecking=no $username@review.openstack.org gerrit ls-projects 2>/dev/null`
then
echo "$username@review.openstack.org:29418 worked."
if echo $project_list | grep $project >/dev/null
then
echo "Creating a git remote called gerrit that maps to:"
echo " ssh://$username@review.openstack.org:29418/$project"
git remote add gerrit ssh://$username@review.openstack.org:29418/$project
else
echo "The current project name, $project, is not a known project."
echo "Please either reclone from github/gerrit or create a"
echo "remote named gerrit that points to the intended project."
return 1
fi
return 0
fi
return 1
}
check_remote()
{
if ! git remote | grep gerrit >/dev/null 2>&1
then
origin_project=`git remote show origin | grep 'Fetch URL' | perl -nle '@fields = split(m|[:/]|); $len = $#fields; print $fields[$len-1], "/", $fields[$len];'`
if add_remote $USERNAME $origin_project
then
return 0
else
echo "Your local name doesn't work on Gerrit."
echo -n "Enter Gerrit username (same as launchpad): "
read gerrit_user
if add_remote $gerrit_user $origin_project
then
return 0
else
echo "Can't infer where gerrit is - please set a remote named"
echo "gerrit manually and then try again."
echo
echo "For more information, please see:"
echo "\thttp://wiki.openstack.org/GerritWorkflow"
exit 1
fi
fi
fi
}
rebase_changes()
{
git fetch;
GIT_EDITOR=true git rebase -i origin/$branch || exit $?;
}
assert_diverge()
{
if ! git diff origin/$branch..HEAD | grep -q .
then
echo "No changes between the current branch and origin/$branch."
exit 1
fi
}
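# Install the commit-msg hook, make sure a gerrit remote exists, rebase onto
# origin, confirm the branch actually diverges, then push to refs/for/<branch>,
# using bug/<n> or bp/<name> as the topic when the commit message references one.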
main()
{
set_hooks_commit_msg;
check_remote;
rebase_changes;
assert_diverge;
bug=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]ug|[Ll][Pp])\s*[#:]?\s*(\d+)/) {print "$2"; exit}')
bp=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]lue[Pp]rint|[Bb][Pp])\s*[#:]?\s*([0-9a-zA-Z-_]+)/) {print "$2"; exit}')
if [ "$DRY_RUN" = 1 ]; then
drier='echo -e Please use the following command to send your commits to review:\n\n'
else
drier=
fi
local_branch=`git branch | grep -Ei "\* (.*)" | cut -f2 -d' '`
if [ -z "$bug" ]; then
if [ -z "$bp" ]; then
$drier git push gerrit HEAD:refs/for/$branch/$local_branch;
else
$drier git push gerrit HEAD:refs/for/$branch/bp/$bp;
fi
else
$drier git push gerrit HEAD:refs/for/$branch/bug/$bug;
fi
}
main "$@"