Add missing tools directory
Signed-off-by: Chuck Short <chuck.short@canonical.com>
This commit is contained in:
parent
b76dae0610
commit
148f0dc977
81
tools/abandon_old_reviews.sh
Executable file
81
tools/abandon_old_reviews.sh
Executable file
@ -0,0 +1,81 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
#
|
||||
#
|
||||
# before you run this modify your .ssh/config to create a
|
||||
# review.openstack.org entry:
|
||||
#
|
||||
# Host review.openstack.org
|
||||
# User <yourgerritusername>
|
||||
# Port 29418
|
||||
#
|
||||
|
||||
# Note: due to gerrit bug somewhere, this double posts messages. :(
|
||||
|
||||
# first purge the all reviews that are more than 4w old and blocked by a core -2
|
||||
|
||||
set -o errexit

#######################################
# Abandon one gerrit review.
# Arguments: $1 - git revision id of the current patch set
#            $* - message to post with the abandon action
#######################################
function abandon_review {
    local gitid=$1
    shift
    # Join all remaining arguments into a single message string.
    local msg="$*"
    echo "Abandoning $gitid"
    # The escaped quotes are required so the message survives as one
    # argument on the remote gerrit command line.
    ssh review.openstack.org gerrit review "$gitid" --abandon --message \"$msg\"
}

# Revisions of open nova reviews older than 4 weeks that carry a core -2.
blocked_reviews=$(ssh review.openstack.org "gerrit query --current-patch-set --format json project:openstack/nova status:open age:4w label:Code-Review<=-2" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')

blocked_msg=$(cat <<EOF

This review is > 4 weeks without comment and currently blocked by a
core reviewer with a -2. We are abandoning this for now.

Feel free to reactivate the review by pressing the restore button and
contacting the reviewer with the -2 on this review to ensure you
address their concerns.

EOF
)

# For testing, put in a git rev of something you own and uncomment
# blocked_reviews="b6c4218ae4d75b86c33fa3d37c27bc23b46b6f0f"

for review in $blocked_reviews; do
    # echo ssh review.openstack.org gerrit review $review --abandon --message \"$msg\"
    echo "Blocked review $review"
    abandon_review "$review" "$blocked_msg"
done

# then purge all the reviews that are > 4w with no changes and Jenkins has -1ed

failing_reviews=$(ssh review.openstack.org "gerrit query --current-patch-set --format json project:openstack/nova status:open age:4w NOT label:Verified>=1,jenkins" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')

failing_msg=$(cat <<EOF

This review is > 4 weeks without comment, and failed Jenkins the last
time it was checked. We are abandoning this for now.

Feel free to reactivate the review by pressing the restore button and
leaving a 'recheck' comment to get fresh test results.

EOF
)

for review in $failing_reviews; do
    echo "Failing review $review"
    abandon_review "$review" "$failing_msg"
done
|
24
tools/clean-vlans
Executable file
24
tools/clean-vlans
Executable file
@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
export LC_ALL=C

# Tear down leftover nova-network bridges and VLAN interfaces.
# NOTE(review): matches any interface whose name contains "br"/"vlan" --
# assumes the classic linux-bridge naming scheme; confirm before reuse.
# Fixes: deprecated 'xargs -ifoo' replaced with '-I{}', and the per-interface
# commands now also run under sudo (only the listing was privileged before).
sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -I{} sudo ifconfig {} down
sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -I{} sudo brctl delbr {}
sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -I{} sudo ifconfig {} down
sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -I{} sudo ip link del {}
|
326
tools/colorizer.py
Executable file
326
tools/colorizer.py
Executable file
@ -0,0 +1,326 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2013, Nebula, Inc.
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Colorizer Code is borrowed from Twisted:
|
||||
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
"""Display a subunit stream through a colorized unittest test runner."""
|
||||
|
||||
import heapq
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
import subunit
|
||||
import testtools
|
||||
|
||||
|
||||
class _AnsiColorizer(object):
|
||||
"""A colorizer is an object that loosely wraps around a stream, allowing
|
||||
callers to write text to the stream in a particular color.
|
||||
|
||||
Colorizer classes must implement C{supported()} and C{write(text, color)}.
|
||||
"""
|
||||
_colors = dict(black=30, red=31, green=32, yellow=33,
|
||||
blue=34, magenta=35, cyan=36, white=37)
|
||||
|
||||
def __init__(self, stream):
|
||||
self.stream = stream
|
||||
|
||||
def supported(cls, stream=sys.stdout):
|
||||
"""A class method that returns True if the current platform supports
|
||||
coloring terminal output using this method. Returns False otherwise.
|
||||
"""
|
||||
if not stream.isatty():
|
||||
return False # auto color only on TTYs
|
||||
try:
|
||||
import curses
|
||||
except ImportError:
|
||||
return False
|
||||
else:
|
||||
try:
|
||||
try:
|
||||
return curses.tigetnum("colors") > 2
|
||||
except curses.error:
|
||||
curses.setupterm()
|
||||
return curses.tigetnum("colors") > 2
|
||||
except Exception:
|
||||
# guess false in case of error
|
||||
return False
|
||||
supported = classmethod(supported)
|
||||
|
||||
def write(self, text, color):
|
||||
"""Write the given text to the stream in the given color.
|
||||
|
||||
@param text: Text to be written to the stream.
|
||||
|
||||
@param color: A string label for a color. e.g. 'red', 'white'.
|
||||
"""
|
||||
color = self._colors[color]
|
||||
self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
|
||||
|
||||
|
||||
class _Win32Colorizer(object):
    """See _AnsiColorizer docstring."""

    def __init__(self, stream):
        import win32console
        red, green, blue, bold = (win32console.FOREGROUND_RED,
                                  win32console.FOREGROUND_GREEN,
                                  win32console.FOREGROUND_BLUE,
                                  win32console.FOREGROUND_INTENSITY)
        self.stream = stream
        self.screenBuffer = win32console.GetStdHandle(
            win32console.STD_OUT_HANDLE)
        # Console attribute masks keyed by the same color names the ANSI
        # colorizer accepts.
        self._colors = {
            'normal': red | green | blue,
            'red': red | bold,
            'green': green | bold,
            'blue': blue | bold,
            'yellow': red | green | bold,
            'magenta': red | blue | bold,
            'cyan': green | blue | bold,
            'white': red | green | blue | bold
        }

    def supported(cls, stream=sys.stdout):
        """Return True if win32console can set attributes on stdout."""
        try:
            import win32console
            screen_buffer = win32console.GetStdHandle(
                win32console.STD_OUT_HANDLE)
        except ImportError:
            return False
        import pywintypes
        try:
            # Probe by actually setting an attribute; failure means the
            # handle is not a real console.
            screen_buffer.SetConsoleTextAttribute(
                win32console.FOREGROUND_RED |
                win32console.FOREGROUND_GREEN |
                win32console.FOREGROUND_BLUE)
        except pywintypes.error:
            return False
        return True
    supported = classmethod(supported)

    def write(self, text, color):
        """Write text with the console attribute for color, then restore."""
        attr = self._colors[color]
        self.screenBuffer.SetConsoleTextAttribute(attr)
        self.stream.write(text)
        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
|
||||
|
||||
|
||||
class _NullColorizer(object):
|
||||
"""See _AnsiColorizer docstring."""
|
||||
def __init__(self, stream):
|
||||
self.stream = stream
|
||||
|
||||
def supported(cls, stream=sys.stdout):
|
||||
return True
|
||||
supported = classmethod(supported)
|
||||
|
||||
def write(self, text, color):
|
||||
self.stream.write(text)
|
||||
|
||||
|
||||
def get_elapsed_time_color(elapsed_time):
    """Map a test duration (seconds) to a display color.

    Over 1.0s is 'red', over 0.25s is 'yellow', anything faster is 'green'.
    """
    for limit, color in ((1.0, 'red'), (0.25, 'yellow')):
        if elapsed_time > limit:
            return color
    return 'green'
|
||||
|
||||
|
||||
class NovaTestResult(testtools.TestResult):
    """Colorized, incrementally flushed subunit test result writer.

    Results are grouped by test class, markers and elapsed times are
    colorized, and the slowest tests are summarized at the end of the run.
    """

    def __init__(self, stream, descriptions, verbosity):
        super(NovaTestResult, self).__init__()
        self.stream = stream
        self.showAll = verbosity > 1
        self.num_slow_tests = 10
        self.slow_tests = []  # this is a fixed-sized heap
        self.colorizer = None
        # NOTE(vish): reset stdout for the terminal check
        stdout = sys.stdout
        sys.stdout = sys.__stdout__
        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
            if colorizer.supported():
                self.colorizer = colorizer(self.stream)
                break
        sys.stdout = stdout
        self.start_time = None
        self.last_time = {}
        self.results = {}
        self.last_written = None

    def _writeElapsedTime(self, elapsed):
        # Elapsed time is colored by slowness (see get_elapsed_time_color).
        color = get_elapsed_time_color(elapsed)
        self.colorizer.write(" %.2f" % elapsed, color)

    def _addResult(self, test, *args):
        """Record one finished test and flush any displayable results."""
        try:
            name = test.id()
        except AttributeError:
            name = 'Unknown.unknown'
        test_class, test_name = name.rsplit('.', 1)

        elapsed = (self._now() - self.start_time).total_seconds()
        item = (elapsed, test_class, test_name)
        # Keep only the num_slow_tests slowest entries in the heap.
        if len(self.slow_tests) >= self.num_slow_tests:
            heapq.heappushpop(self.slow_tests, item)
        else:
            heapq.heappush(self.slow_tests, item)

        self.results.setdefault(test_class, [])
        self.results[test_class].append((test_name, elapsed) + args)
        self.last_time[test_class] = self._now()
        self.writeTests()

    def _writeResult(self, test_name, elapsed, long_result, color,
                     short_result, success):
        if self.showAll:
            self.stream.write(' %s' % str(test_name).ljust(66))
            self.colorizer.write(long_result, color)
            if success:
                self._writeElapsedTime(elapsed)
            self.stream.writeln()
        else:
            self.colorizer.write(short_result, color)

    def addSuccess(self, test):
        super(NovaTestResult, self).addSuccess(test)
        self._addResult(test, 'OK', 'green', '.', True)

    def addFailure(self, test, err):
        # subunit reports the subprocess exit status as a pseudo-test;
        # ignore it.
        if test.id() == 'process-returncode':
            return
        super(NovaTestResult, self).addFailure(test, err)
        self._addResult(test, 'FAIL', 'red', 'F', False)

    def addError(self, test, err):
        # BUG FIX: previously delegated to super().addFailure(), which
        # misclassified errors as failures in the underlying result.
        super(NovaTestResult, self).addError(test, err)
        self._addResult(test, 'ERROR', 'red', 'E', False)

    def addSkip(self, test, reason=None, details=None):
        super(NovaTestResult, self).addSkip(test, reason, details)
        self._addResult(test, 'SKIP', 'blue', 'S', True)

    def startTest(self, test):
        self.start_time = self._now()
        super(NovaTestResult, self).startTest(test)

    def writeTestCase(self, cls):
        """Flush all buffered results for one test class to the stream."""
        if not self.results.get(cls):
            return
        if cls != self.last_written:
            self.colorizer.write(cls, 'white')
            self.stream.writeln()
        for result in self.results[cls]:
            self._writeResult(*result)
        del self.results[cls]
        self.stream.flush()
        self.last_written = cls

    def writeTests(self):
        """Flush the current class, or the stalest ones after a 2s lull."""
        time = self.last_time.get(self.last_written, self._now())
        if not self.last_written or (self._now() - time).total_seconds() > 2.0:
            diff = 3.0
            while diff > 2.0:
                # keys() wrapped in list() so deletion inside
                # writeTestCase() is safe (and works on Python 3).
                classes = list(self.results.keys())
                oldest = min(classes, key=lambda x: self.last_time[x])
                diff = (self._now() - self.last_time[oldest]).total_seconds()
                self.writeTestCase(oldest)
        else:
            self.writeTestCase(self.last_written)

    def done(self):
        self.stopTestRun()

    def stopTestRun(self):
        for cls in list(self.results.keys()):
            self.writeTestCase(cls)
        self.stream.writeln()
        self.writeSlowTests()

    def writeSlowTests(self):
        """Print the slowest non-green tests with their elapsed times."""
        # Pare out 'fast' tests
        slow_tests = [item for item in self.slow_tests
                      if get_elapsed_time_color(item[0]) != 'green']
        if slow_tests:
            slow_total_time = sum(item[0] for item in slow_tests)
            slow = ("Slowest %i tests took %.2f secs:"
                    % (len(slow_tests), slow_total_time))
            self.colorizer.write(slow, 'yellow')
            self.stream.writeln()
            last_cls = None
            # sort by name
            for elapsed, cls, name in sorted(slow_tests,
                                             key=lambda x: x[1] + x[2]):
                if cls != last_cls:
                    self.colorizer.write(cls, 'white')
                    self.stream.writeln()
                    last_cls = cls
                self.stream.write(' %s' % str(name).ljust(68))
                self._writeElapsedTime(elapsed)
                self.stream.writeln()

    def printErrors(self):
        if self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavor, errors):
        for test, err in errors:
            self.colorizer.write("=" * 70, 'red')
            self.stream.writeln()
            self.colorizer.write(flavor, 'red')
            self.stream.writeln(": %s" % test.id())
            self.colorizer.write("-" * 70, 'red')
            self.stream.writeln()
            self.stream.writeln("%s" % err)
|
||||
|
||||
|
||||
test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)

# TextTestRunner only honors resultclass on Python >= 2.7.
if sys.version_info[0:2] <= (2, 6):
    runner = unittest.TextTestRunner(verbosity=2)
else:
    runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult)

exit_code = 0 if runner.run(test).wasSuccessful() else 1
sys.exit(exit_code)
|
20
tools/config/README
Normal file
20
tools/config/README
Normal file
@ -0,0 +1,20 @@
|
||||
This generate_sample.sh tool is used to generate etc/nova/nova.conf.sample
|
||||
|
||||
Run it from the top-level working directory, e.g.
|
||||
|
||||
$> ./tools/config/generate_sample.sh -b ./ -p nova -o etc/nova
|
||||
|
||||
Watch out for warnings about modules like libvirt, qpid and zmq not
|
||||
being found - these warnings are significant because they result
|
||||
in options not appearing in the generated config file.
|
||||
|
||||
|
||||
The analyze_opts.py tool is used to find options which appear in
|
||||
/etc/nova/nova.conf but not in etc/nova/nova.conf.sample
|
||||
This helps identify options in the nova.conf file which are not used by nova.
|
||||
The tool also identifies any options which are set to the default value.
|
||||
|
||||
Run it from the top-level working directory, e.g.
|
||||
|
||||
$> ./tools/config/analyze_opts.py
|
||||
|
81
tools/config/analyze_opts.py
Executable file
81
tools/config/analyze_opts.py
Executable file
@ -0,0 +1,81 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2012, Cloudscaling
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
'''
|
||||
find_unused_options.py
|
||||
|
||||
Compare the nova.conf file with the nova.conf.sample file to find any unused
|
||||
options or default values in nova.conf
|
||||
'''
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.append(os.getcwd())
|
||||
from oslo.config import iniparser
|
||||
|
||||
|
||||
class PropertyCollecter(iniparser.BaseParser):
    """Collect key/value assignments from a config file, ignoring sections."""

    def __init__(self):
        super(PropertyCollecter, self).__init__()
        self.key_value_pairs = {}

    def assignment(self, key, value):
        # Keep the last value seen for each option name.
        self.key_value_pairs[key] = value

    def new_section(self, section):
        # Section names are irrelevant for this comparison.
        pass

    @classmethod
    def collect_properties(cls, lineiter, sample_format=False):
        """Parse lineiter and return a dict mapping option name to value."""
        def clean_sample(f):
            # Sample files comment options out as '#opt=val'; strip that
            # leading '#' while leaving '# ' prose comments untouched.
            for line in f:
                if line.startswith("#") and not line.startswith("# "):
                    line = line[1:]
                yield line

        collector = cls()
        source = clean_sample(lineiter) if sample_format else lineiter
        collector.parse(source)
        return collector.key_value_pairs
|
||||
|
||||
|
||||
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='''Compare the nova.conf
    file with the nova.conf.sample file to find any unused options or
    default values in nova.conf''')

    parser.add_argument('-c', action='store',
                        default='/etc/nova/nova.conf',
                        help='path to nova.conf\
                        (defaults to /etc/nova/nova.conf)')
    # BUG FIX: the help string below was missing its closing parenthesis.
    parser.add_argument('-s', default='./etc/nova/nova.conf.sample',
                        help='path to nova.conf.sample\
                        (defaults to ./etc/nova/nova.conf.sample)')
    options = parser.parse_args()

    # Close the files deterministically instead of leaking the handles.
    with open(options.c) as conf_file:
        conf_file_options = PropertyCollecter.collect_properties(conf_file)
    with open(options.s) as sample_file:
        sample_conf_file_options = PropertyCollecter.collect_properties(
            sample_file, sample_format=True)

    # Options present in nova.conf but unknown to the sample are unused.
    for k, v in sorted(conf_file_options.items()):
        if k not in sample_conf_file_options:
            print("Unused:", k)
    # Options explicitly set to their default value are redundant.
    for k, v in sorted(conf_file_options.items()):
        if k in sample_conf_file_options and v == sample_conf_file_options[k]:
            print("Default valued:", k)
|
25
tools/config/check_uptodate.sh
Executable file
25
tools/config/check_uptodate.sh
Executable file
@ -0,0 +1,25 @@
|
||||
#!/usr/bin/env bash

# Regenerate the sample config into a temp dir and fail when it differs
# from the checked-in ${PROJECT_NAME}.conf.sample.

PROJECT_NAME=${PROJECT_NAME:-nova}
CFGFILE_NAME=${PROJECT_NAME}.conf.sample

if [ -e "etc/${PROJECT_NAME}/${CFGFILE_NAME}" ]; then
    CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
elif [ -e "etc/${CFGFILE_NAME}" ]; then
    CFGFILE=etc/${CFGFILE_NAME}
else
    echo "${0##*/}: can not find config file"
    exit 1
fi

# mktemp template guarantees a non-predictable temp dir.
TEMPDIR=$(mktemp -d "/tmp/${PROJECT_NAME}.XXXXXX")
trap 'rm -rf "$TEMPDIR"' EXIT

tools/config/generate_sample.sh -b ./ -p "${PROJECT_NAME}" -o "${TEMPDIR}"

if ! diff -u "${TEMPDIR}/${CFGFILE_NAME}" "${CFGFILE}"
then
    echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
    echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
    exit 1
fi
|
119
tools/config/generate_sample.sh
Executable file
119
tools/config/generate_sample.sh
Executable file
@ -0,0 +1,119 @@
|
||||
#!/usr/bin/env bash

# Generate $PACKAGENAME.conf.sample by interrogating the project's python
# modules for registered oslo.config options.

print_hint() {
    echo "Try \`${0##*/} --help' for more information." >&2
}

PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
    --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")

if [ $? != 0 ] ; then print_hint ; exit 1 ; fi

eval set -- "$PARSED_OPTIONS"

while true; do
    case "$1" in
        -h|--help)
            echo "${0##*/} [options]"
            echo ""
            echo "options:"
            echo "-h, --help                show brief help"
            echo "-b, --base-dir=DIR        project base directory"
            echo "-p, --package-name=NAME   project package name"
            echo "-o, --output-dir=DIR      file output directory"
            echo "-m, --module=MOD          extra python module to interrogate for options"
            echo "-l, --library=LIB         extra library that registers options for discovery"
            exit 0
            ;;
        -b|--base-dir)
            shift
            # Strip any trailing slashes from the supplied path.
            BASEDIR=$(echo "$1" | sed -e 's/\/*$//g')
            shift
            ;;
        -p|--package-name)
            shift
            PACKAGENAME=$1
            shift
            ;;
        -o|--output-dir)
            shift
            OUTPUTDIR=$(echo "$1" | sed -e 's/\/*$//g')
            shift
            ;;
        -m|--module)
            shift
            MODULES="$MODULES -m $1"
            shift
            ;;
        -l|--library)
            shift
            LIBRARIES="$LIBRARIES -l $1"
            shift
            ;;
        --)
            break
            ;;
    esac
done

BASEDIR=${BASEDIR:-$(pwd)}
if ! [ -d "$BASEDIR" ]
then
    echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
elif [[ $BASEDIR != /* ]]
then
    # Normalize a relative base dir to an absolute path.
    BASEDIR=$(cd "$BASEDIR" && pwd)
fi

PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d "$TARGETDIR" ]
then
    echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
fi

OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
# NOTE(bnemec): Some projects put their sample config in etc/,
# some in etc/$PACKAGENAME/
if [ -d "$OUTPUTDIR/$PACKAGENAME" ]
then
    OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
elif ! [ -d "$OUTPUTDIR" ]
then
    echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
    exit 1
fi

# Escape slashes so the base dir can be used inside a sed pattern below.
BASEDIRESC=$(echo "$BASEDIR" | sed -e 's/\//\\\\\//g')
find "$TARGETDIR" -type f -name "*.pyc" -delete
FILES=$(find "$TARGETDIR" -type f -name "*.py" ! -path "*/tests/*" \
    -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)

RC_FILE="$(dirname "$0")/oslo.config.generator.rc"
if test -r "$RC_FILE"
then
    source "$RC_FILE"
fi

for mod in ${NOVA_CONFIG_GENERATOR_EXTRA_MODULES}; do
    MODULES="$MODULES -m $mod"
done

for lib in ${NOVA_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
    LIBRARIES="$LIBRARIES -l $lib"
done

export EVENTLET_NO_GREENDNS=yes

# Unset any OS_* credentials so they cannot leak into the sample output.
OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
[ "$OS_VARS" ] && eval "unset \$OS_VARS"
DEFAULT_MODULEPATH=nova.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
# $MODULES/$LIBRARIES/$FILES are intentionally unquoted: each holds a
# whitespace-separated argument list that must word-split.
python -m "$MODULEPATH" $MODULES $LIBRARIES $FILES > "$OUTPUTFILE"

# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls "$BASEDIR"/tools/config/*.conf.sample 2>/dev/null)
for CONCAT_FILE in $CONCAT_FILES; do
    cat "$CONCAT_FILE" >> "$OUTPUTFILE"
done
|
2
tools/config/oslo.config.generator.rc
Normal file
2
tools/config/oslo.config.generator.rc
Normal file
@ -0,0 +1,2 @@
|
||||
NOVA_CONFIG_GENERATOR_EXTRA_LIBRARIES="oslo.messaging oslo.db oslo.concurrency"
|
||||
NOVA_CONFIG_GENERATOR_EXTRA_MODULES=keystonemiddleware.auth_token
|
284
tools/db/schema_diff.py
Executable file
284
tools/db/schema_diff.py
Executable file
@ -0,0 +1,284 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Utility for diff'ing two versions of the DB schema.
|
||||
|
||||
Each release cycle the plan is to compact all of the migrations from that
|
||||
release into a single file. This is a manual and, unfortunately, error-prone
|
||||
process. To ensure that the schema doesn't change, this tool can be used to
|
||||
diff the compacted DB schema to the original, uncompacted form.
|
||||
|
||||
The database is specified by providing a SQLAlchemy connection URL WITHOUT the
|
||||
database-name portion (that will be filled in automatically with a temporary
|
||||
database name).
|
||||
|
||||
The schema versions are specified by providing a git ref (a branch name or
|
||||
commit hash) and a SQLAlchemy-Migrate version number:
|
||||
|
||||
Run like:
|
||||
|
||||
MYSQL:
|
||||
|
||||
./tools/db/schema_diff.py mysql://root@localhost \
|
||||
master:latest my_branch:82
|
||||
|
||||
POSTGRESQL:
|
||||
|
||||
./tools/db/schema_diff.py postgresql://localhost \
|
||||
master:latest my_branch:82
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import datetime
|
||||
import glob
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from nova.i18n import _
|
||||
|
||||
|
||||
# Dump
|
||||
|
||||
|
||||
def dump_db(db_driver, db_name, db_url, migration_version, dump_filename):
    """Create db_name, migrate it to migration_version, and dump its schema.

    The temporary database is dropped even if migration or dumping fails.
    """
    if not db_url.endswith('/'):
        db_url += '/'
    full_url = db_url + db_name

    db_driver.create(db_name)
    try:
        _migrate(full_url, migration_version)
        db_driver.dump(db_name, dump_filename)
    finally:
        db_driver.drop(db_name)
|
||||
|
||||
|
||||
# Diff
|
||||
|
||||
|
||||
def diff_files(filename1, filename2):
    """Show a unified diff of two files through a pager, colorized if able."""
    stages = ['diff -U 3 %(filename1)s %(filename2)s'
              % {'filename1': filename1, 'filename2': filename2}]

    # Use colordiff if available
    if subprocess.call(['which', 'colordiff']) == 0:
        stages.append('colordiff')

    stages.append('less -R')

    # Run the whole thing as one shell pipeline so the pager gets the output.
    subprocess.check_call(' | '.join(stages), shell=True)
|
||||
|
||||
|
||||
# Database
|
||||
|
||||
|
||||
class Mysql(object):
    """Create, drop and dump databases via the mysql command-line tools."""

    def create(self, name):
        subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name])

    def drop(self, name):
        subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name])

    def dump(self, name, dump_filename):
        # shell=True is needed for the output redirection.
        cmd = ('mysqldump -u root %(name)s > %(dump_filename)s'
               % {'name': name, 'dump_filename': dump_filename})
        subprocess.check_call(cmd, shell=True)
|
||||
|
||||
|
||||
class Postgresql(object):
    """Create, drop and dump databases via the postgres command-line tools."""

    def create(self, name):
        subprocess.check_call(['createdb', name])

    def drop(self, name):
        subprocess.check_call(['dropdb', name])

    def dump(self, name, dump_filename):
        # shell=True is needed for the output redirection.
        cmd = ('pg_dump %(name)s > %(dump_filename)s'
               % {'name': name, 'dump_filename': dump_filename})
        subprocess.check_call(cmd, shell=True)
|
||||
|
||||
|
||||
def _get_db_driver_class(db_url):
    """Return the driver class (Mysql/Postgresql) matching the URL scheme."""
    scheme = db_url.split('://')[0]
    try:
        return globals()[scheme.capitalize()]
    except KeyError:
        raise Exception(_("database %s not supported") % db_url)
|
||||
|
||||
|
||||
# Migrate
|
||||
|
||||
|
||||
MIGRATE_REPO = os.path.join(os.getcwd(), "nova/db/sqlalchemy/migrate_repo")
|
||||
|
||||
|
||||
def _migrate(db_url, migration_version):
    """Version-control db_url, then upgrade it to migration_version."""
    earliest_version = _migrate_get_earliest_version()

    # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of
    # migration numbers.
    _migrate_cmd(
        db_url, 'version_control', str(earliest_version - 1))

    cmd = ['upgrade']
    # 'latest' means no explicit target version for the upgrade command.
    if migration_version != 'latest':
        cmd.append(str(migration_version))

    _migrate_cmd(db_url, *cmd)
|
||||
|
||||
|
||||
def _migrate_cmd(db_url, *cmd):
    """Run a sqlalchemy-migrate manage.py command against db_url."""
    manage_py = os.path.join(MIGRATE_REPO, 'manage.py')

    args = ['python', manage_py] + list(cmd)
    args += ['--repository=%s' % MIGRATE_REPO,
             '--url=%s' % db_url]

    subprocess.check_call(args)
|
||||
|
||||
|
||||
def _migrate_get_earliest_version():
|
||||
versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
|
||||
|
||||
versions = []
|
||||
for path in glob.iglob(versions_glob):
|
||||
filename = os.path.basename(path)
|
||||
prefix = filename.split('_', 1)[0]
|
||||
try:
|
||||
version = int(prefix)
|
||||
except ValueError:
|
||||
pass
|
||||
versions.append(version)
|
||||
|
||||
versions.sort()
|
||||
return versions[0]
|
||||
|
||||
|
||||
# Git
|
||||
|
||||
|
||||
def git_current_branch_name():
    """Return the branch HEAD points at, without the refs/heads/ prefix."""
    ref_name = git_symbolic_ref('HEAD', quiet=True)
    return ref_name.replace('refs/heads/', '')
|
||||
|
||||
|
||||
def git_symbolic_ref(ref, quiet=False):
    """Return stripped `git symbolic-ref ref` output.

    With quiet=True, git suppresses errors for non-symbolic refs.
    """
    cmd = ['git', 'symbolic-ref', ref]
    if quiet:
        cmd.append('-q')
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out, _err = proc.communicate()
    return out.strip()
|
||||
|
||||
|
||||
def git_checkout(branch_name):
    """Check out the given branch, raising CalledProcessError on failure."""
    subprocess.check_call(['git', 'checkout', branch_name])
|
||||
|
||||
|
||||
def git_has_uncommited_changes():
    """Return True when tracked files differ from HEAD in the working tree."""
    status = subprocess.call(['git', 'diff', '--quiet', '--exit-code'])
    return status == 1
|
||||
|
||||
|
||||
# Command
|
||||
|
||||
|
||||
def die(msg):
    """Print an error message to stderr and exit with status 1."""
    sys.stderr.write("ERROR: %s\n" % msg)
    sys.exit(1)
|
||||
|
||||
|
||||
def usage(msg=None):
    """Print usage (and optionally an ERROR line) to stderr, then exit 1."""
    if msg:
        sys.stderr.write("ERROR: %s\n" % msg)

    arg_specs = ("<db-url>", "<orig-branch:orig-version>",
                 "<new-branch:new-version>")
    sys.stderr.write("usage: %s %s\n" % ("schema_diff.py",
                                         ' '.join(arg_specs)))
    sys.exit(1)
|
||||
|
||||
|
||||
def parse_options():
    """Parse the three positional CLI arguments.

    Returns (db_url, orig_branch, orig_version, new_branch, new_version).
    Exits via usage() when an argument is missing or malformed.
    """
    try:
        db_url = sys.argv[1]
    except IndexError:
        usage("must specify DB connection url")

    try:
        orig_branch, orig_version = sys.argv[2].split(':')
    except (IndexError, ValueError):
        # IndexError: the argument is missing entirely. ValueError: the
        # ':' separator is absent or repeated — previously this escaped
        # as an unhandled traceback instead of a usage message.
        usage('original branch and version required (e.g. master:82)')

    try:
        new_branch, new_version = sys.argv[3].split(':')
    except (IndexError, ValueError):
        usage('new branch and version required (e.g. master:82)')

    return db_url, orig_branch, orig_version, new_branch, new_version
|
||||
|
||||
|
||||
def main():
    """Diff the DB schemas produced by two branch:version pairs.

    Checks out each branch in turn, migrates a scratch database to the
    requested version, dumps each schema, and diffs the two dumps. The
    user's original branch is restored afterwards.
    """
    timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")

    # Timestamp the scratch DB/dump names so leftover artifacts from
    # earlier runs can't collide with this one.
    ORIG_DB = 'orig_db_%s' % timestamp
    NEW_DB = 'new_db_%s' % timestamp

    ORIG_DUMP = ORIG_DB + ".dump"
    NEW_DUMP = NEW_DB + ".dump"

    options = parse_options()
    db_url, orig_branch, orig_version, new_branch, new_version = options

    # Since we're going to be switching branches, ensure user doesn't have any
    # uncommited changes
    if git_has_uncommited_changes():
        die("You have uncommited changes. Please commit them before running "
            "this command.")

    db_driver = _get_db_driver_class(db_url)()

    users_branch = git_current_branch_name()
    git_checkout(orig_branch)

    try:
        # Dump Original Schema
        dump_db(db_driver, ORIG_DB, db_url, orig_version, ORIG_DUMP)

        # Dump New Schema
        git_checkout(new_branch)
        dump_db(db_driver, NEW_DB, db_url, new_version, NEW_DUMP)

        diff_files(ORIG_DUMP, NEW_DUMP)
    finally:
        # Always restore the user's branch and remove the scratch dumps,
        # even when a dump/diff step raised.
        git_checkout(users_branch)

        if os.path.exists(ORIG_DUMP):
            os.unlink(ORIG_DUMP)

        if os.path.exists(NEW_DUMP):
            os.unlink(NEW_DUMP)


if __name__ == "__main__":
    main()
|
42
tools/enable-pre-commit-hook.sh
Executable file
42
tools/enable-pre-commit-hook.sh
Executable file
@ -0,0 +1,42 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
PRE_COMMIT_SCRIPT=.git/hooks/pre-commit
|
||||
|
||||
# Append the test-runner invocation to .git/hooks/pre-commit (relative to
# the current directory) and mark it executable.
# NOTE(review): this appends rather than overwrites, so repeated runs add
# duplicate lines — TODO confirm whether that is intended.
make_hook() {
    echo "exec ./run_tests.sh -N -p" >> "$PRE_COMMIT_SCRIPT"
    chmod +x "$PRE_COMMIT_SCRIPT"

    # Quote expansions and use two test commands joined with '&&' instead
    # of the deprecated, ambiguous '-a' operator inside a single '['.
    if [ -w "$PRE_COMMIT_SCRIPT" ] && [ -x "$PRE_COMMIT_SCRIPT" ]; then
        echo "pre-commit hook was created successfully"
    else
        echo "unable to create pre-commit hook"
    fi
}
|
||||
|
||||
# NOTE(jk0): Make sure we are in nova's root directory before adding the hook.
# Allows invocation from either the repo root or the tools/ subdirectory
# (one level down); anywhere else the hook is not created.
if [ ! -d ".git" ]; then
    echo "unable to find .git; moving up a directory"
    cd ..
    if [ -d ".git" ]; then
        make_hook
    else
        echo "still unable to find .git; hook not created"
    fi
else
    make_hook
fi
|
||||
|
73
tools/install_venv.py
Normal file
73
tools/install_venv.py
Normal file
@ -0,0 +1,73 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Copyright 2010 OpenStack Foundation
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
import install_venv_common as install_venv
|
||||
|
||||
|
||||
def print_help(venv, root):
    """Print post-setup instructions for activating and using the venv."""
    message = """
Nova development environment setup is complete.

Nova development uses virtualenv to track and manage Python dependencies
while in development and testing.

To activate the Nova virtualenv for the extent of your current shell
session you can run:

$ source %s/bin/activate

Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:

$ %s/tools/with_venv.sh <your command>

Also, make test will automatically use the virtualenv.
"""
    print(message % (venv, root))
|
||||
|
||||
|
||||
def main(argv):
    """Bootstrap the Nova development virtualenv.

    Environment overrides: 'tools_path' relocates the project root and
    'venv' relocates the virtualenv directory.
    """
    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    if os.environ.get('tools_path'):
        root = os.environ['tools_path']
    venv = os.path.join(root, '.venv')
    if os.environ.get('venv'):
        venv = os.environ['venv']

    pip_requires = os.path.join(root, 'requirements.txt')
    test_requires = os.path.join(root, 'test-requirements.txt')
    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
    project = 'Nova'
    install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
                                       py_version, project)
    options = install.parse_args(argv)
    install.check_python_version()
    install.check_dependencies()
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()
    print_help(venv, root)


if __name__ == '__main__':
    main(sys.argv)
|
172
tools/install_venv_common.py
Normal file
172
tools/install_venv_common.py
Normal file
@ -0,0 +1,172 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Provides methods needed by installation script for OpenStack development
|
||||
virtual environments.
|
||||
|
||||
Since this script is used to bootstrap a virtualenv from the system's Python
|
||||
environment, it should be kept strictly compatible with Python 2.6.
|
||||
|
||||
Synced in from openstack-common
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import optparse
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
class InstallVenv(object):
    """Creates a project virtualenv and installs its dependencies.

    Platform-specific handling (how to get virtualenv itself installed)
    is delegated to the Distro/Fedora subclasses via get_distro().
    """

    def __init__(self, root, venv, requirements,
                 test_requirements, py_version,
                 project):
        # root: project checkout directory (used as cwd for commands);
        # venv: target virtualenv directory; requirements /
        # test_requirements: paths to pip requirement files.
        self.root = root
        self.venv = venv
        self.requirements = requirements
        self.test_requirements = test_requirements
        self.py_version = py_version
        self.project = project

    def die(self, message, *args):
        # Print a %-formatted error to stderr and abort the process.
        print(message % args, file=sys.stderr)
        sys.exit(1)

    def check_python_version(self):
        # This script must stay Python 2.6-compatible (see module docstring).
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")

    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.

        Returns the output of that command. Working directory is self.root.
        """
        if redirect_output:
            stdout = subprocess.PIPE
        else:
            stdout = None

        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
        output = proc.communicate()[0]
        if check_exit_code and proc.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
        # The returncode element is only meaningful when check_exit_code
        # is False (otherwise a failure has already exited above).
        return (output, proc.returncode)

    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        # Convenience wrapper around run_command_with_code that drops
        # the return code and yields just the output.
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]

    def get_distro(self):
        # Fedora-family systems (detected via release files) need
        # rpm-based handling; everything else gets the generic Distro.
        if (os.path.exists('/etc/fedora-release') or
                os.path.exists('/etc/redhat-release')):
            return Fedora(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)
        else:
            return Distro(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)

    def check_dependencies(self):
        # Ensure the 'virtualenv' tool itself is available on this system.
        self.get_distro().install_virtualenv()

    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.

        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if not os.path.isdir(self.venv):
            print('Creating venv...', end=' ')
            if no_site_packages:
                self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
            else:
                self.run_command(['virtualenv', '-q', self.venv])
            print('done.')
        else:
            # Idempotent: an existing venv is left untouched.
            print("venv already exists...")
            pass

    def pip_install(self, *args):
        # Run pip inside the venv via the with_venv.sh wrapper so the
        # venv's interpreter and site-packages are used.
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)

    def install_dependencies(self):
        print('Installing dependencies with pip (this can take a while)...')

        # First things first, make sure our venv has the latest pip and
        # setuptools and pbr
        self.pip_install('pip>=1.4')
        self.pip_install('setuptools')
        self.pip_install('pbr')

        self.pip_install('-r', self.requirements, '-r', self.test_requirements)

    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install.")
        return parser.parse_args(argv[1:])[0]
|
||||
|
||||
|
||||
class Distro(InstallVenv):
    """Generic distro: installs virtualenv via easy_install if missing."""

    def check_cmd(self, cmd):
        # True when 'cmd' is on PATH: 'which' prints the resolved path,
        # so a non-empty stripped output means the command exists.
        return bool(self.run_command(['which', cmd],
                                     check_exit_code=False).strip())

    def install_virtualenv(self):
        # Nothing to do if the tool is already available.
        if self.check_cmd('virtualenv'):
            return

        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            else:
                print('Failed')

        # No installation path worked; bail out with guidance.
        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)
|
||||
|
||||
|
||||
class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """

    def check_pkg(self, pkg):
        # True when the rpm database reports 'pkg' as installed
        # (rpm -q exits 0 in that case).
        return self.run_command_with_code(['rpm', '-q', pkg],
                                          check_exit_code=False)[1] == 0

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        # Unlike the generic Distro, require the distro package rather
        # than falling back to easy_install.
        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")

        super(Fedora, self).install_virtualenv()
|
37
tools/nova-manage.bash_completion
Normal file
37
tools/nova-manage.bash_completion
Normal file
@ -0,0 +1,37 @@
|
||||
# bash completion for openstack nova-manage

# Completion words are fetched lazily from 'nova-manage bash-completion'
# on first use and cached in these globals.
_nova_manage_opts="" # lazy init
_nova_manage_opts_exp="" # lazy init

# dict hack for bash 3
# (bash 3 has no associative arrays, so per-command sub-option lists are
# stored in dynamically named variables via eval.)
_set_nova_manage_subopts () {
    eval _nova_manage_subopts_"$1"='$2'
}
_get_nova_manage_subopts () {
    eval echo '${_nova_manage_subopts_'"$1"'#_nova_manage_subopts_}'
}

_nova_manage()
{
    local cur prev subopts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"

    # First invocation: ask nova-manage for its top-level commands and
    # build an alternation pattern ("cmd1|cmd2|...") for the regex below.
    if [ "x$_nova_manage_opts" == "x" ] ; then
        _nova_manage_opts="`nova-manage bash-completion 2>/dev/null`"
        _nova_manage_opts_exp="`echo $_nova_manage_opts | sed -e "s/\s/|/g"`"
    fi

    # If the previous word is a known command, complete its sub-options
    # (fetched once per command, then cached).
    if [[ " `echo $_nova_manage_opts` " =~ " $prev " ]] ; then
        if [ "x$(_get_nova_manage_subopts "$prev")" == "x" ] ; then
            subopts="`nova-manage bash-completion $prev 2>/dev/null`"
            _set_nova_manage_subopts "$prev" "$subopts"
        fi
        COMPREPLY=($(compgen -W "$(_get_nova_manage_subopts "$prev")" -- ${cur}))
    elif [[ ! " ${COMP_WORDS[@]} " =~ " "($_nova_manage_opts_exp)" " ]] ; then
        # No command typed yet anywhere on the line: offer the commands.
        COMPREPLY=($(compgen -W "${_nova_manage_opts}" -- ${cur}))
    fi
    return 0
}
complete -F _nova_manage nova-manage
|
6
tools/pretty_tox.sh
Executable file
6
tools/pretty_tox.sh
Executable file
@ -0,0 +1,6 @@
|
||||
#!/usr/bin/env bash
#
# Run testr through setup.py and pipe the subunit stream into
# subunit-trace for readable output. 'pipefail' makes the script's exit
# status reflect a test failure even though the output is piped.

set -o pipefail

# Forward every CLI argument to testr. (Previously only $1 was used,
# silently dropping any additional test selectors.)
TESTRARGS=$*
python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f
|
109
tools/regression_tester.py
Executable file
109
tools/regression_tester.py
Executable file
@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
"""Tool for checking if patch contains a regression test.
|
||||
|
||||
By default runs against current patch but can be set to use any gerrit review
|
||||
as specified by change number (uses 'git review -d').
|
||||
|
||||
Idea: take tests from patch to check, and run against code from previous patch.
|
||||
If new tests pass, then no regression test, if new tests fails against old code
|
||||
then either
|
||||
* new tests depend on new code and cannot confirm regression test is valid
|
||||
(false positive)
|
||||
* new tests detects the bug being fixed (detect valid regression test)
|
||||
Due to the risk of false positives, the results from this need some human
|
||||
interpretation.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import optparse
|
||||
import string
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
def run(cmd, fail_ok=False):
    """Run cmd through the shell, echoing it first, and return its stdout.

    Exits the whole process with cmd's return code on failure unless
    fail_ok is True.
    """
    print("running: %s" % cmd)
    obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                           shell=True)
    # communicate() drains both pipes. The original wait()+stdout.read()
    # sequence could deadlock once either pipe's OS buffer filled, and
    # the stderr pipe was never consumed at all.
    stdout, _stderr = obj.communicate()
    if obj.returncode != 0 and not fail_ok:
        print("The above command terminated with an error.")
        sys.exit(obj.returncode)
    return stdout
|
||||
|
||||
|
||||
def main():
    """Check whether a patch carries a regression test.

    Runs the patch's changed tests against the previous revision's code;
    a failure there suggests the tests really do detect the bug.
    NOTE(review): relies on the Python 2-only 'string' module functions
    (string.replace / string.join) and shells out to git and tox.
    """
    usage = """
Tool for checking if a patch includes a regression test.

Usage: %prog [options]"""
    parser = optparse.OptionParser(usage)
    parser.add_option("-r", "--review", dest="review",
                      help="gerrit review number to test")
    (options, args) = parser.parse_args()
    if options.review:
        original_branch = run("git rev-parse --abbrev-ref HEAD")
        run("git review -d %s" % options.review)
    else:
        print ("no gerrit review number specified, running on latest commit"
               "on current branch.")

    test_works = False

    # run new tests with old code
    run("git checkout HEAD^ nova")
    run("git checkout HEAD nova/tests")

    # identify which tests have changed
    tests = run("git whatchanged --format=oneline -1 | grep \"nova/tests\" "
                "| cut -f2").split()
    test_list = []
    for test in tests:
        # Strip the '.py' suffix and convert the path to a dotted module.
        test_list.append(string.replace(test[0:-3], '/', '.'))

    if test_list == []:
        test_works = False
        expect_failure = ""
    else:
        # run new tests, expect them to fail
        expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
                             fail_ok=True)
        if "FAILED (id=" in expect_failure:
            test_works = True

    # cleanup
    run("git checkout HEAD nova")
    if options.review:
        new_branch = run("git status | head -1 | cut -d ' ' -f 4")
        run("git checkout %s" % original_branch)
        run("git branch -D %s" % new_branch)

    print(expect_failure)
    print("")
    print("*******************************")
    if test_works:
        print("FOUND a regression test")
    else:
        print("NO regression test")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
7
tools/with_venv.sh
Executable file
7
tools/with_venv.sh
Executable file
@ -0,0 +1,7 @@
|
||||
#!/bin/bash
# Run an arbitrary command inside the project's virtualenv.
# tools_path / venv_path / venv_name / venv may all be overridden from
# the environment.
tools_path=${tools_path:-$(dirname $0)}
venv_path=${venv_path:-${tools_path}}
# NOTE(review): the default already begins with '/', and it is joined
# below as ${venv_path}/${venv_dir}, producing a double slash — harmless
# on POSIX paths but looks unintended; TODO confirm.
venv_dir=${venv_name:-/../.venv}
TOOLS=${tools_path}
VENV=${venv:-${venv_path}/${venv_dir}}
source ${VENV}/bin/activate && "$@"
|
123
tools/xenserver/cleanup_sm_locks.py
Executable file
123
tools/xenserver/cleanup_sm_locks.py
Executable file
@ -0,0 +1,123 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Script to cleanup old XenServer /var/lock/sm locks.
|
||||
|
||||
XenServer 5.6 and 6.0 do not appear to always cleanup locks when using a
|
||||
FileSR. ext3 has a limit of 32K inode links, so when we have 32K-2 (31998)
|
||||
locks laying around, builds will begin to fail because we can't create any
|
||||
additional locks. This cleanup script is something we can run periodically as
|
||||
a stop-gap measure until this is fixed upstream.
|
||||
|
||||
This script should be run on the dom0 of the affected machine.
|
||||
"""
|
||||
import errno
|
||||
import optparse
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
BASE = '/var/lock/sm'
|
||||
|
||||
|
||||
def _get_age_days(secs):
|
||||
return float(time.time() - secs) / 86400
|
||||
|
||||
|
||||
def _parse_args():
    """Parse options plus the positional days-old threshold.

    Returns (options, days_old); prints help and exits 1 when the
    threshold argument is missing or non-numeric.
    """
    parser = optparse.OptionParser()
    parser.add_option("-d", "--dry-run",
                      action="store_true", dest="dry_run", default=False,
                      help="don't actually remove locks")
    parser.add_option("-l", "--limit",
                      action="store", type='int', dest="limit",
                      # NOTE(review): sys.maxint is Python 2 only.
                      default=sys.maxint,
                      help="max number of locks to delete (default: no limit)")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="don't print status messages to stdout")

    options, args = parser.parse_args()

    try:
        days_old = int(args[0])
    except (IndexError, ValueError):
        parser.print_help()
        sys.exit(1)

    return options, days_old
|
||||
|
||||
|
||||
def main():
    """Remove /var/lock/sm lock files older than the given age in days.

    Python 2 script (print statements); must run on the XenServer dom0.
    """
    options, days_old = _parse_args()

    if not os.path.exists(BASE):
        print >> sys.stderr, "error: '%s' doesn't exist. Make sure you're"\
            " running this on the dom0." % BASE
        sys.exit(1)

    lockpaths_removed = 0
    nspaths_removed = 0

    # Each namespace is a directory under BASE holding its lock files;
    # --limit bounds how many namespaces are examined.
    for nsname in os.listdir(BASE)[:options.limit]:
        nspath = os.path.join(BASE, nsname)

        if not os.path.isdir(nspath):
            continue

        # Remove old lockfiles
        removed = 0
        locknames = os.listdir(nspath)
        for lockname in locknames:
            lockpath = os.path.join(nspath, lockname)
            lock_age_days = _get_age_days(os.path.getmtime(lockpath))
            if lock_age_days > days_old:
                lockpaths_removed += 1
                removed += 1

                if options.verbose:
                    print 'Removing old lock: %03d %s' % (lock_age_days,
                                                          lockpath)

                if not options.dry_run:
                    os.unlink(lockpath)

        # Remove empty namespace paths
        if len(locknames) == removed:
            nspaths_removed += 1

            if options.verbose:
                print 'Removing empty namespace: %s' % nspath

            if not options.dry_run:
                try:
                    os.rmdir(nspath)
                except OSError, e:
                    if e.errno == errno.ENOTEMPTY:
                        # A new lock appeared since we listed the
                        # directory; warn rather than fail.
                        print >> sys.stderr, "warning: directory '%s'"\
                            " not empty" % nspath
                    else:
                        raise

    if options.dry_run:
        print "** Dry Run **"

    print "Total locks removed: ", lockpaths_removed
    print "Total namespaces removed: ", nspaths_removed


if __name__ == '__main__':
    main()
|
68
tools/xenserver/destroy_cached_images.py
Normal file
68
tools/xenserver/destroy_cached_images.py
Normal file
@ -0,0 +1,68 @@
|
||||
"""
|
||||
destroy_cached_images.py
|
||||
|
||||
This script is used to clean up Glance images that are cached in the SR. By
|
||||
default, this script will only cleanup unused cached images.
|
||||
|
||||
Options:
|
||||
|
||||
--dry_run - Don't actually destroy the VDIs
|
||||
--all_cached - Destroy all cached images instead of just unused cached
|
||||
images.
|
||||
"""
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
# If ../nova/__init__.py exists, add ../ to Python search path, so that
|
||||
# it will override what happens to be installed in /usr/(local/)lib/python...
|
||||
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
os.pardir,
|
||||
os.pardir,
|
||||
os.pardir))
|
||||
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, POSSIBLE_TOPDIR)
|
||||
|
||||
from nova import config
|
||||
from nova import utils
|
||||
from nova.virt.xenapi import driver as xenapi_driver
|
||||
from nova.virt.xenapi import vm_utils
|
||||
|
||||
destroy_opts = [
|
||||
cfg.BoolOpt('all_cached',
|
||||
default=False,
|
||||
help='Destroy all cached images instead of just unused cached'
|
||||
' images.'),
|
||||
cfg.BoolOpt('dry_run',
|
||||
default=False,
|
||||
help='Don\'t actually delete the VDIs.')
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_cli_opts(destroy_opts)
|
||||
|
||||
|
||||
def main():
    """Destroy Glance images cached in the XenServer SR.

    Behavior is driven by the all_cached/dry_run CLI opts registered
    above; passing --verbose additionally prints each destroyed VDI.
    (Python 2 print statements.)
    """
    config.parse_args(sys.argv)
    utils.monkey_patch()

    xenapi = xenapi_driver.XenAPIDriver()
    session = xenapi._session

    sr_ref = vm_utils.safe_find_sr(session)
    destroyed = vm_utils.destroy_cached_images(
        session, sr_ref, all_cached=CONF.all_cached,
        dry_run=CONF.dry_run)

    if '--verbose' in sys.argv:
        print '\n'.join(destroyed)

    print "Destroyed %d cached VDIs" % len(destroyed)


if __name__ == "__main__":
    main()
|
103
tools/xenserver/populate_other_config.py
Normal file
103
tools/xenserver/populate_other_config.py
Normal file
@ -0,0 +1,103 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
One-time script to populate VDI.other_config.
|
||||
|
||||
We use metadata stored in VDI.other_config to associate a VDI with a given
|
||||
instance so that we may safely cleanup orphaned VDIs.
|
||||
|
||||
We had a bug in the code that meant that the vast majority of VDIs created
|
||||
would not have the other_config populated.
|
||||
|
||||
After deploying the fixed code, this script is intended to be run against all
|
||||
compute-workers in a cluster so that existing VDIs can have their other_configs
|
||||
populated.
|
||||
|
||||
Run on compute-worker (not Dom0):
|
||||
|
||||
python ./tools/xenserver/populate_other_config.py [--dry-run|--verbose]
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
|
||||
possible_topdir = os.getcwd()
|
||||
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
from nova import config
|
||||
from nova.openstack.common import uuidutils
|
||||
from nova.virt import virtapi
|
||||
from nova.virt.xenapi import driver as xenapi_driver
|
||||
from nova.virt.xenapi import vm_utils
|
||||
from oslo.config import cfg
|
||||
|
||||
cli_opts = [
|
||||
cfg.BoolOpt('dry-run',
|
||||
default=False,
|
||||
help='Whether to actually update other_config.'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_cli_opts(cli_opts)
|
||||
|
||||
|
||||
def main():
    """Backfill VDI.other_config['nova_instance_uuid'] for existing VDIs.

    Only VDIs whose name-label matches 'instance-<uuid>...' and that do
    not already carry the key are touched; --dry-run skips the writes.
    (Python 2 print statements.)
    """
    config.parse_args(sys.argv)

    xenapi = xenapi_driver.XenAPIDriver(virtapi.VirtAPI())
    session = xenapi._session

    vdi_refs = session.call_xenapi('VDI.get_all')
    for vdi_ref in vdi_refs:
        vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)

        other_config = vdi_rec['other_config']

        # Already set...
        if 'nova_instance_uuid' in other_config:
            continue

        name_label = vdi_rec['name_label']

        # We only want name-labels of form instance-<UUID>-[optional-suffix]
        if not name_label.startswith('instance-'):
            continue

        # Parse out UUID (a canonical UUID string is 36 characters)
        instance_uuid = name_label.replace('instance-', '')[:36]
        if not uuidutils.is_uuid_like(instance_uuid):
            print "error: name label '%s' wasn't UUID-like" % name_label
            continue

        vdi_type = vdi_rec['name_description']

        # We don't need a full instance record, just the UUID
        instance = {'uuid': instance_uuid}

        if not CONF.dry_run:
            vm_utils._set_vdi_info(session, vdi_ref, vdi_type, name_label,
                                   vdi_type, instance)

        if CONF.verbose:
            print "Setting other_config for instance_uuid=%s vdi_uuid=%s" % (
                instance_uuid, vdi_rec['uuid'])

    if CONF.dry_run:
        print "Dry run completed"


if __name__ == "__main__":
    main()
|
65
tools/xenserver/rotate_xen_guest_logs.sh
Executable file
65
tools/xenserver/rotate_xen_guest_logs.sh
Executable file
@ -0,0 +1,65 @@
|
||||
#!/bin/bash
set -eux

# Script to rotate console logs
#
# Should be run on Dom0, with cron, every minute:
# * * * * * /root/rotate_xen_guest_logs.sh
#
# Should clear out the guest logs on every boot
# because the domain ids may get re-used for a
# different tenant after the reboot
#
# /var/log/xen/guest should be mounted into a
# small loopback device to stop any guest being
# able to fill dom0 file system

log_dir="/var/log/xen/guest"
kb=1024
max_size_bytes=$(($kb*$kb))
truncated_size_bytes=$((5*$kb))
list_domains=/opt/xensource/bin/list_domains

log_file_base="${log_dir}/console."
tmp_file_base="${log_dir}/tmp.console."

# Ensure logging is setup correctly for all domains
xenstore-write /local/logconsole/@ "${log_file_base}%d"

# Move logs we want to keep
# (the sed pipeline strips the header row and the fields after the first
# '|', leaving just domain ids — presumably list_domains prints
# 'id | uuid | ...' rows; TODO confirm output format)
domains=$($list_domains | sed '/^id*/d' | sed 's/|.*|.*$//g' | xargs)
for i in $domains; do
    log="${log_file_base}$i"
    tmp="${tmp_file_base}$i"
    mv $log $tmp || true
done

# Delete all console logs,
# mostly to remove logs from recently killed domains
rm -f ${log_dir}/console.*

# Reload domain list, in case it changed
# (note we may have just deleted a new console log)
domains=$($list_domains | sed '/^id*/d' | sed 's/|.*|.*$//g' | xargs)
for i in $domains; do
    log="${log_file_base}$i"
    tmp="${tmp_file_base}$i"

    if [ -e "$tmp" ]; then
        size=$(stat -c%s "$tmp")

        # Trim the log if required
        if [ "$size" -gt "$max_size_bytes" ]; then
            tail -c $truncated_size_bytes $tmp > $log || true
        else
            mv $tmp $log || true
        fi
    fi

    # Notify xen that it needs to reload the file
    xenstore-write /local/logconsole/$i $log
    xenstore-rm /local/logconsole/$i
done

# Delete all the tmp files
rm -f ${tmp_file_base}* || true
|
172
tools/xenserver/stress_test.py
Normal file
172
tools/xenserver/stress_test.py
Normal file
@ -0,0 +1,172 @@
|
||||
"""
|
||||
This script concurrently builds and migrates instances. This can be useful when
|
||||
troubleshooting race-conditions in virt-layer code.
|
||||
|
||||
Expects:
|
||||
|
||||
novarc to be sourced in the environment
|
||||
|
||||
Helper Script for Xen Dom0:
|
||||
|
||||
# cat /tmp/destroy_cache_vdis
|
||||
#!/bin/bash
|
||||
xe vdi-list | grep "Glance Image" -C1 | grep "^uuid" | awk '{print $5}' |
|
||||
xargs -n1 -I{} xe vdi-destroy uuid={}
|
||||
"""
|
||||
import argparse
|
||||
import contextlib
|
||||
import multiprocessing
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
DOM0_CLEANUP_SCRIPT = "/tmp/destroy_cache_vdis"
|
||||
|
||||
|
||||
def run(cmd):
    # Best-effort shell command runner for the stress loop: failures are
    # reported to stderr but deliberately do not abort the run.
    # (Python 2 'print >>' syntax.)
    ret = subprocess.call(cmd, shell=True)
    if ret != 0:
        print >> sys.stderr, "Command exited non-zero: %s" % cmd
|
||||
|
||||
|
||||
@contextlib.contextmanager
def server_built(server_name, image_name, flavor=1, cleanup=True):
    """Context manager: boot an instance on entry, delete it on exit.

    The delete runs even when the body raises, unless cleanup is False.
    """
    boot_cmd = ("nova boot --image=%(image_name)s --flavor=%(flavor)s"
                " --poll %(server_name)s" % locals())
    run(boot_cmd)
    try:
        yield
    finally:
        if cleanup:
            run("nova delete %(server_name)s" % locals())
|
||||
|
||||
|
||||
@contextlib.contextmanager
def snapshot_taken(server_name, snapshot_name, cleanup=True):
    """Context manager: snapshot a server on entry, delete the image on exit.

    The image delete only happens when *cleanup* is true.
    """
    params = {"server_name": server_name, "snapshot_name": snapshot_name}
    run("nova image-create %(server_name)s %(snapshot_name)s"
        " --poll" % params)
    try:
        yield
    finally:
        if cleanup:
            run("nova image-delete %(snapshot_name)s" % params)
|
||||
|
||||
|
||||
def migrate_server(server_name):
    """Migrate a server, verify it reached VERIFY_RESIZE, then confirm.

    :param server_name: name of the server to migrate
    :returns: True on success, False if the server did not reach the
              VERIFY_RESIZE state after migrating.
    """
    run("nova migrate %(server_name)s --poll" % locals())

    # Column 6 of `nova list` is the status field — TODO confirm this holds
    # for the deployed novaclient version.
    cmd = "nova list | grep %(server_name)s | awk '{print $6}'" % locals()
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    stdout, stderr = proc.communicate()
    status = stdout.strip()
    if status.upper() != 'VERIFY_RESIZE':
        # BUG FIX: the message previously said "failed to rebuild" even
        # though this is the migrate path.
        sys.stderr.write("Server %(server_name)s failed to migrate\n"
                         % locals())
        return False

    # Confirm the resize
    run("nova resize-confirm %(server_name)s" % locals())
    return True
|
||||
|
||||
|
||||
def test_migrate(context):
    """Boot a server and migrate it A -> B -> A.

    :param context: (count, parsed_args) tuple from the process pool
    :returns: True when both migrations succeed, else False
    """
    count, args = context
    name = "server%d" % count
    with server_built(name, args.image, cleanup=args.cleanup):
        # Migrate A -> B; bail out early if the first hop fails.
        if not migrate_server(name):
            return False

        # Migrate B -> A
        return migrate_server(name)
|
||||
|
||||
|
||||
def rebuild_server(server_name, snapshot_name):
    """Rebuild *server_name* from *snapshot_name* and poll for completion.

    :returns: True if the server is ACTIVE afterwards, False otherwise.
    """
    run("nova rebuild %(server_name)s %(snapshot_name)s --poll" % locals())

    # Column 6 of `nova list` is the status field — TODO confirm for the
    # deployed novaclient version.
    cmd = "nova list | grep %(server_name)s | awk '{print $6}'" % locals()
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    stdout, stderr = proc.communicate()
    status = stdout.strip()
    if status != 'ACTIVE':
        # Py2-only `print >> sys.stderr` replaced with a 2/3-compatible write.
        sys.stderr.write("Server %(server_name)s failed to rebuild\n"
                         % locals())
        return False

    return True
|
||||
|
||||
|
||||
def test_rebuild(context):
    """Boot a server, snapshot it, then rebuild it from that snapshot.

    :param context: (count, parsed_args) tuple from the process pool
    :returns: result of rebuild_server (True on success)
    """
    count, args = context
    name = "server%d" % count
    snap = "snap%d" % count
    do_cleanup = args.cleanup
    with server_built(name, args.image, cleanup=do_cleanup):
        with snapshot_taken(name, snap, cleanup=do_cleanup):
            return rebuild_server(name, snap)
|
||||
|
||||
|
||||
def _parse_args():
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Test Nova for Race Conditions.')
|
||||
|
||||
parser.add_argument('tests', metavar='TESTS', type=str, nargs='*',
|
||||
default=['rebuild', 'migrate'],
|
||||
help='tests to run: [rebuilt|migrate]')
|
||||
|
||||
parser.add_argument('-i', '--image', help="image to build from",
|
||||
required=True)
|
||||
parser.add_argument('-n', '--num-runs', type=int, help="number of runs",
|
||||
default=1)
|
||||
parser.add_argument('-c', '--concurrency', type=int, default=5,
|
||||
help="number of concurrent processes")
|
||||
parser.add_argument('--no-cleanup', action='store_false', dest="cleanup",
|
||||
default=True)
|
||||
parser.add_argument('-d', '--dom0-ips',
|
||||
help="IP of dom0's to run cleanup script")
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main():
    """Parse args and fan the requested tests out over a process pool.

    Exits 0 when every test run succeeded, 1 otherwise. When cleanup is
    enabled, the dom0 cleanup script is invoked on each dom0 IP after every
    test batch (even if the batch raised).
    """
    dom0_cleanup_script = DOM0_CLEANUP_SCRIPT
    args = _parse_args()

    if args.dom0_ips:
        dom0_ips = args.dom0_ips.split(',')
    else:
        dom0_ips = []

    start_time = time.time()
    # NOTE(review): removed an unused `batch_size` computation that was
    # never read; the pool size alone bounds concurrency.
    pool = multiprocessing.Pool(processes=args.concurrency)

    results = []
    for test in args.tests:
        # Dispatch by name: 'migrate' -> test_migrate, 'rebuild' -> test_rebuild.
        test_func = globals().get("test_%s" % test)
        if not test_func:
            sys.stderr.write("test '%s' not found\n" % test)
            sys.exit(1)

        contexts = [(x, args) for x in range(args.num_runs)]

        try:
            results += pool.map(test_func, contexts)
        finally:
            if args.cleanup:
                # Scrub cached VDIs on every dom0 after each batch.
                for dom0_ip in dom0_ips:
                    run("ssh root@%(dom0_ip)s %(dom0_cleanup_script)s"
                        % locals())

    success = all(results)
    result = "SUCCESS" if success else "FAILED"

    duration = time.time() - start_time
    print("%s, finished in %.2f secs" % (result, duration))

    sys.exit(0 if success else 1)


if __name__ == "__main__":
    main()
|
128
tools/xenserver/vdi_chain_cleanup.py
Normal file
128
tools/xenserver/vdi_chain_cleanup.py
Normal file
@ -0,0 +1,128 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This script is designed to cleanup any VHDs (and their descendents) which have
|
||||
a bad parent pointer.
|
||||
|
||||
The script needs to be run in the dom0 of the affected host.
|
||||
|
||||
The available actions are:
|
||||
|
||||
- print: display the filenames of the affected VHDs
|
||||
- delete: remove the affected VHDs
|
||||
- move: move the affected VHDs out of the SR into another directory
|
||||
"""
|
||||
import glob
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
class ExecutionFailed(Exception):
    """Raised when a subprocess exits with an unexpected status code."""

    def __init__(self, returncode, stdout, stderr, max_stream_length=32):
        # Keep only a short prefix of each stream so repr() stays readable.
        self.returncode = returncode
        self.stdout = stdout[:max_stream_length]
        self.stderr = stderr[:max_stream_length]
        self.max_stream_length = max_stream_length

    def __repr__(self):
        template = "<ExecutionFailed returncode=%s out='%s' stderr='%s'>"
        return template % (self.returncode, self.stdout, self.stderr)

    # str() and repr() deliberately render the same summary.
    __str__ = __repr__
|
||||
|
||||
|
||||
def execute(cmd, ok_exit_codes=None):
    """Run *cmd* (an argv list), capturing stdout and stderr.

    :param cmd: argv-style command list
    :param ok_exit_codes: exit codes that do NOT raise (default: [0])
    :raises ExecutionFailed: when the exit code is not acceptable
    :returns: (returncode, stdout, stderr)
    """
    acceptable = [0] if ok_exit_codes is None else ok_exit_codes

    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    if proc.returncode not in acceptable:
        raise ExecutionFailed(proc.returncode, stdout, stderr)

    return proc.returncode, stdout, stderr
|
||||
|
||||
|
||||
def usage():
    """Print usage for vdi_chain_cleanup and exit with status 1.

    Py2-only print statement replaced with the 2/3-compatible call form.
    """
    print("usage: %s <SR PATH> <print|delete|move>" % sys.argv[0])
    sys.exit(1)
|
||||
|
||||
|
||||
def main():
    # CLI: vdi_chain_cleanup.py <SR PATH> <print|delete|move> [bad-vhd-dir]
    if len(sys.argv) < 3:
        usage()

    sr_path = sys.argv[1]
    action = sys.argv[2]

    if action not in ('print', 'delete', 'move'):
        usage()

    if action == 'move':
        # 'move' needs a destination directory as the third argument.
        if len(sys.argv) < 4:
            print "error: must specify where to move bad VHDs"
            sys.exit(1)

        bad_vhd_path = sys.argv[3]
        if not os.path.exists(bad_vhd_path):
            os.makedirs(bad_vhd_path)

    # bad_leaves: VHDs whose parent query failed (broken parent pointer).
    # descendents: parent-filename -> list of child VHD filenames.
    bad_leaves = []
    descendents = {}

    for fname in glob.glob(os.path.join(sr_path, "*.vhd")):
        # `vhd-util query -p` prints the parent; exit code 22 is tolerated
        # here (it still produces parseable output for broken chains).
        (returncode, stdout, stderr) = execute(
            ['vhd-util', 'query', '-n', fname, '-p'], ok_exit_codes=[0, 22])

        stdout = stdout.strip()

        if stdout.endswith('.vhd'):
            try:
                descendents[stdout].append(fname)
            except KeyError:
                descendents[stdout] = [fname]
        elif 'query failed' in stdout:
            bad_leaves.append(fname)

    # Depth-first walk of a bad VHD and everything descended from it.
    def walk_vhds(root):
        yield root
        if root in descendents:
            for child in descendents[root]:
                for vhd in walk_vhds(child):
                    yield vhd

    for bad_leaf in bad_leaves:
        for bad_vhd in walk_vhds(bad_leaf):
            # NOTE(review): the filename is printed for every action, not
            # just 'print' — presumably intentional as progress output.
            print bad_vhd
            if action == "print":
                pass
            elif action == "delete":
                os.unlink(bad_vhd)
            elif action == "move":
                new_path = os.path.join(bad_vhd_path,
                                        os.path.basename(bad_vhd))
                os.rename(bad_vhd, new_path)
            else:
                # Unreachable: action was validated above.
                raise Exception("invalid action %s" % action)


if __name__ == '__main__':
    main()
|
329
tools/xenserver/vm_vdi_cleaner.py
Executable file
329
tools/xenserver/vm_vdi_cleaner.py
Executable file
@ -0,0 +1,329 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""vm_vdi_cleaner.py - List or clean orphaned VDIs/instances on XenServer."""
|
||||
|
||||
import doctest
|
||||
import os
|
||||
import sys
|
||||
|
||||
from oslo.config import cfg
|
||||
import XenAPI
|
||||
|
||||
# If run from a source checkout (nova/__init__.py present in the CWD),
# prefer that tree over any installed nova.
possible_topdir = os.getcwd()
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
    sys.path.insert(0, possible_topdir)

# Nova imports must come after the sys.path adjustment above.
from nova import config
from nova import context
from nova import db
from nova import exception
from oslo.utils import timeutils
from nova.virt import virtapi
from nova.virt.xenapi import driver as xenapi_driver


cleaner_opts = [
    cfg.IntOpt('zombie_instance_updated_at_window',
               default=172800,
               help='Number of seconds zombie instances are cleaned up.'),
]

# --command selects which cleaner action to run (see ALLOWED_COMMANDS).
cli_opt = cfg.StrOpt('command',
                     help='Cleaner command')

CONF = cfg.CONF
CONF.register_opts(cleaner_opts)
CONF.register_cli_opt(cli_opt)
# Pull in options declared elsewhere that this tool reads.
CONF.import_opt('verbose', 'nova.openstack.common.log')
CONF.import_opt("resize_confirm_window", "nova.compute.manager")


ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances",
                    "clean-instances", "test"]
|
||||
def call_xenapi(xenapi, method, *args):
    """Make a call to xapi through the driver's XenAPI session."""
    session = xenapi._session
    return session.call_xenapi(method, *args)
|
||||
|
||||
|
||||
def find_orphaned_instances(xenapi):
    """Find and return a list of orphaned instances.

    Returns a list of (vm_ref, vm_rec, instance) tuples for VMs that are
    either stuck deleting or are zombies (see below).
    """
    # read_deleted="only" — presumably restricts the lookup context to
    # deleted DB records; confirm against nova.context semantics.
    ctxt = context.get_admin_context(read_deleted="only")

    orphaned_instances = []

    for vm_ref, vm_rec in _get_applicable_vm_recs(xenapi):
        try:
            # The nova UUID is stashed in the VM's other_config by the driver.
            uuid = vm_rec['other_config']['nova_uuid']
            instance = db.instance_get_by_uuid(ctxt, uuid)
        except (KeyError, exception.InstanceNotFound):
            # NOTE(jk0): Err on the side of caution here. If we don't know
            # anything about the particular instance, ignore it.
            print_xen_object("INFO: Ignoring VM", vm_rec, indent_level=0)
            continue

        # NOTE(jk0): This would be triggered if a VM was deleted but the
        # actual deletion process failed somewhere along the line.
        is_active_and_deleting = (instance.vm_state == "active" and
                                  instance.task_state == "deleting")

        # NOTE(jk0): A zombie VM is an instance that is not active and hasn't
        # been updated in over the specified period.
        is_zombie_vm = (instance.vm_state != "active"
                        and timeutils.is_older_than(instance.updated_at,
                        CONF.zombie_instance_updated_at_window))

        if is_active_and_deleting or is_zombie_vm:
            orphaned_instances.append((vm_ref, vm_rec, instance))

    return orphaned_instances
|
||||
|
||||
|
||||
def cleanup_instance(xenapi, instance, vm_ref, vm_rec):
    """Delete an orphaned instance via the driver's vmops layer."""
    vmops = xenapi._vmops
    vmops._destroy(instance, vm_ref)
|
||||
|
||||
|
||||
def _get_applicable_vm_recs(xenapi):
    """An 'applicable' VM is one that is not a template and not the control
    domain.

    Yields (vm_ref, vm_rec) pairs. Py2-only `except X, e` syntax replaced
    with the 2/3-compatible `except X as e` form.
    """
    for vm_ref in call_xenapi(xenapi, 'VM.get_all'):
        try:
            vm_rec = call_xenapi(xenapi, 'VM.get_record', vm_ref)
        except XenAPI.Failure as e:
            # A ref can go stale between get_all and get_record; skip it.
            if e.details[0] != 'HANDLE_INVALID':
                raise
            continue

        if vm_rec["is_a_template"] or vm_rec["is_control_domain"]:
            continue
        yield vm_ref, vm_rec
|
||||
|
||||
|
||||
def print_xen_object(obj_type, obj, indent_level=0, spaces_per_indent=4):
    """Pretty-print a Xen object.

    Looks like:

        VM (abcd-abcd-abcd): 'name label here'

    No-op unless CONF.verbose is set. Py2-only print statement replaced
    with the 2/3-compatible call form.
    """
    if not CONF.verbose:
        return
    uuid = obj["uuid"]
    try:
        name_label = obj["name_label"]
    except KeyError:
        # Some objects (e.g. VBDs) have no name_label field.
        name_label = ""
    msg = "%(obj_type)s (%(uuid)s) '%(name_label)s'" % locals()
    indent = " " * spaces_per_indent * indent_level
    print("".join([indent, msg]))
|
||||
|
||||
|
||||
def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids):
    """Find VDIs which are connected to VBDs which are connected to VMs.

    Walks VM -> VBD -> VDI and adds each reachable VDI UUID (plus parents
    up the VHD chain) into the *connected_vdi_uuids* set (mutated in place).
    """
    def _is_null_ref(ref):
        return ref == "OpaqueRef:NULL"

    def _add_vdi_and_parents_to_connected(vdi_rec, indent_level):
        indent_level += 1

        vdi_and_parent_uuids = []
        cur_vdi_rec = vdi_rec
        while True:
            cur_vdi_uuid = cur_vdi_rec["uuid"]
            # NOTE(review): prints vdi_rec (the leaf), not cur_vdi_rec —
            # looks like it was meant to print the current chain entry.
            print_xen_object("VDI", vdi_rec, indent_level=indent_level)
            connected_vdi_uuids.add(cur_vdi_uuid)
            vdi_and_parent_uuids.append(cur_vdi_uuid)

            try:
                # NOTE(review): reads the parent from vdi_rec, not
                # cur_vdi_rec, so only one level of the chain is ever
                # climbed before the equality check below breaks the loop —
                # confirm whether cur_vdi_rec was intended here.
                parent_vdi_uuid = vdi_rec["sm_config"]["vhd-parent"]
            except KeyError:
                parent_vdi_uuid = None

            # NOTE(sirp): VDI's can have themselves as a parent?!
            if parent_vdi_uuid and parent_vdi_uuid != cur_vdi_uuid:
                indent_level += 1
                cur_vdi_ref = call_xenapi(xenapi, 'VDI.get_by_uuid',
                                          parent_vdi_uuid)
                try:
                    cur_vdi_rec = call_xenapi(xenapi, 'VDI.get_record',
                                              cur_vdi_ref)
                except XenAPI.Failure, e:
                    # Parent vanished mid-walk; stop climbing.
                    if e.details[0] != 'HANDLE_INVALID':
                        raise
                    break
            else:
                break

    for vm_ref, vm_rec in _get_applicable_vm_recs(xenapi):
        indent_level = 0
        print_xen_object("VM", vm_rec, indent_level=indent_level)

        vbd_refs = vm_rec["VBDs"]
        for vbd_ref in vbd_refs:
            try:
                vbd_rec = call_xenapi(xenapi, 'VBD.get_record', vbd_ref)
            except XenAPI.Failure, e:
                # VBD ref went stale; skip it.
                if e.details[0] != 'HANDLE_INVALID':
                    raise
                continue

            indent_level = 1
            print_xen_object("VBD", vbd_rec, indent_level=indent_level)

            vbd_vdi_ref = vbd_rec["VDI"]

            # Empty (CD-ROM style) VBDs reference the NULL ref.
            if _is_null_ref(vbd_vdi_ref):
                continue

            try:
                vdi_rec = call_xenapi(xenapi, 'VDI.get_record', vbd_vdi_ref)
            except XenAPI.Failure, e:
                # VDI ref went stale; skip it.
                if e.details[0] != 'HANDLE_INVALID':
                    raise
                continue

            _add_vdi_and_parents_to_connected(vdi_rec, indent_level)
|
||||
|
||||
|
||||
def _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids):
    """Collects all VDIs and adds system VDIs to the connected set.

    Mutates both sets in place: *all_vdi_uuids* gets every VDI's UUID;
    *connected_vdi_uuids* additionally gets system-owned and non-managed
    VDIs so they are never treated as orphans.
    """
    def _system_owned(vdi_rec):
        # Heuristic: USB-prefixed names, ISO images and type=="system"
        # are infrastructure VDIs, not instance disks.
        vdi_name = vdi_rec["name_label"]
        return (vdi_name.startswith("USB") or
                vdi_name.endswith(".iso") or
                vdi_rec["type"] == "system")

    for vdi_ref in call_xenapi(xenapi, 'VDI.get_all'):
        try:
            vdi_rec = call_xenapi(xenapi, 'VDI.get_record', vdi_ref)
        except XenAPI.Failure, e:
            # Ref went stale between get_all and get_record; skip it.
            if e.details[0] != 'HANDLE_INVALID':
                raise
            continue
        vdi_uuid = vdi_rec["uuid"]
        all_vdi_uuids.add(vdi_uuid)

        # System owned and non-managed VDIs should be considered 'connected'
        # for our purposes.
        if _system_owned(vdi_rec):
            print_xen_object("SYSTEM VDI", vdi_rec, indent_level=0)
            connected_vdi_uuids.add(vdi_uuid)
        elif not vdi_rec["managed"]:
            print_xen_object("UNMANAGED VDI", vdi_rec, indent_level=0)
            connected_vdi_uuids.add(vdi_uuid)
|
||||
|
||||
|
||||
def find_orphaned_vdi_uuids(xenapi):
    """Walk VM -> VBD -> VDI chain and accumulate connected VDIs.

    Orphans are all VDIs minus those reachable from a VM (or system-owned/
    non-managed); returns the set of orphaned VDI UUIDs.
    """
    connected_vdi_uuids = set()

    # Mark every VDI reachable through some VM's VBDs (and its VHD parents).
    _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids)

    all_vdi_uuids = set()
    _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids)

    orphaned_vdi_uuids = all_vdi_uuids - connected_vdi_uuids
    return orphaned_vdi_uuids
|
||||
|
||||
|
||||
def list_orphaned_vdis(vdi_uuids):
    """List orphaned VDIs.

    Verbose mode wraps each UUID in a label; otherwise bare UUIDs are
    printed (handy for piping into other tools). Py2-only print statements
    replaced with the 2/3-compatible call form.
    """
    for vdi_uuid in vdi_uuids:
        if CONF.verbose:
            print("ORPHANED VDI (%s)" % vdi_uuid)
        else:
            print(vdi_uuid)
|
||||
|
||||
|
||||
def clean_orphaned_vdis(xenapi, vdi_uuids):
    """Destroy each orphaned VDI, reporting and skipping failures.

    Py2-only print statement / `except X, e` syntax replaced with the
    2/3-compatible forms.
    """
    for vdi_uuid in vdi_uuids:
        if CONF.verbose:
            print("CLEANING VDI (%s)" % vdi_uuid)

        vdi_ref = call_xenapi(xenapi, 'VDI.get_by_uuid', vdi_uuid)
        try:
            call_xenapi(xenapi, 'VDI.destroy', vdi_ref)
        except XenAPI.Failure as exc:
            # Best-effort cleanup: report and continue with the rest.
            sys.stderr.write("Skipping %s: %s\n" % (vdi_uuid, exc))
|
||||
|
||||
|
||||
def list_orphaned_instances(orphaned_instances):
    """List orphaned instances.

    :param orphaned_instances: iterable of (vm_ref, vm_rec, instance)
        tuples as produced by find_orphaned_instances. Py2-only print
        statements replaced with the 2/3-compatible call form.
    """
    for vm_ref, vm_rec, orphaned_instance in orphaned_instances:
        if CONF.verbose:
            print("ORPHANED INSTANCE (%s)" % orphaned_instance.name)
        else:
            print(orphaned_instance.name)
|
||||
|
||||
|
||||
def clean_orphaned_instances(xenapi, orphaned_instances):
    """Clean orphaned instances by destroying each one via the driver.

    :param orphaned_instances: iterable of (vm_ref, vm_rec, instance)
        tuples. Py2-only print statement replaced with the 2/3-compatible
        call form.
    """
    for vm_ref, vm_rec, instance in orphaned_instances:
        if CONF.verbose:
            print("CLEANING INSTANCE (%s)" % instance.name)

        cleanup_instance(xenapi, instance, vm_ref, vm_rec)
|
||||
|
||||
|
||||
def main():
    """Main loop."""
    # Standard nova config bootstrap, then re-parse with a usage string
    # listing the allowed --command values.
    config.parse_args(sys.argv)
    args = CONF(args=sys.argv[1:], usage='%(prog)s [options] --command={' +
                '|'.join(ALLOWED_COMMANDS) + '}')

    command = CONF.command
    if not command or command not in ALLOWED_COMMANDS:
        CONF.print_usage()
        sys.exit(1)

    # A zombie window shorter than the resize-confirm window could delete
    # VMs that are merely awaiting resize confirmation.
    if CONF.zombie_instance_updated_at_window < CONF.resize_confirm_window:
        raise Exception("`zombie_instance_updated_at_window` has to be longer"
                        " than `resize_confirm_window`.")

    # NOTE(blamar) This tool does not require DB access, so passing in the
    # 'abstract' VirtAPI class is acceptable
    xenapi = xenapi_driver.XenAPIDriver(virtapi.VirtAPI())

    if command == "list-vdis":
        if CONF.verbose:
            print "Connected VDIs:\n"
        orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi)
        if CONF.verbose:
            print "\nOrphaned VDIs:\n"
        list_orphaned_vdis(orphaned_vdi_uuids)
    elif command == "clean-vdis":
        orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi)
        clean_orphaned_vdis(xenapi, orphaned_vdi_uuids)
    elif command == "list-instances":
        orphaned_instances = find_orphaned_instances(xenapi)
        list_orphaned_instances(orphaned_instances)
    elif command == "clean-instances":
        orphaned_instances = find_orphaned_instances(xenapi)
        clean_orphaned_instances(xenapi, orphaned_instances)
    elif command == "test":
        # Runs any doctests in this module.
        doctest.testmod()
    else:
        # Unreachable: command was validated against ALLOWED_COMMANDS above.
        print "Unknown command '%s'" % command
        sys.exit(1)


if __name__ == "__main__":
    main()
|
Loading…
Reference in New Issue
Block a user