Merging trunk, small fixes
This commit is contained in:
@@ -38,3 +38,4 @@ include nova/tests/db/nova.austin.sqlite
|
|||||||
include plugins/xenapi/README
|
include plugins/xenapi/README
|
||||||
include plugins/xenapi/etc/xapi.d/plugins/objectstore
|
include plugins/xenapi/etc/xapi.d/plugins/objectstore
|
||||||
include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
|
include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
|
||||||
|
global-exclude *.pyc
|
||||||
|
|||||||
@@ -25,7 +25,6 @@ from eventlet.green import urllib2
|
|||||||
|
|
||||||
import exceptions
|
import exceptions
|
||||||
import gettext
|
import gettext
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
@@ -48,9 +47,11 @@ from nova import utils
|
|||||||
from nova import wsgi
|
from nova import wsgi
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
flags.DEFINE_integer('ajax_console_idle_timeout', 300,
|
flags.DEFINE_integer('ajax_console_idle_timeout', 300,
|
||||||
'Seconds before idle connection destroyed')
|
'Seconds before idle connection destroyed')
|
||||||
|
flags.DEFINE_flag(flags.HelpFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpshortFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpXMLFlag())
|
||||||
|
|
||||||
LOG = logging.getLogger('nova.ajax_console_proxy')
|
LOG = logging.getLogger('nova.ajax_console_proxy')
|
||||||
LOG.setLevel(logging.DEBUG)
|
LOG.setLevel(logging.DEBUG)
|
||||||
@@ -130,6 +131,7 @@ class AjaxConsoleProxy(object):
|
|||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
FLAGS(sys.argv)
|
FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
server = wsgi.Server()
|
server = wsgi.Server()
|
||||||
acp = AjaxConsoleProxy()
|
acp = AjaxConsoleProxy()
|
||||||
acp.register_listeners()
|
acp.register_listeners()
|
||||||
|
|||||||
26
bin/nova-api
26
bin/nova-api
@@ -39,11 +39,18 @@ from nova import log as logging
|
|||||||
from nova import version
|
from nova import version
|
||||||
from nova import wsgi
|
from nova import wsgi
|
||||||
|
|
||||||
logging.basicConfig()
|
|
||||||
LOG = logging.getLogger('nova.api')
|
LOG = logging.getLogger('nova.api')
|
||||||
LOG.setLevel(logging.DEBUG)
|
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
|
flags.DEFINE_string('ec2_listen', "0.0.0.0",
|
||||||
|
'IP address for EC2 API to listen')
|
||||||
|
flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
|
||||||
|
flags.DEFINE_string('osapi_listen', "0.0.0.0",
|
||||||
|
'IP address for OpenStack API to listen')
|
||||||
|
flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
|
||||||
|
flags.DEFINE_flag(flags.HelpFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpshortFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpXMLFlag())
|
||||||
|
|
||||||
API_ENDPOINTS = ['ec2', 'osapi']
|
API_ENDPOINTS = ['ec2', 'osapi']
|
||||||
|
|
||||||
@@ -57,21 +64,15 @@ def run_app(paste_config_file):
|
|||||||
LOG.debug(_("No paste configuration for app: %s"), api)
|
LOG.debug(_("No paste configuration for app: %s"), api)
|
||||||
continue
|
continue
|
||||||
LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
|
LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
|
||||||
wsgi.paste_config_to_flags(config, {
|
|
||||||
"verbose": FLAGS.verbose,
|
|
||||||
"%s_host" % api: config.get('host', '0.0.0.0'),
|
|
||||||
"%s_port" % api: getattr(FLAGS, "%s_port" % api)})
|
|
||||||
LOG.info(_("Running %s API"), api)
|
LOG.info(_("Running %s API"), api)
|
||||||
app = wsgi.load_paste_app(paste_config_file, api)
|
app = wsgi.load_paste_app(paste_config_file, api)
|
||||||
apps.append((app, getattr(FLAGS, "%s_port" % api),
|
apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
|
||||||
getattr(FLAGS, "%s_host" % api)))
|
getattr(FLAGS, "%s_listen" % api)))
|
||||||
if len(apps) == 0:
|
if len(apps) == 0:
|
||||||
LOG.error(_("No known API applications configured in %s."),
|
LOG.error(_("No known API applications configured in %s."),
|
||||||
paste_config_file)
|
paste_config_file)
|
||||||
return
|
return
|
||||||
|
|
||||||
# NOTE(todd): redo logging config, verbose could be set in paste config
|
|
||||||
logging.basicConfig()
|
|
||||||
server = wsgi.Server()
|
server = wsgi.Server()
|
||||||
for app in apps:
|
for app in apps:
|
||||||
server.start(*app)
|
server.start(*app)
|
||||||
@@ -80,8 +81,13 @@ def run_app(paste_config_file):
|
|||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
FLAGS(sys.argv)
|
FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
LOG.audit(_("Starting nova-api node (version %s)"),
|
LOG.audit(_("Starting nova-api node (version %s)"),
|
||||||
version.version_string_with_vcs())
|
version.version_string_with_vcs())
|
||||||
|
LOG.debug(_("Full set of FLAGS:"))
|
||||||
|
for flag in FLAGS:
|
||||||
|
flag_get = FLAGS.get(flag, None)
|
||||||
|
LOG.debug("%(flag)s : %(flag_get)s" % locals())
|
||||||
conf = wsgi.paste_config_file('nova-api.conf')
|
conf = wsgi.paste_config_file('nova-api.conf')
|
||||||
if conf:
|
if conf:
|
||||||
run_app(conf)
|
run_app(conf)
|
||||||
|
|||||||
@@ -1,80 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Combined starter script for Nova services."""
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
eventlet.monkey_patch()
|
|
||||||
|
|
||||||
import gettext
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
# If ../nova/__init__.py exists, add ../ to Python search path, so that
|
|
||||||
# it will override what happens to be installed in /usr/(local/)lib/python...
|
|
||||||
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
|
||||||
os.pardir,
|
|
||||||
os.pardir))
|
|
||||||
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
|
||||||
sys.path.insert(0, possible_topdir)
|
|
||||||
|
|
||||||
gettext.install('nova', unicode=1)
|
|
||||||
|
|
||||||
from nova import flags
|
|
||||||
from nova import log as logging
|
|
||||||
from nova import service
|
|
||||||
from nova import utils
|
|
||||||
from nova import wsgi
|
|
||||||
|
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
utils.default_flagfile()
|
|
||||||
FLAGS(sys.argv)
|
|
||||||
logging.basicConfig()
|
|
||||||
|
|
||||||
compute = service.Service.create(binary='nova-compute')
|
|
||||||
network = service.Service.create(binary='nova-network')
|
|
||||||
volume = service.Service.create(binary='nova-volume')
|
|
||||||
scheduler = service.Service.create(binary='nova-scheduler')
|
|
||||||
#objectstore = service.Service.create(binary='nova-objectstore')
|
|
||||||
|
|
||||||
service.serve(compute, network, volume, scheduler)
|
|
||||||
|
|
||||||
apps = []
|
|
||||||
paste_config_file = wsgi.paste_config_file('nova-api.conf')
|
|
||||||
for api in ['osapi', 'ec2']:
|
|
||||||
config = wsgi.load_paste_configuration(paste_config_file, api)
|
|
||||||
if config is None:
|
|
||||||
continue
|
|
||||||
wsgi.paste_config_to_flags(config, {
|
|
||||||
"verbose": FLAGS.verbose,
|
|
||||||
"%s_host" % api: config.get('host', '0.0.0.0'),
|
|
||||||
"%s_port" % api: getattr(FLAGS, "%s_port" % api)})
|
|
||||||
app = wsgi.load_paste_app(paste_config_file, api)
|
|
||||||
apps.append((app, getattr(FLAGS, "%s_port" % api),
|
|
||||||
getattr(FLAGS, "%s_host" % api)))
|
|
||||||
if len(apps) > 0:
|
|
||||||
logging.basicConfig()
|
|
||||||
server = wsgi.Server()
|
|
||||||
for app in apps:
|
|
||||||
server.start(*app)
|
|
||||||
server.wait()
|
|
||||||
@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
|||||||
|
|
||||||
gettext.install('nova', unicode=1)
|
gettext.install('nova', unicode=1)
|
||||||
|
|
||||||
|
from nova import flags
|
||||||
|
from nova import log as logging
|
||||||
from nova import service
|
from nova import service
|
||||||
from nova import utils
|
from nova import utils
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
|
flags.FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
service.serve()
|
service.serve()
|
||||||
service.wait()
|
service.wait()
|
||||||
|
|||||||
@@ -35,10 +35,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
|||||||
|
|
||||||
gettext.install('nova', unicode=1)
|
gettext.install('nova', unicode=1)
|
||||||
|
|
||||||
|
from nova import flags
|
||||||
|
from nova import log as logging
|
||||||
from nova import service
|
from nova import service
|
||||||
from nova import utils
|
from nova import utils
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
|
flags.FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
service.serve()
|
service.serve()
|
||||||
service.wait()
|
service.wait()
|
||||||
|
|||||||
@@ -102,7 +102,7 @@ def main():
|
|||||||
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
|
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
|
||||||
utils.default_flagfile(flagfile)
|
utils.default_flagfile(flagfile)
|
||||||
argv = FLAGS(sys.argv)
|
argv = FLAGS(sys.argv)
|
||||||
logging.basicConfig()
|
logging.setup()
|
||||||
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
|
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
|
||||||
if int(os.environ.get('TESTING', '0')):
|
if int(os.environ.get('TESTING', '0')):
|
||||||
FLAGS.fake_rabbit = True
|
FLAGS.fake_rabbit = True
|
||||||
@@ -113,7 +113,9 @@ def main():
|
|||||||
FLAGS.num_networks = 5
|
FLAGS.num_networks = 5
|
||||||
path = os.path.abspath(os.path.join(os.path.dirname(__file__),
|
path = os.path.abspath(os.path.join(os.path.dirname(__file__),
|
||||||
'..',
|
'..',
|
||||||
'nova.sqlite'))
|
'nova',
|
||||||
|
'tests',
|
||||||
|
'tests.sqlite'))
|
||||||
FLAGS.sql_connection = 'sqlite:///%s' % path
|
FLAGS.sql_connection = 'sqlite:///%s' % path
|
||||||
action = argv[1]
|
action = argv[1]
|
||||||
if action in ['add', 'del', 'old']:
|
if action in ['add', 'del', 'old']:
|
||||||
|
|||||||
@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
|||||||
gettext.install('nova', unicode=1)
|
gettext.install('nova', unicode=1)
|
||||||
|
|
||||||
from nova import flags
|
from nova import flags
|
||||||
|
from nova import log as logging
|
||||||
from nova import utils
|
from nova import utils
|
||||||
from nova import wsgi
|
from nova import wsgi
|
||||||
from nova.api import direct
|
from nova.api import direct
|
||||||
@@ -44,10 +45,15 @@ from nova.compute import api as compute_api
|
|||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
flags.DEFINE_integer('direct_port', 8001, 'Direct API port')
|
flags.DEFINE_integer('direct_port', 8001, 'Direct API port')
|
||||||
flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host')
|
flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host')
|
||||||
|
flags.DEFINE_flag(flags.HelpFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpshortFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpXMLFlag())
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
FLAGS(sys.argv)
|
FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
|
|
||||||
direct.register_service('compute', compute_api.API())
|
direct.register_service('compute', compute_api.API())
|
||||||
direct.register_service('reflect', direct.Reflection())
|
direct.register_service('reflect', direct.Reflection())
|
||||||
|
|||||||
@@ -41,6 +41,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
|||||||
gettext.install('nova', unicode=1)
|
gettext.install('nova', unicode=1)
|
||||||
|
|
||||||
from nova import flags
|
from nova import flags
|
||||||
|
from nova import log as logging
|
||||||
from nova import utils
|
from nova import utils
|
||||||
from nova.objectstore import image
|
from nova.objectstore import image
|
||||||
|
|
||||||
@@ -92,6 +93,7 @@ def main():
|
|||||||
"""Main entry point."""
|
"""Main entry point."""
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
argv = FLAGS(sys.argv)
|
argv = FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
images = get_images()
|
images = get_images()
|
||||||
|
|
||||||
if len(argv) == 2:
|
if len(argv) == 2:
|
||||||
|
|||||||
@@ -41,9 +41,6 @@ from nova import utils
|
|||||||
from nova import twistd
|
from nova import twistd
|
||||||
from nova.compute import monitor
|
from nova.compute import monitor
|
||||||
|
|
||||||
# TODO(todd): shouldn't this be done with flags? And what about verbose?
|
|
||||||
logging.getLogger('boto').setLevel(logging.WARN)
|
|
||||||
|
|
||||||
LOG = logging.getLogger('nova.instancemonitor')
|
LOG = logging.getLogger('nova.instancemonitor')
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -86,8 +86,6 @@ from nova.auth import manager
|
|||||||
from nova.cloudpipe import pipelib
|
from nova.cloudpipe import pipelib
|
||||||
from nova.db import migration
|
from nova.db import migration
|
||||||
|
|
||||||
|
|
||||||
logging.basicConfig()
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
flags.DECLARE('fixed_range', 'nova.network.manager')
|
flags.DECLARE('fixed_range', 'nova.network.manager')
|
||||||
flags.DECLARE('num_networks', 'nova.network.manager')
|
flags.DECLARE('num_networks', 'nova.network.manager')
|
||||||
@@ -95,6 +93,9 @@ flags.DECLARE('network_size', 'nova.network.manager')
|
|||||||
flags.DECLARE('vlan_start', 'nova.network.manager')
|
flags.DECLARE('vlan_start', 'nova.network.manager')
|
||||||
flags.DECLARE('vpn_start', 'nova.network.manager')
|
flags.DECLARE('vpn_start', 'nova.network.manager')
|
||||||
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
|
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
|
||||||
|
flags.DEFINE_flag(flags.HelpFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpshortFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpXMLFlag())
|
||||||
|
|
||||||
|
|
||||||
def param2id(object_id):
|
def param2id(object_id):
|
||||||
@@ -710,6 +711,7 @@ def main():
|
|||||||
"""Parse options and call the appropriate class/method."""
|
"""Parse options and call the appropriate class/method."""
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
argv = FLAGS(sys.argv)
|
argv = FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
|
|
||||||
script_name = argv.pop(0)
|
script_name = argv.pop(0)
|
||||||
if len(argv) < 1:
|
if len(argv) < 1:
|
||||||
|
|||||||
@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
|||||||
|
|
||||||
gettext.install('nova', unicode=1)
|
gettext.install('nova', unicode=1)
|
||||||
|
|
||||||
|
from nova import flags
|
||||||
|
from nova import log as logging
|
||||||
from nova import service
|
from nova import service
|
||||||
from nova import utils
|
from nova import utils
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
|
flags.FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
service.serve()
|
service.serve()
|
||||||
service.wait()
|
service.wait()
|
||||||
|
|||||||
@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
|||||||
|
|
||||||
gettext.install('nova', unicode=1)
|
gettext.install('nova', unicode=1)
|
||||||
|
|
||||||
|
from nova import flags
|
||||||
|
from nova import log as logging
|
||||||
from nova import service
|
from nova import service
|
||||||
from nova import utils
|
from nova import utils
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
|
flags.FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
service.serve()
|
service.serve()
|
||||||
service.wait()
|
service.wait()
|
||||||
|
|||||||
@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
|||||||
|
|
||||||
gettext.install('nova', unicode=1)
|
gettext.install('nova', unicode=1)
|
||||||
|
|
||||||
|
from nova import flags
|
||||||
|
from nova import log as logging
|
||||||
from nova import service
|
from nova import service
|
||||||
from nova import utils
|
from nova import utils
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
utils.default_flagfile()
|
utils.default_flagfile()
|
||||||
|
flags.FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
service.serve()
|
service.serve()
|
||||||
service.wait()
|
service.wait()
|
||||||
|
|||||||
@@ -66,7 +66,7 @@ if [ "$CMD" == "install" ]; then
|
|||||||
sudo apt-get install -y user-mode-linux kvm libvirt-bin
|
sudo apt-get install -y user-mode-linux kvm libvirt-bin
|
||||||
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
|
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
|
||||||
sudo apt-get install -y lvm2 iscsitarget open-iscsi
|
sudo apt-get install -y lvm2 iscsitarget open-iscsi
|
||||||
sudo apt-get install -y socat
|
sudo apt-get install -y socat unzip
|
||||||
echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
|
echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
|
||||||
sudo /etc/init.d/iscsitarget restart
|
sudo /etc/init.d/iscsitarget restart
|
||||||
sudo modprobe kvm
|
sudo modprobe kvm
|
||||||
@@ -111,8 +111,7 @@ if [ "$CMD" == "run" ]; then
|
|||||||
--nodaemon
|
--nodaemon
|
||||||
--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
|
--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
|
||||||
--network_manager=nova.network.manager.$NET_MAN
|
--network_manager=nova.network.manager.$NET_MAN
|
||||||
--cc_host=$HOST_IP
|
--my_ip=$HOST_IP
|
||||||
--routing_source_ip=$HOST_IP
|
|
||||||
--sql_connection=$SQL_CONN
|
--sql_connection=$SQL_CONN
|
||||||
--auth_driver=nova.auth.$AUTH
|
--auth_driver=nova.auth.$AUTH
|
||||||
--libvirt_type=$LIBVIRT_TYPE
|
--libvirt_type=$LIBVIRT_TYPE
|
||||||
@@ -151,7 +150,6 @@ NOVA_CONF_EOF
|
|||||||
mkdir -p $NOVA_DIR/instances
|
mkdir -p $NOVA_DIR/instances
|
||||||
rm -rf $NOVA_DIR/networks
|
rm -rf $NOVA_DIR/networks
|
||||||
mkdir -p $NOVA_DIR/networks
|
mkdir -p $NOVA_DIR/networks
|
||||||
$NOVA_DIR/tools/clean-vlans
|
|
||||||
if [ ! -d "$NOVA_DIR/images" ]; then
|
if [ ! -d "$NOVA_DIR/images" ]; then
|
||||||
ln -s $DIR/images $NOVA_DIR/images
|
ln -s $DIR/images $NOVA_DIR/images
|
||||||
fi
|
fi
|
||||||
@@ -169,10 +167,14 @@ NOVA_CONF_EOF
|
|||||||
# create a project called 'admin' with project manager of 'admin'
|
# create a project called 'admin' with project manager of 'admin'
|
||||||
$NOVA_DIR/bin/nova-manage project create admin admin
|
$NOVA_DIR/bin/nova-manage project create admin admin
|
||||||
# export environment variables for project 'admin' and user 'admin'
|
# export environment variables for project 'admin' and user 'admin'
|
||||||
$NOVA_DIR/bin/nova-manage project environment admin admin $NOVA_DIR/novarc
|
$NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
|
||||||
|
unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
|
||||||
# create a small network
|
# create a small network
|
||||||
$NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
|
$NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
|
||||||
|
|
||||||
|
# create some floating ips
|
||||||
|
$NOVA_DIR/bin/nova-manage floating create `hostname` 10.6.0.0/27
|
||||||
|
|
||||||
# nova api crashes if we start it with a regular screen command,
|
# nova api crashes if we start it with a regular screen command,
|
||||||
# so send the start command by forcing text into the window.
|
# so send the start command by forcing text into the window.
|
||||||
screen_it api "$NOVA_DIR/bin/nova-api"
|
screen_it api "$NOVA_DIR/bin/nova-api"
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
ENABLED=true
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
ENABLED=true
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
-----------------------------------------------
|
|
||||||
|
|
||||||
Welcome to your OpenStack installation!
|
|
||||||
|
|
||||||
-----------------------------------------------
|
|
||||||
@@ -1,170 +0,0 @@
|
|||||||
# Master configuration file for the QEMU driver.
|
|
||||||
# All settings described here are optional - if omitted, sensible
|
|
||||||
# defaults are used.
|
|
||||||
|
|
||||||
# VNC is configured to listen on 127.0.0.1 by default.
|
|
||||||
# To make it listen on all public interfaces, uncomment
|
|
||||||
# this next option.
|
|
||||||
#
|
|
||||||
# NB, strong recommendation to enable TLS + x509 certificate
|
|
||||||
# verification when allowing public access
|
|
||||||
#
|
|
||||||
# vnc_listen = "0.0.0.0"
|
|
||||||
|
|
||||||
|
|
||||||
# Enable use of TLS encryption on the VNC server. This requires
|
|
||||||
# a VNC client which supports the VeNCrypt protocol extension.
|
|
||||||
# Examples include vinagre, virt-viewer, virt-manager and vencrypt
|
|
||||||
# itself. UltraVNC, RealVNC, TightVNC do not support this
|
|
||||||
#
|
|
||||||
# It is necessary to setup CA and issue a server certificate
|
|
||||||
# before enabling this.
|
|
||||||
#
|
|
||||||
# vnc_tls = 1
|
|
||||||
|
|
||||||
|
|
||||||
# Use of TLS requires that x509 certificates be issued. The
|
|
||||||
# default it to keep them in /etc/pki/libvirt-vnc. This directory
|
|
||||||
# must contain
|
|
||||||
#
|
|
||||||
# ca-cert.pem - the CA master certificate
|
|
||||||
# server-cert.pem - the server certificate signed with ca-cert.pem
|
|
||||||
# server-key.pem - the server private key
|
|
||||||
#
|
|
||||||
# This option allows the certificate directory to be changed
|
|
||||||
#
|
|
||||||
# vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
|
|
||||||
|
|
||||||
|
|
||||||
# The default TLS configuration only uses certificates for the server
|
|
||||||
# allowing the client to verify the server's identity and establish
|
|
||||||
# and encrypted channel.
|
|
||||||
#
|
|
||||||
# It is possible to use x509 certificates for authentication too, by
|
|
||||||
# issuing a x509 certificate to every client who needs to connect.
|
|
||||||
#
|
|
||||||
# Enabling this option will reject any client who does not have a
|
|
||||||
# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
|
|
||||||
#
|
|
||||||
# vnc_tls_x509_verify = 1
|
|
||||||
|
|
||||||
|
|
||||||
# The default VNC password. Only 8 letters are significant for
|
|
||||||
# VNC passwords. This parameter is only used if the per-domain
|
|
||||||
# XML config does not already provide a password. To allow
|
|
||||||
# access without passwords, leave this commented out. An empty
|
|
||||||
# string will still enable passwords, but be rejected by QEMU
|
|
||||||
# effectively preventing any use of VNC. Obviously change this
|
|
||||||
# example here before you set this
|
|
||||||
#
|
|
||||||
# vnc_password = "XYZ12345"
|
|
||||||
|
|
||||||
|
|
||||||
# Enable use of SASL encryption on the VNC server. This requires
|
|
||||||
# a VNC client which supports the SASL protocol extension.
|
|
||||||
# Examples include vinagre, virt-viewer and virt-manager
|
|
||||||
# itself. UltraVNC, RealVNC, TightVNC do not support this
|
|
||||||
#
|
|
||||||
# It is necessary to configure /etc/sasl2/qemu.conf to choose
|
|
||||||
# the desired SASL plugin (eg, GSSPI for Kerberos)
|
|
||||||
#
|
|
||||||
# vnc_sasl = 1
|
|
||||||
|
|
||||||
|
|
||||||
# The default SASL configuration file is located in /etc/sasl2/
|
|
||||||
# When running libvirtd unprivileged, it may be desirable to
|
|
||||||
# override the configs in this location. Set this parameter to
|
|
||||||
# point to the directory, and create a qemu.conf in that location
|
|
||||||
#
|
|
||||||
# vnc_sasl_dir = "/some/directory/sasl2"
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# The default security driver is SELinux. If SELinux is disabled
|
|
||||||
# on the host, then the security driver will automatically disable
|
|
||||||
# itself. If you wish to disable QEMU SELinux security driver while
|
|
||||||
# leaving SELinux enabled for the host in general, then set this
|
|
||||||
# to 'none' instead
|
|
||||||
#
|
|
||||||
# security_driver = "selinux"
|
|
||||||
|
|
||||||
|
|
||||||
# The user ID for QEMU processes run by the system instance
|
|
||||||
user = "root"
|
|
||||||
|
|
||||||
# The group ID for QEMU processes run by the system instance
|
|
||||||
group = "root"
|
|
||||||
|
|
||||||
# Whether libvirt should dynamically change file ownership
|
|
||||||
# to match the configured user/group above. Defaults to 1.
|
|
||||||
# Set to 0 to disable file ownership changes.
|
|
||||||
#dynamic_ownership = 1
|
|
||||||
|
|
||||||
|
|
||||||
# What cgroup controllers to make use of with QEMU guests
|
|
||||||
#
|
|
||||||
# - 'cpu' - use for schedular tunables
|
|
||||||
# - 'devices' - use for device whitelisting
|
|
||||||
#
|
|
||||||
# NB, even if configured here, they won't be used unless
|
|
||||||
# the adminsitrator has mounted cgroups. eg
|
|
||||||
#
|
|
||||||
# mkdir /dev/cgroup
|
|
||||||
# mount -t cgroup -o devices,cpu none /dev/cgroup
|
|
||||||
#
|
|
||||||
# They can be mounted anywhere, and different controlers
|
|
||||||
# can be mounted in different locations. libvirt will detect
|
|
||||||
# where they are located.
|
|
||||||
#
|
|
||||||
# cgroup_controllers = [ "cpu", "devices" ]
|
|
||||||
|
|
||||||
# This is the basic set of devices allowed / required by
|
|
||||||
# all virtual machines.
|
|
||||||
#
|
|
||||||
# As well as this, any configured block backed disks,
|
|
||||||
# all sound device, and all PTY devices are allowed.
|
|
||||||
#
|
|
||||||
# This will only need setting if newer QEMU suddenly
|
|
||||||
# wants some device we don't already know a bout.
|
|
||||||
#
|
|
||||||
#cgroup_device_acl = [
|
|
||||||
# "/dev/null", "/dev/full", "/dev/zero",
|
|
||||||
# "/dev/random", "/dev/urandom",
|
|
||||||
# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
|
|
||||||
# "/dev/rtc", "/dev/hpet", "/dev/net/tun",
|
|
||||||
#]
|
|
||||||
|
|
||||||
# The default format for Qemu/KVM guest save images is raw; that is, the
|
|
||||||
# memory from the domain is dumped out directly to a file. If you have
|
|
||||||
# guests with a large amount of memory, however, this can take up quite
|
|
||||||
# a bit of space. If you would like to compress the images while they
|
|
||||||
# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
|
|
||||||
# for save_image_format. Note that this means you slow down the process of
|
|
||||||
# saving a domain in order to save disk space; the list above is in descending
|
|
||||||
# order by performance and ascending order by compression ratio.
|
|
||||||
#
|
|
||||||
# save_image_format = "raw"
|
|
||||||
|
|
||||||
# If provided by the host and a hugetlbfs mount point is configured,
|
|
||||||
# a guest may request huge page backing. When this mount point is
|
|
||||||
# unspecified here, determination of a host mount point in /proc/mounts
|
|
||||||
# will be attempted. Specifying an explicit mount overrides detection
|
|
||||||
# of the same in /proc/mounts. Setting the mount point to "" will
|
|
||||||
# disable guest hugepage backing.
|
|
||||||
#
|
|
||||||
# NB, within this mount point, guests will create memory backing files
|
|
||||||
# in a location of $MOUNTPOINT/libvirt/qemu
|
|
||||||
|
|
||||||
# hugetlbfs_mount = "/dev/hugepages"
|
|
||||||
|
|
||||||
# mac_filter enables MAC addressed based filtering on bridge ports.
|
|
||||||
# This currently requires ebtables to be installed.
|
|
||||||
#
|
|
||||||
# mac_filter = 1
|
|
||||||
|
|
||||||
# By default, PCI devices below non-ACS switch are not allowed to be assigned
|
|
||||||
# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to
|
|
||||||
# be assigned to guests.
|
|
||||||
#
|
|
||||||
# relaxed_acs_check = 1
|
|
||||||
@@ -1,463 +0,0 @@
|
|||||||
# This is an example configuration file for the LVM2 system.
|
|
||||||
# It contains the default settings that would be used if there was no
|
|
||||||
# /etc/lvm/lvm.conf file.
|
|
||||||
#
|
|
||||||
# Refer to 'man lvm.conf' for further information including the file layout.
|
|
||||||
#
|
|
||||||
# To put this file in a different directory and override /etc/lvm set
|
|
||||||
# the environment variable LVM_SYSTEM_DIR before running the tools.
|
|
||||||
|
|
||||||
|
|
||||||
# This section allows you to configure which block devices should
|
|
||||||
# be used by the LVM system.
|
|
||||||
devices {
|
|
||||||
|
|
||||||
# Where do you want your volume groups to appear ?
|
|
||||||
dir = "/dev"
|
|
||||||
|
|
||||||
# An array of directories that contain the device nodes you wish
|
|
||||||
# to use with LVM2.
|
|
||||||
scan = [ "/dev" ]
|
|
||||||
|
|
||||||
# If several entries in the scanned directories correspond to the
|
|
||||||
# same block device and the tools need to display a name for device,
|
|
||||||
# all the pathnames are matched against each item in the following
|
|
||||||
# list of regular expressions in turn and the first match is used.
|
|
||||||
preferred_names = [ ]
|
|
||||||
|
|
||||||
# Try to avoid using undescriptive /dev/dm-N names, if present.
|
|
||||||
# preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
|
|
||||||
|
|
||||||
# A filter that tells LVM2 to only use a restricted set of devices.
|
|
||||||
# The filter consists of an array of regular expressions. These
|
|
||||||
# expressions can be delimited by a character of your choice, and
|
|
||||||
# prefixed with either an 'a' (for accept) or 'r' (for reject).
|
|
||||||
# The first expression found to match a device name determines if
|
|
||||||
# the device will be accepted or rejected (ignored). Devices that
|
|
||||||
# don't match any patterns are accepted.
|
|
||||||
|
|
||||||
# Be careful if there there are symbolic links or multiple filesystem
|
|
||||||
# entries for the same device as each name is checked separately against
|
|
||||||
# the list of patterns. The effect is that if any name matches any 'a'
|
|
||||||
# pattern, the device is accepted; otherwise if any name matches any 'r'
|
|
||||||
# pattern it is rejected; otherwise it is accepted.
|
|
||||||
|
|
||||||
# Don't have more than one filter line active at once: only one gets used.
|
|
||||||
|
|
||||||
# Run vgscan after you change this parameter to ensure that
|
|
||||||
# the cache file gets regenerated (see below).
|
|
||||||
# If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
|
|
||||||
|
|
||||||
|
|
||||||
# By default we accept every block device:
|
|
||||||
filter = [ "r|/dev/etherd/.*|", "r|/dev/block/.*|", "a/.*/" ]
|
|
||||||
|
|
||||||
# Exclude the cdrom drive
|
|
||||||
# filter = [ "r|/dev/cdrom|" ]
|
|
||||||
|
|
||||||
# When testing I like to work with just loopback devices:
|
|
||||||
# filter = [ "a/loop/", "r/.*/" ]
|
|
||||||
|
|
||||||
# Or maybe all loops and ide drives except hdc:
|
|
||||||
# filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
|
|
||||||
|
|
||||||
# Use anchors if you want to be really specific
|
|
||||||
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
|
|
||||||
|
|
||||||
# The results of the filtering are cached on disk to avoid
|
|
||||||
# rescanning dud devices (which can take a very long time).
|
|
||||||
# By default this cache is stored in the /etc/lvm/cache directory
|
|
||||||
# in a file called '.cache'.
|
|
||||||
# It is safe to delete the contents: the tools regenerate it.
|
|
||||||
# (The old setting 'cache' is still respected if neither of
|
|
||||||
# these new ones is present.)
|
|
||||||
cache_dir = "/etc/lvm/cache"
|
|
||||||
cache_file_prefix = ""
|
|
||||||
|
|
||||||
# You can turn off writing this cache file by setting this to 0.
|
|
||||||
write_cache_state = 1
|
|
||||||
|
|
||||||
# Advanced settings.
|
|
||||||
|
|
||||||
# List of pairs of additional acceptable block device types found
|
|
||||||
# in /proc/devices with maximum (non-zero) number of partitions.
|
|
||||||
# types = [ "fd", 16 ]
|
|
||||||
|
|
||||||
# If sysfs is mounted (2.6 kernels) restrict device scanning to
|
|
||||||
# the block devices it believes are valid.
|
|
||||||
# 1 enables; 0 disables.
|
|
||||||
sysfs_scan = 1
|
|
||||||
|
|
||||||
# By default, LVM2 will ignore devices used as components of
|
|
||||||
# software RAID (md) devices by looking for md superblocks.
|
|
||||||
# 1 enables; 0 disables.
|
|
||||||
md_component_detection = 1
|
|
||||||
|
|
||||||
# By default, if a PV is placed directly upon an md device, LVM2
|
|
||||||
# will align its data blocks with the md device's stripe-width.
|
|
||||||
# 1 enables; 0 disables.
|
|
||||||
md_chunk_alignment = 1
|
|
||||||
|
|
||||||
# By default, the start of a PV's data area will be a multiple of
|
|
||||||
# the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
|
|
||||||
# - minimum_io_size - the smallest request the device can perform
|
|
||||||
# w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
|
|
||||||
# - optimal_io_size - the device's preferred unit of receiving I/O
|
|
||||||
# (e.g. MD's stripe width)
|
|
||||||
# minimum_io_size is used if optimal_io_size is undefined (0).
|
|
||||||
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
|
|
||||||
# This setting takes precedence over md_chunk_alignment.
|
|
||||||
# 1 enables; 0 disables.
|
|
||||||
data_alignment_detection = 1
|
|
||||||
|
|
||||||
# Alignment (in KB) of start of data area when creating a new PV.
|
|
||||||
# If a PV is placed directly upon an md device and md_chunk_alignment or
|
|
||||||
# data_alignment_detection is enabled this parameter is ignored.
|
|
||||||
# Set to 0 for the default alignment of 64KB or page size, if larger.
|
|
||||||
data_alignment = 0
|
|
||||||
|
|
||||||
# By default, the start of the PV's aligned data area will be shifted by
|
|
||||||
# the 'alignment_offset' exposed in sysfs. This offset is often 0 but
|
|
||||||
# may be non-zero; e.g.: certain 4KB sector drives that compensate for
|
|
||||||
# windows partitioning will have an alignment_offset of 3584 bytes
|
|
||||||
# (sector 7 is the lowest aligned logical block, the 4KB sectors start
|
|
||||||
# at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
|
|
||||||
# 1 enables; 0 disables.
|
|
||||||
data_alignment_offset_detection = 1
|
|
||||||
|
|
||||||
# If, while scanning the system for PVs, LVM2 encounters a device-mapper
|
|
||||||
# device that has its I/O suspended, it waits for it to become accessible.
|
|
||||||
# Set this to 1 to skip such devices. This should only be needed
|
|
||||||
# in recovery situations.
|
|
||||||
ignore_suspended_devices = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# This section that allows you to configure the nature of the
|
|
||||||
# information that LVM2 reports.
|
|
||||||
log {
|
|
||||||
|
|
||||||
# Controls the messages sent to stdout or stderr.
|
|
||||||
# There are three levels of verbosity, 3 being the most verbose.
|
|
||||||
verbose = 0
|
|
||||||
|
|
||||||
# Should we send log messages through syslog?
|
|
||||||
# 1 is yes; 0 is no.
|
|
||||||
syslog = 1
|
|
||||||
|
|
||||||
# Should we log error and debug messages to a file?
|
|
||||||
# By default there is no log file.
|
|
||||||
#file = "/var/log/lvm2.log"
|
|
||||||
|
|
||||||
# Should we overwrite the log file each time the program is run?
|
|
||||||
# By default we append.
|
|
||||||
overwrite = 0
|
|
||||||
|
|
||||||
# What level of log messages should we send to the log file and/or syslog?
|
|
||||||
# There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
|
|
||||||
# 7 is the most verbose (LOG_DEBUG).
|
|
||||||
level = 0
|
|
||||||
|
|
||||||
# Format of output messages
|
|
||||||
# Whether or not (1 or 0) to indent messages according to their severity
|
|
||||||
indent = 1
|
|
||||||
|
|
||||||
# Whether or not (1 or 0) to display the command name on each line output
|
|
||||||
command_names = 0
|
|
||||||
|
|
||||||
# A prefix to use before the message text (but after the command name,
|
|
||||||
# if selected). Default is two spaces, so you can see/grep the severity
|
|
||||||
# of each message.
|
|
||||||
prefix = " "
|
|
||||||
|
|
||||||
# To make the messages look similar to the original LVM tools use:
|
|
||||||
# indent = 0
|
|
||||||
# command_names = 1
|
|
||||||
# prefix = " -- "
|
|
||||||
|
|
||||||
# Set this if you want log messages during activation.
|
|
||||||
# Don't use this in low memory situations (can deadlock).
|
|
||||||
# activation = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Configuration of metadata backups and archiving. In LVM2 when we
|
|
||||||
# talk about a 'backup' we mean making a copy of the metadata for the
|
|
||||||
# *current* system. The 'archive' contains old metadata configurations.
|
|
||||||
# Backups are stored in a human readeable text format.
|
|
||||||
backup {
|
|
||||||
|
|
||||||
# Should we maintain a backup of the current metadata configuration ?
|
|
||||||
# Use 1 for Yes; 0 for No.
|
|
||||||
# Think very hard before turning this off!
|
|
||||||
backup = 1
|
|
||||||
|
|
||||||
# Where shall we keep it ?
|
|
||||||
# Remember to back up this directory regularly!
|
|
||||||
backup_dir = "/etc/lvm/backup"
|
|
||||||
|
|
||||||
# Should we maintain an archive of old metadata configurations.
|
|
||||||
# Use 1 for Yes; 0 for No.
|
|
||||||
# On by default. Think very hard before turning this off.
|
|
||||||
archive = 1
|
|
||||||
|
|
||||||
# Where should archived files go ?
|
|
||||||
# Remember to back up this directory regularly!
|
|
||||||
archive_dir = "/etc/lvm/archive"
|
|
||||||
|
|
||||||
# What is the minimum number of archive files you wish to keep ?
|
|
||||||
retain_min = 10
|
|
||||||
|
|
||||||
# What is the minimum time you wish to keep an archive file for ?
|
|
||||||
retain_days = 30
|
|
||||||
}
|
|
||||||
|
|
||||||
# Settings for the running LVM2 in shell (readline) mode.
|
|
||||||
shell {
|
|
||||||
|
|
||||||
# Number of lines of history to store in ~/.lvm_history
|
|
||||||
history_size = 100
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
# Miscellaneous global LVM2 settings
|
|
||||||
global {
|
|
||||||
|
|
||||||
# The file creation mask for any files and directories created.
|
|
||||||
# Interpreted as octal if the first digit is zero.
|
|
||||||
umask = 077
|
|
||||||
|
|
||||||
# Allow other users to read the files
|
|
||||||
#umask = 022
|
|
||||||
|
|
||||||
# Enabling test mode means that no changes to the on disk metadata
|
|
||||||
# will be made. Equivalent to having the -t option on every
|
|
||||||
# command. Defaults to off.
|
|
||||||
test = 0
|
|
||||||
|
|
||||||
# Default value for --units argument
|
|
||||||
units = "h"
|
|
||||||
|
|
||||||
# Since version 2.02.54, the tools distinguish between powers of
|
|
||||||
# 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
|
|
||||||
# KB, MB, GB).
|
|
||||||
# If you have scripts that depend on the old behaviour, set this to 0
|
|
||||||
# temporarily until you update them.
|
|
||||||
si_unit_consistency = 1
|
|
||||||
|
|
||||||
# Whether or not to communicate with the kernel device-mapper.
|
|
||||||
# Set to 0 if you want to use the tools to manipulate LVM metadata
|
|
||||||
# without activating any logical volumes.
|
|
||||||
# If the device-mapper kernel driver is not present in your kernel
|
|
||||||
# setting this to 0 should suppress the error messages.
|
|
||||||
activation = 1
|
|
||||||
|
|
||||||
# If we can't communicate with device-mapper, should we try running
|
|
||||||
# the LVM1 tools?
|
|
||||||
# This option only applies to 2.4 kernels and is provided to help you
|
|
||||||
# switch between device-mapper kernels and LVM1 kernels.
|
|
||||||
# The LVM1 tools need to be installed with .lvm1 suffices
|
|
||||||
# e.g. vgscan.lvm1 and they will stop working after you start using
|
|
||||||
# the new lvm2 on-disk metadata format.
|
|
||||||
# The default value is set when the tools are built.
|
|
||||||
# fallback_to_lvm1 = 0
|
|
||||||
|
|
||||||
# The default metadata format that commands should use - "lvm1" or "lvm2".
|
|
||||||
# The command line override is -M1 or -M2.
|
|
||||||
# Defaults to "lvm2".
|
|
||||||
# format = "lvm2"
|
|
||||||
|
|
||||||
# Location of proc filesystem
|
|
||||||
proc = "/proc"
|
|
||||||
|
|
||||||
# Type of locking to use. Defaults to local file-based locking (1).
|
|
||||||
# Turn locking off by setting to 0 (dangerous: risks metadata corruption
|
|
||||||
# if LVM2 commands get run concurrently).
|
|
||||||
# Type 2 uses the external shared library locking_library.
|
|
||||||
# Type 3 uses built-in clustered locking.
|
|
||||||
# Type 4 uses read-only locking which forbids any operations that might
|
|
||||||
# change metadata.
|
|
||||||
locking_type = 1
|
|
||||||
|
|
||||||
# Set to 0 to fail when a lock request cannot be satisfied immediately.
|
|
||||||
wait_for_locks = 1
|
|
||||||
|
|
||||||
# If using external locking (type 2) and initialisation fails,
|
|
||||||
# with this set to 1 an attempt will be made to use the built-in
|
|
||||||
# clustered locking.
|
|
||||||
# If you are using a customised locking_library you should set this to 0.
|
|
||||||
fallback_to_clustered_locking = 1
|
|
||||||
|
|
||||||
# If an attempt to initialise type 2 or type 3 locking failed, perhaps
|
|
||||||
# because cluster components such as clvmd are not running, with this set
|
|
||||||
# to 1 an attempt will be made to use local file-based locking (type 1).
|
|
||||||
# If this succeeds, only commands against local volume groups will proceed.
|
|
||||||
# Volume Groups marked as clustered will be ignored.
|
|
||||||
fallback_to_local_locking = 1
|
|
||||||
|
|
||||||
# Local non-LV directory that holds file-based locks while commands are
|
|
||||||
# in progress. A directory like /tmp that may get wiped on reboot is OK.
|
|
||||||
locking_dir = "/var/lock/lvm"
|
|
||||||
|
|
||||||
# Whenever there are competing read-only and read-write access requests for
|
|
||||||
# a volume group's metadata, instead of always granting the read-only
|
|
||||||
# requests immediately, delay them to allow the read-write requests to be
|
|
||||||
# serviced. Without this setting, write access may be stalled by a high
|
|
||||||
# volume of read-only requests.
|
|
||||||
# NB. This option only affects locking_type = 1 viz. local file-based
|
|
||||||
# locking.
|
|
||||||
prioritise_write_locks = 1
|
|
||||||
|
|
||||||
# Other entries can go here to allow you to load shared libraries
|
|
||||||
# e.g. if support for LVM1 metadata was compiled as a shared library use
|
|
||||||
# format_libraries = "liblvm2format1.so"
|
|
||||||
# Full pathnames can be given.
|
|
||||||
|
|
||||||
# Search this directory first for shared libraries.
|
|
||||||
# library_dir = "/lib/lvm2"
|
|
||||||
|
|
||||||
# The external locking library to load if locking_type is set to 2.
|
|
||||||
# locking_library = "liblvm2clusterlock.so"
|
|
||||||
}
|
|
||||||
|
|
||||||
activation {
|
|
||||||
# Set to 0 to disable udev syncronisation (if compiled into the binaries).
|
|
||||||
# Processes will not wait for notification from udev.
|
|
||||||
# They will continue irrespective of any possible udev processing
|
|
||||||
# in the background. You should only use this if udev is not running
|
|
||||||
# or has rules that ignore the devices LVM2 creates.
|
|
||||||
# The command line argument --nodevsync takes precedence over this setting.
|
|
||||||
# If set to 1 when udev is not running, and there are LVM2 processes
|
|
||||||
# waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
|
|
||||||
udev_sync = 1
|
|
||||||
|
|
||||||
# How to fill in missing stripes if activating an incomplete volume.
|
|
||||||
# Using "error" will make inaccessible parts of the device return
|
|
||||||
# I/O errors on access. You can instead use a device path, in which
|
|
||||||
# case, that device will be used to in place of missing stripes.
|
|
||||||
# But note that using anything other than "error" with mirrored
|
|
||||||
# or snapshotted volumes is likely to result in data corruption.
|
|
||||||
missing_stripe_filler = "error"
|
|
||||||
|
|
||||||
# How much stack (in KB) to reserve for use while devices suspended
|
|
||||||
reserved_stack = 256
|
|
||||||
|
|
||||||
# How much memory (in KB) to reserve for use while devices suspended
|
|
||||||
reserved_memory = 8192
|
|
||||||
|
|
||||||
# Nice value used while devices suspended
|
|
||||||
process_priority = -18
|
|
||||||
|
|
||||||
# If volume_list is defined, each LV is only activated if there is a
|
|
||||||
# match against the list.
|
|
||||||
# "vgname" and "vgname/lvname" are matched exactly.
|
|
||||||
# "@tag" matches any tag set in the LV or VG.
|
|
||||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
|
||||||
#
|
|
||||||
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
|
||||||
|
|
||||||
# Size (in KB) of each copy operation when mirroring
|
|
||||||
mirror_region_size = 512
|
|
||||||
|
|
||||||
# Setting to use when there is no readahead value stored in the metadata.
|
|
||||||
#
|
|
||||||
# "none" - Disable readahead.
|
|
||||||
# "auto" - Use default value chosen by kernel.
|
|
||||||
readahead = "auto"
|
|
||||||
|
|
||||||
# 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
|
|
||||||
# how a device failure affecting a mirror is handled.
|
|
||||||
# A mirror is composed of mirror images (copies) and a log.
|
|
||||||
# A disk log ensures that a mirror does not need to be re-synced
|
|
||||||
# (all copies made the same) every time a machine reboots or crashes.
|
|
||||||
#
|
|
||||||
# In the event of a failure, the specified policy will be used to determine
|
|
||||||
# what happens. This applies to automatic repairs (when the mirror is being
|
|
||||||
# monitored by dmeventd) and to manual lvconvert --repair when
|
|
||||||
# --use-policies is given.
|
|
||||||
#
|
|
||||||
# "remove" - Simply remove the faulty device and run without it. If
|
|
||||||
# the log device fails, the mirror would convert to using
|
|
||||||
# an in-memory log. This means the mirror will not
|
|
||||||
# remember its sync status across crashes/reboots and
|
|
||||||
# the entire mirror will be re-synced. If a
|
|
||||||
# mirror image fails, the mirror will convert to a
|
|
||||||
# non-mirrored device if there is only one remaining good
|
|
||||||
# copy.
|
|
||||||
#
|
|
||||||
# "allocate" - Remove the faulty device and try to allocate space on
|
|
||||||
# a new device to be a replacement for the failed device.
|
|
||||||
# Using this policy for the log is fast and maintains the
|
|
||||||
# ability to remember sync state through crashes/reboots.
|
|
||||||
# Using this policy for a mirror device is slow, as it
|
|
||||||
# requires the mirror to resynchronize the devices, but it
|
|
||||||
# will preserve the mirror characteristic of the device.
|
|
||||||
# This policy acts like "remove" if no suitable device and
|
|
||||||
# space can be allocated for the replacement.
|
|
||||||
#
|
|
||||||
# "allocate_anywhere" - Not yet implemented. Useful to place the log device
|
|
||||||
# temporarily on same physical volume as one of the mirror
|
|
||||||
# images. This policy is not recommended for mirror devices
|
|
||||||
# since it would break the redundant nature of the mirror. This
|
|
||||||
# policy acts like "remove" if no suitable device and space can
|
|
||||||
# be allocated for the replacement.
|
|
||||||
|
|
||||||
mirror_log_fault_policy = "allocate"
|
|
||||||
mirror_device_fault_policy = "remove"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
####################
|
|
||||||
# Advanced section #
|
|
||||||
####################
|
|
||||||
|
|
||||||
# Metadata settings
|
|
||||||
#
|
|
||||||
# metadata {
|
|
||||||
# Default number of copies of metadata to hold on each PV. 0, 1 or 2.
|
|
||||||
# You might want to override it from the command line with 0
|
|
||||||
# when running pvcreate on new PVs which are to be added to large VGs.
|
|
||||||
|
|
||||||
# pvmetadatacopies = 1
|
|
||||||
|
|
||||||
# Approximate default size of on-disk metadata areas in sectors.
|
|
||||||
# You should increase this if you have large volume groups or
|
|
||||||
# you want to retain a large on-disk history of your metadata changes.
|
|
||||||
|
|
||||||
# pvmetadatasize = 255
|
|
||||||
|
|
||||||
# List of directories holding live copies of text format metadata.
|
|
||||||
# These directories must not be on logical volumes!
|
|
||||||
# It's possible to use LVM2 with a couple of directories here,
|
|
||||||
# preferably on different (non-LV) filesystems, and with no other
|
|
||||||
# on-disk metadata (pvmetadatacopies = 0). Or this can be in
|
|
||||||
# addition to on-disk metadata areas.
|
|
||||||
# The feature was originally added to simplify testing and is not
|
|
||||||
# supported under low memory situations - the machine could lock up.
|
|
||||||
#
|
|
||||||
# Never edit any files in these directories by hand unless you
|
|
||||||
# you are absolutely sure you know what you are doing! Use
|
|
||||||
# the supplied toolset to make changes (e.g. vgcfgrestore).
|
|
||||||
|
|
||||||
# dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
|
|
||||||
#}
|
|
||||||
|
|
||||||
# Event daemon
|
|
||||||
#
|
|
||||||
dmeventd {
|
|
||||||
# mirror_library is the library used when monitoring a mirror device.
|
|
||||||
#
|
|
||||||
# "libdevmapper-event-lvm2mirror.so" attempts to recover from
|
|
||||||
# failures. It removes failed devices from a volume group and
|
|
||||||
# reconfigures a mirror as necessary. If no mirror library is
|
|
||||||
# provided, mirrors are not monitored through dmeventd.
|
|
||||||
|
|
||||||
mirror_library = "libdevmapper-event-lvm2mirror.so"
|
|
||||||
|
|
||||||
# snapshot_library is the library used when monitoring a snapshot device.
|
|
||||||
#
|
|
||||||
# "libdevmapper-event-lvm2snapshot.so" monitors the filling of
|
|
||||||
# snapshots and emits a warning through syslog, when the use of
|
|
||||||
# snapshot exceedes 80%. The warning is repeated when 85%, 90% and
|
|
||||||
# 95% of the snapshot are filled.
|
|
||||||
|
|
||||||
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
|
|
||||||
}
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
--ec2_url=http://192.168.255.1:8773/services/Cloud
|
|
||||||
--rabbit_host=192.168.255.1
|
|
||||||
--redis_host=192.168.255.1
|
|
||||||
--s3_host=192.168.255.1
|
|
||||||
--vpn_ip=192.168.255.1
|
|
||||||
--datastore_path=/var/lib/nova/keeper
|
|
||||||
--networks_path=/var/lib/nova/networks
|
|
||||||
--instances_path=/var/lib/nova/instances
|
|
||||||
--buckets_path=/var/lib/nova/objectstore/buckets
|
|
||||||
--images_path=/var/lib/nova/objectstore/images
|
|
||||||
--ca_path=/var/lib/nova/CA
|
|
||||||
--keys_path=/var/lib/nova/keys
|
|
||||||
--vlan_start=2000
|
|
||||||
--vlan_end=3000
|
|
||||||
--private_range=192.168.0.0/16
|
|
||||||
--public_range=10.0.0.0/24
|
|
||||||
--volume_group=vgdata
|
|
||||||
--storage_dev=/dev/sdc
|
|
||||||
--bridge_dev=eth2
|
|
||||||
--aoe_eth_dev=eth2
|
|
||||||
--public_interface=vlan0
|
|
||||||
--default_kernel=aki-DEFAULT
|
|
||||||
--default_ramdisk=ari-DEFAULT
|
|
||||||
--vpn_image_id=ami-cloudpipe
|
|
||||||
--daemonize
|
|
||||||
--verbose
|
|
||||||
--syslog
|
|
||||||
--prefix=nova
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
[Boto]
|
|
||||||
debug = 0
|
|
||||||
num_retries = 1
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
<domain type='%(type)s'>
|
|
||||||
<name>%(name)s</name>
|
|
||||||
<os>
|
|
||||||
<type>hvm</type>
|
|
||||||
<kernel>%(basepath)s/kernel</kernel>
|
|
||||||
<initrd>%(basepath)s/ramdisk</initrd>
|
|
||||||
<cmdline>root=/dev/vda1 console=ttyS0</cmdline>
|
|
||||||
</os>
|
|
||||||
<features>
|
|
||||||
<acpi/>
|
|
||||||
</features>
|
|
||||||
<memory>%(memory_kb)s</memory>
|
|
||||||
<vcpu>%(vcpus)s</vcpu>
|
|
||||||
<devices>
|
|
||||||
<disk type='file'>
|
|
||||||
<source file='%(basepath)s/disk'/>
|
|
||||||
<target dev='vda' bus='virtio'/>
|
|
||||||
</disk>
|
|
||||||
<interface type='bridge'>
|
|
||||||
<source bridge='%(bridge_name)s'/>
|
|
||||||
<mac address='%(mac_address)s'/>
|
|
||||||
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
|
|
||||||
<!--
|
|
||||||
<filterref filter="nova-instance-%(name)s">
|
|
||||||
<parameter name="IP" value="%(ip_address)s" />
|
|
||||||
<parameter name="DHCPSERVER" value="%(dhcp_server)s" />
|
|
||||||
</filterref>
|
|
||||||
-->
|
|
||||||
</interface>
|
|
||||||
<serial type="file">
|
|
||||||
<source path='%(basepath)s/console.log'/>
|
|
||||||
<target port='1'/>
|
|
||||||
</serial>
|
|
||||||
</devices>
|
|
||||||
</domain>
|
|
||||||
@@ -1,137 +0,0 @@
#
# The MySQL database server configuration file.
#
# You can copy this to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html

# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# escpecially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock

# Here is entries for some specific programs
# The following values assume you have at least 32M ram

# This was formally known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0

[mysqld]
#
# * Basic Settings
#

#
# * IMPORTANT
# If you make changes to these settings and your system uses apparmor, you may
# also need to also adjust /etc/apparmor.d/usr.sbin.mysqld.
#

user = mysql
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
# bind-address = 127.0.0.1
#
# * Fine Tuning
#
innodb_buffer_pool_size = 12G
#innodb_log_file_size = 256M
innodb_log_buffer_size=4M
innodb_flush_log_at_trx_commit=2
innodb_thread_concurrency=8
innodb_flush_method=O_DIRECT
key_buffer = 128M
max_allowed_packet = 256M
thread_stack = 8196K
thread_cache_size = 32
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
max_connections = 1000
table_cache = 1024
#thread_concurrency = 10
#
# * Query Cache Configuration
#
query_cache_limit = 32M
query_cache_size = 256M
#
# * Logging and Replication
#
# Both location gets rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log = 1

log_error = /var/log/mysql/error.log

# Here you can see queries with especially long duration
log_slow_queries = /var/log/mysql/mysql-slow.log
long_query_time = 2
#log-queries-not-using-indexes
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
server-id = 1
log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size = 50M
#binlog_do_db = include_database_name
#binlog_ignore_db = include_database_name
#
# * InnoDB
#
sync_binlog=1
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem



[mysqldump]
quick
quote-names
max_allowed_packet = 256M

[mysql]
#no-auto-rehash # faster start of mysql but no tab completition

[isamchk]
key_buffer = 128M

#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/
@@ -1,187 +0,0 @@
#! /bin/sh

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(vish): This script sets up some reasonable defaults for iptables and
# creates nova-specific chains. If you use this script you should
# run nova-network and nova-compute with --use_nova_chains=True


# NOTE(vish): If you run public nova-api on a different port, make sure to
# change the port here

if [ -f /etc/default/nova-iptables ] ; then
    . /etc/default/nova-iptables
fi

export LC_ALL=C

API_PORT=${API_PORT:-"8773"}

if [ ! -n "$IP" ]; then
    # NOTE(vish): IP address is what address the services ALLOW on.
    # This will just get the first ip in the list, so if you
    # have more than one eth device set up, this will fail, and
    # you should explicitly pass in the ip of the instance
    IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
fi

if [ ! -n "$PRIVATE_RANGE" ]; then
    #NOTE(vish): PRIVATE_RANGE: range is ALLOW to access DHCP
    PRIVATE_RANGE="192.168.0.0/12"
fi

if [ ! -n "$MGMT_IP" ]; then
    # NOTE(vish): Management IP is the ip over which to allow ssh traffic. It
    # will also allow traffic to nova-api
    MGMT_IP="$IP"
fi

if [ ! -n "$DMZ_IP" ]; then
    # NOTE(vish): DMZ IP is the ip over which to allow api & objectstore access
    DMZ_IP="$IP"
fi

clear_nova_iptables() {
    iptables -P INPUT ACCEPT
    iptables -P FORWARD ACCEPT
    iptables -P OUTPUT ACCEPT
    iptables -F
    iptables -t nat -F
    iptables -F services
    iptables -X services
    # HACK: re-adding fail2ban rules :(
    iptables -N fail2ban-ssh
    iptables -A INPUT -p tcp -m multiport --dports 22 -j fail2ban-ssh
    iptables -A fail2ban-ssh -j RETURN
}

load_nova_iptables() {

    iptables -P INPUT DROP
    iptables -A INPUT -m state --state INVALID -j DROP
    iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
    # NOTE(ja): allow localhost for everything
    iptables -A INPUT -d 127.0.0.1/32 -j ACCEPT
    # NOTE(ja): 22 only allowed MGMT_IP before, but we widened it to any
    # address, since ssh should be listening only on internal
    # before we re-add this rule we will need to add
    # flexibility for RSYNC between omega/stingray
    iptables -A INPUT -m tcp -p tcp --dport 22 -j ACCEPT
    iptables -A INPUT -m udp -p udp --dport 123 -j ACCEPT
    iptables -A INPUT -p icmp -j ACCEPT
    iptables -N services
    iptables -A INPUT -j services
    iptables -A INPUT -p tcp -j REJECT --reject-with tcp-reset
    iptables -A INPUT -j REJECT --reject-with icmp-port-unreachable

    iptables -P FORWARD DROP
    iptables -A FORWARD -m state --state INVALID -j DROP
    iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
    iptables -A FORWARD -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu

    # NOTE(vish): DROP on output is too restrictive for now. We need to add
    # in a bunch of more specific output rules to use it.
    # iptables -P OUTPUT DROP
    iptables -A OUTPUT -m state --state INVALID -j DROP
    iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT

    if [ -n "$GANGLIA" ] || [ -n "$ALL" ]; then
        iptables -A services -m tcp -p tcp -d $IP --dport 8649 -j ACCEPT
        iptables -A services -m udp -p udp -d $IP --dport 8649 -j ACCEPT
    fi

    # if [ -n "$WEB" ] || [ -n "$ALL" ]; then
    # # NOTE(vish): This opens up ports for web access, allowing web-based
    # # dashboards to work.
    # iptables -A services -m tcp -p tcp -d $IP --dport 80 -j ACCEPT
    # iptables -A services -m tcp -p tcp -d $IP --dport 443 -j ACCEPT
    # fi

    if [ -n "$OBJECTSTORE" ] || [ -n "$ALL" ]; then
        # infrastructure
        iptables -A services -m tcp -p tcp -d $IP --dport 3333 -j ACCEPT
        # clients
        iptables -A services -m tcp -p tcp -d $DMZ_IP --dport 3333 -j ACCEPT
    fi

    if [ -n "$API" ] || [ -n "$ALL" ]; then
        iptables -A services -m tcp -p tcp -d $IP --dport $API_PORT -j ACCEPT
        if [ "$IP" != "$DMZ_IP" ]; then
            iptables -A services -m tcp -p tcp -d $DMZ_IP --dport $API_PORT -j ACCEPT
        fi
        if [ "$IP" != "$MGMT_IP" ] && [ "$DMZ_IP" != "$MGMT_IP" ]; then
            iptables -A services -m tcp -p tcp -d $MGMT_IP --dport $API_PORT -j ACCEPT
        fi
    fi

    if [ -n "$REDIS" ] || [ -n "$ALL" ]; then
        iptables -A services -m tcp -p tcp -d $IP --dport 6379 -j ACCEPT
    fi

    if [ -n "$MYSQL" ] || [ -n "$ALL" ]; then
        iptables -A services -m tcp -p tcp -d $IP --dport 3306 -j ACCEPT
    fi

    if [ -n "$RABBITMQ" ] || [ -n "$ALL" ]; then
        iptables -A services -m tcp -p tcp -d $IP --dport 4369 -j ACCEPT
        iptables -A services -m tcp -p tcp -d $IP --dport 5672 -j ACCEPT
        iptables -A services -m tcp -p tcp -d $IP --dport 53284 -j ACCEPT
    fi

    if [ -n "$DNSMASQ" ] || [ -n "$ALL" ]; then
        # NOTE(vish): this could theoretically be setup per network
        # for each host, but it seems like overkill
        iptables -A services -m tcp -p tcp -s $PRIVATE_RANGE --dport 53 -j ACCEPT
        iptables -A services -m udp -p udp -s $PRIVATE_RANGE --dport 53 -j ACCEPT
        iptables -A services -m udp -p udp --dport 67 -j ACCEPT
    fi

    if [ -n "$LDAP" ] || [ -n "$ALL" ]; then
        iptables -A services -m tcp -p tcp -d $IP --dport 389 -j ACCEPT
    fi

    if [ -n "$ISCSI" ] || [ -n "$ALL" ]; then
        iptables -A services -m tcp -p tcp -d $IP --dport 3260 -j ACCEPT
        iptables -A services -m tcp -p tcp -d 127.0.0.0/16 --dport 3260 -j ACCEPT
    fi
}


case "$1" in
    start)
        echo "Starting nova-iptables: "
        load_nova_iptables
        ;;
    stop)
        echo "Clearing nova-iptables: "
        clear_nova_iptables
        ;;
    restart)
        echo "Restarting nova-iptables: "
        clear_nova_iptables
        load_nova_iptables
        ;;
    *)
        echo "Usage: $NAME {start|stop|restart}" >&2
        exit 1
        ;;
esac

exit 0
@@ -1,19 +0,0 @@
#!/bin/sh

# FILE: /etc/udev/scripts/iscsidev.sh

BUS=${1}
HOST=${BUS%%:*}

[ -e /sys/class/iscsi_host ] || exit 1

file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname"

target_name=$(cat ${file})

# This is not an open-scsi drive
if [ -z "${target_name}" ]; then
    exit 1
fi

echo "${target_name##*:}"
@@ -1,6 +0,0 @@
#!/bin/bash
/root/slap.sh
mysql -e "DROP DATABASE nova"
mysql -e "CREATE DATABASE nova"
mysql -e "GRANT ALL on nova.* to nova@'%' identified by 'TODO:CHANGEME:CMON'"
touch /root/installed
@@ -1,261 +0,0 @@
#!/usr/bin/env bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS

apt-get install -y slapd ldap-utils python-ldap

cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF
#
# LDAP Public Key Patch schema for use with openssh-ldappubkey
# Author: Eric AUGE <eau@phear.org>
#
# Based on the proposal of : Mark Ruijter
#


# octetString SYNTAX
attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
    DESC 'MANDATORY: OpenSSH Public key'
    EQUALITY octetStringMatch
    SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )

# printableString SYNTAX yes|no
objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
    DESC 'MANDATORY: OpenSSH LPK objectclass'
    MAY ( sshPublicKey $ uid )
    )
LPK_SCHEMA_EOF

cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
#
#

# using internet experimental oid arc as per BP64 3.1
objectidentifier novaSchema 1.3.6.1.3.1.666.666
objectidentifier novaAttrs novaSchema:3
objectidentifier novaOCs novaSchema:4

attributetype (
    novaAttrs:1
    NAME 'accessKey'
    DESC 'Key for accessing data'
    EQUALITY caseIgnoreMatch
    SUBSTR caseIgnoreSubstringsMatch
    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
    SINGLE-VALUE
    )

attributetype (
    novaAttrs:2
    NAME 'secretKey'
    DESC 'Secret key'
    EQUALITY caseIgnoreMatch
    SUBSTR caseIgnoreSubstringsMatch
    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
    SINGLE-VALUE
    )

attributetype (
    novaAttrs:3
    NAME 'keyFingerprint'
    DESC 'Fingerprint of private key'
    EQUALITY caseIgnoreMatch
    SUBSTR caseIgnoreSubstringsMatch
    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
    SINGLE-VALUE
    )

attributetype (
    novaAttrs:4
    NAME 'isAdmin'
    DESC 'Is user an administrator?'
    EQUALITY booleanMatch
    SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
    SINGLE-VALUE
    )

attributetype (
    novaAttrs:5
    NAME 'projectManager'
    DESC 'Project Managers of a project'
    SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
    )

objectClass (
    novaOCs:1
    NAME 'novaUser'
    DESC 'access and secret keys'
    AUXILIARY
    MUST ( uid )
    MAY ( accessKey $ secretKey $ isAdmin )
    )

objectClass (
    novaOCs:2
    NAME 'novaKeyPair'
    DESC 'Key pair for User'
    SUP top
    STRUCTURAL
    MUST ( cn $ sshPublicKey $ keyFingerprint )
    )

objectClass (
    novaOCs:3
    NAME 'novaProject'
    DESC 'Container for project'
    SUP groupOfNames
    STRUCTURAL
    MUST ( cn $ projectManager )
    )

NOVA_SCHEMA_EOF

mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
# slapd.conf - Configuration file for LDAP SLAPD
##########
# Basics #
##########
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/inetorgperson.schema
include /etc/ldap/schema/openssh-lpk_openldap.schema
include /etc/ldap/schema/nova.schema
pidfile /var/run/slapd/slapd.pid
argsfile /var/run/slapd/slapd.args
loglevel none
modulepath /usr/lib/ldap
# modulepath /usr/local/libexec/openldap
moduleload back_hdb
##########################
# Database Configuration #
##########################
database hdb
suffix "dc=example,dc=com"
rootdn "cn=Manager,dc=example,dc=com"
rootpw changeme
directory /var/lib/ldap
# directory /usr/local/var/openldap-data
index objectClass,cn eq
########
# ACLs #
########
access to attrs=userPassword
    by anonymous auth
    by self write
    by * none
access to *
    by self write
    by * none
SLAPD_CONF_EOF

mv /etc/ldap/ldap.conf /etc/ldap/ldap.conf.orig

cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
# LDAP Client Settings
URI ldap://localhost
BASE dc=example,dc=com
BINDDN cn=Manager,dc=example,dc=com
SIZELIMIT 0
TIMELIMIT 0
LDAP_CONF_EOF

cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
# This is the root of the directory tree
dn: dc=example,dc=com
description: Example.Com, your trusted non-existent corporation.
dc: example
o: Example.Com
objectClass: top
objectClass: dcObject
objectClass: organization

# Subtree for users
dn: ou=Users,dc=example,dc=com
ou: Users
description: Users
objectClass: organizationalUnit

# Subtree for groups
dn: ou=Groups,dc=example,dc=com
ou: Groups
description: Groups
objectClass: organizationalUnit

# Subtree for system accounts
dn: ou=System,dc=example,dc=com
ou: System
description: Special accounts used by software applications.
objectClass: organizationalUnit

# Special Account for Authentication:
dn: uid=authenticate,ou=System,dc=example,dc=com
uid: authenticate
ou: System
description: Special account for authenticating users
userPassword: {MD5}TODO-000000000000000000000000000==
objectClass: account
objectClass: simpleSecurityObject

# create the sysadmin entry

dn: cn=developers,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: developers
description: IT admin group
member: uid=admin,ou=Users,dc=example,dc=com

dn: cn=sysadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: sysadmins
description: IT admin group
member: uid=admin,ou=Users,dc=example,dc=com

dn: cn=netadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: netadmins
description: Network admin group
member: uid=admin,ou=Users,dc=example,dc=com

dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: cloudadmins
description: Cloud admin group
member: uid=admin,ou=Users,dc=example,dc=com

dn: cn=itsec,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: itsec
description: IT security users group
member: uid=admin,ou=Users,dc=example,dc=com
BASE_LDIF_EOF

/etc/init.d/slapd stop
rm -rf /var/lib/ldap/*
rm -rf /etc/ldap/slapd.d/*
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d
cp /usr/share/slapd/DB_CONFIG /var/lib/ldap/DB_CONFIG
slapadd -v -l /etc/ldap/base.ldif
chown -R openldap:openldap /etc/ldap/slapd.d
chown -R openldap:openldap /var/lib/ldap
/etc/init.d/slapd start
@@ -1,8 +0,0 @@
# fileserver.conf

[files]
path /srv/cloud/puppet/files
allow 10.0.0.0/24

[plugins]

@@ -1 +0,0 @@
exec { "update-apt": command => "/usr/bin/apt-get update" }
@@ -1,14 +0,0 @@
class issue {
    file { "/etc/issue":
        owner => "root",
        group => "root",
        mode => 444,
        source => "puppet://${puppet_server}/files/etc/issue",
    }
    file { "/etc/issue.net":
        owner => "root",
        group => "root",
        mode => 444,
        source => "puppet://${puppet_server}/files/etc/issue",
    }
}
@@ -1,34 +0,0 @@
# via http://projects.puppetlabs.com/projects/puppet/wiki/Kernel_Modules_Patterns

define kern_module ($ensure) {
    $modulesfile = $operatingsystem ? { ubuntu => "/etc/modules", redhat => "/etc/rc.modules" }
    case $operatingsystem {
        redhat: { file { "/etc/rc.modules": ensure => file, mode => 755 } }
    }
    case $ensure {
        present: {
            exec { "insert_module_${name}":
                command => $operatingsystem ? {
                    ubuntu => "/bin/echo '${name}' >> '${modulesfile}'",
                    redhat => "/bin/echo '/sbin/modprobe ${name}' >> '${modulesfile}' "
                },
                unless => "/bin/grep -qFx '${name}' '${modulesfile}'"
            }
            exec { "/sbin/modprobe ${name}": unless => "/bin/grep -q '^${name} ' '/proc/modules'" }
        }
        absent: {
            exec { "/sbin/modprobe -r ${name}": onlyif => "/bin/grep -q '^${name} ' '/proc/modules'" }
            exec { "remove_module_${name}":
                command => $operatingsystem ? {
                    ubuntu => "/usr/bin/perl -ni -e 'print unless /^\\Q${name}\\E\$/' '${modulesfile}'",
                    redhat => "/usr/bin/perl -ni -e 'print unless /^\\Q/sbin/modprobe ${name}\\E\$/' '${modulesfile}'"
                },
                onlyif => $operatingsystem ? {
                    ubuntu => "/bin/grep -qFx '${name}' '${modulesfile}'",
                    redhat => "/bin/grep -q '^/sbin/modprobe ${name}' '${modulesfile}'"
                }
            }
        }
        default: { err ( "unknown ensure value ${ensure}" ) }
    }
}
@@ -1,6 +0,0 @@
define loopback($num) {
    exec { "mknod -m 0660 /dev/loop${num} b 7 ${num}; chown root:disk /dev/loop${num}":
        creates => "/dev/loop${num}",
        path => ["/usr/bin", "/usr/sbin", "/bin"]
    }
}
@@ -1,8 +0,0 @@
class lvm {
    file { "/etc/lvm/lvm.conf":
        owner => "root",
        group => "root",
        mode => 444,
        source => "puppet://${puppet_server}/files/etc/lvm.conf",
    }
}
@@ -1,8 +0,0 @@
class lvmconf {
    file { "/etc/lvm/lvm.conf":
        owner => "root", group => "root", mode => 644,
        source => "puppet://${puppet_server}/files/etc/lvm/lvm.conf",
        ensure => present
    }
}

@@ -1,464 +0,0 @@
import "kern_module"
import "apt"
import "loopback"

#$head_node_ip = "undef"
#$rabbit_ip = "undef"
#$vpn_ip = "undef"
#$public_interface = "undef"
#$vlan_start = "5000"
#$vlan_end = "6000"
#$private_range = "10.0.0.0/16"
#$public_range = "192.168.177.0/24"

define nova_iptables($services, $ip="", $private_range="", $mgmt_ip="", $dmz_ip="") {
    file { "/etc/init.d/nova-iptables":
        owner => "root", mode => 755,
        source => "puppet://${puppet_server}/files/production/nova-iptables",
    }

    file { "/etc/default/nova-iptables":
        owner => "root", mode => 644,
        content => template("nova-iptables.erb")
    }
}

define nova_conf_pointer($name) {
    file { "/etc/nova/nova-${name}.conf":
        owner => "nova", mode => 400,
        content => "--flagfile=/etc/nova/nova.conf"
    }
}

class novaconf {
    file { "/etc/nova/nova.conf":
        owner => "nova", mode => 400,
        content => template("production/nova-common.conf.erb", "production/nova-${cluster_name}.conf.erb")
    }
    nova_conf_pointer{'manage': name => 'manage'}
}

class novadata {
    package { "rabbitmq-server": ensure => present }

    file { "/etc/rabbitmq/rabbitmq.conf":
        owner => "root", mode => 644,
        content => "NODENAME=rabbit@localhost",
    }

    service { "rabbitmq-server":
        ensure => running,
        enable => true,
        hasstatus => true,
        require => [
            File["/etc/rabbitmq/rabbitmq.conf"],
            Package["rabbitmq-server"]
        ]
    }

    package { "mysql-server": ensure => present }

    file { "/etc/mysql/my.cnf":
        owner => "root", mode => 644,
        source => "puppet://${puppet_server}/files/production/my.cnf",
    }

    service { "mysql":
        ensure => running,
        enable => true,
        hasstatus => true,
        require => [
            File["/etc/mysql/my.cnf"],
            Package["mysql-server"]
        ]
    }

    file { "/root/slap.sh":
        owner => "root", mode => 755,
        source => "puppet://${puppet_server}/files/production/slap.sh",
    }

    file { "/root/setup_data.sh":
        owner => "root", mode => 755,
        source => "puppet://${puppet_server}/files/production/setup_data.sh",
    }

    # setup compute data
    exec { "setup_data":
        command => "/root/setup_data.sh",
        path => "/usr/bin:/bin",
        unless => "test -f /root/installed",
        require => [
            Service["mysql"],
            File["/root/slap.sh"],
            File["/root/setup_data.sh"]
        ]
    }
}

define nscheduler($version) {
    package { "nova-scheduler": ensure => $version, require => Exec["update-apt"] }
    nova_conf_pointer{'scheduler': name => 'scheduler'}
    exec { "update-rc.d -f nova-scheduler remove; update-rc.d nova-scheduler defaults 50":
        path => "/usr/bin:/usr/sbin:/bin",
        onlyif => "test -f /etc/init.d/nova-scheduler",
        unless => "test -f /etc/rc2.d/S50nova-scheduler"
    }
    service { "nova-scheduler":
        ensure => running,
        hasstatus => true,
        subscribe => [
            Package["nova-scheduler"],
            File["/etc/nova/nova.conf"],
            File["/etc/nova/nova-scheduler.conf"]
        ]
    }

}

define napi($version, $api_servers, $api_base_port) {
    file { "/etc/boto.cfg":
        owner => "root", mode => 644,
        source => "puppet://${puppet_server}/files/production/boto.cfg",
    }

    file { "/var/lib/nova/CA/genvpn.sh":
        owner => "nova", mode => 755,
        source => "puppet://${puppet_server}/files/production/genvpn.sh",
    }

    package { "python-greenlet": ensure => present }
    package { "nova-api": ensure => $version, require => [Exec["update-apt"], Package["python-greenlet"]] }
    nova_conf_pointer{'api': name => 'api'}

    exec { "update-rc.d -f nova-api remove; update-rc.d nova-api defaults 50":
        path => "/usr/bin:/usr/sbin:/bin",
        onlyif => "test -f /etc/init.d/nova-api",
        unless => "test -f /etc/rc2.d/S50nova-api"
    }

    service { "nova-netsync":
        start => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock start",
        stop => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock stop",
        ensure => running,
        hasstatus => false,
        pattern => "nova-netsync",
        require => Service["nova-api"],
        subscribe => File["/etc/nova/nova.conf"]
    }
    service { "nova-api":
        start => "monit start all -g nova_api",
        stop => "monit stop all -g nova_api",
        restart => "monit restart all -g nova_api",
        # ensure => running,
        # hasstatus => true,
        require => Service["monit"],
        subscribe => [
            Package["nova-objectstore"],
            File["/etc/boto.cfg"],
            File["/etc/nova/nova.conf"],
            File["/etc/nova/nova-objectstore.conf"]
        ]
    }

    # the haproxy & monit's template use $api_servers and $api_base_port

    package { "haproxy": ensure => present }
    file { "/etc/default/haproxy":
        owner => "root", mode => 644,
        content => "ENABLED=1",
        require => Package['haproxy']
    }
    file { "/etc/haproxy/haproxy.cfg":
        owner => "root", mode => 644,
        content => template("/srv/cloud/puppet/templates/haproxy.cfg.erb"),
        require => Package['haproxy']
    }
    service { "haproxy":
        ensure => true,
        enable => true,
        hasstatus => true,
        subscribe => [
            Package["haproxy"],
            File["/etc/default/haproxy"],
            File["/etc/haproxy/haproxy.cfg"],
        ]
    }

    package { "socat": ensure => present }

    file { "/usr/local/bin/gmetric_haproxy.sh":
        owner => "root", mode => 755,
        source => "puppet://${puppet_server}/files/production/ganglia/gmetric_scripts/gmetric_haproxy.sh",
    }

    cron { "gmetric_haproxy":
        command => "/usr/local/bin/gmetric_haproxy.sh",
        user => root,
        minute => "*/3",
    }

    package { "monit": ensure => present }

    file { "/etc/default/monit":
        owner => "root", mode => 644,
        content => "startup=1",
        require => Package['monit']
    }
    file { "/etc/monit/monitrc":
        owner => "root", mode => 600,
        content => template("/srv/cloud/puppet/templates/monitrc-nova-api.erb"),
        require => Package['monit']
    }
    service { "monit":
        ensure => true,
        pattern => "sbin/monit",
        subscribe => [
            Package["monit"],
            File["/etc/default/monit"],
            File["/etc/monit/monitrc"],
        ]
    }

}


define nnetwork($version) {
    # kill the default network added by the package
    exec { "kill-libvirt-default-net":
        command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml",
        path => "/usr/bin:/bin",
        onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml"
    }

    # EVIL HACK: custom binary because dnsmasq 2.52 segfaulted accessing dereferenced object
    file { "/usr/sbin/dnsmasq":
        owner => "root", group => "root",
        source => "puppet://${puppet_server}/files/production/dnsmasq",
    }

    package { "nova-network": ensure => $version, require => Exec["update-apt"] }
    nova_conf_pointer{'dhcpbridge': name => 'dhcpbridge'}
    nova_conf_pointer{'network': name => "network" }

    exec { "update-rc.d -f nova-network remove; update-rc.d nova-network defaults 50":
        path => "/usr/bin:/usr/sbin:/bin",
        onlyif => "test -f /etc/init.d/nova-network",
        unless => "test -f /etc/rc2.d/S50nova-network"
    }
    service { "nova-network":
        ensure => running,
        hasstatus => true,
        subscribe => [
            Package["nova-network"],
            File["/etc/nova/nova.conf"],
            File["/etc/nova/nova-network.conf"]
        ]
    }
}

define nobjectstore($version) {
    package { "nova-objectstore": ensure => $version, require => Exec["update-apt"] }
    nova_conf_pointer{'objectstore': name => 'objectstore'}
    exec { "update-rc.d -f nova-objectstore remove; update-rc.d nova-objectstore defaults 50":
        path => "/usr/bin:/usr/sbin:/bin",
        onlyif => "test -f /etc/init.d/nova-objectstore",
        unless => "test -f /etc/rc2.d/S50nova-objectstore"
    }
    service { "nova-objectstore":
        ensure => running,
        hasstatus => true,
        subscribe => [
            Package["nova-objectstore"],
            File["/etc/nova/nova.conf"],
            File["/etc/nova/nova-objectstore.conf"]
        ]
    }
}

define ncompute($version) {
    include ganglia-python
    include ganglia-compute

    # kill the default network added by the package
    exec { "kill-libvirt-default-net":
        command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml",
        path => "/usr/bin:/bin",
        onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml"
    }


    # LIBVIRT has to be restarted when ebtables / gawk is installed
    service { "libvirt-bin":
        ensure => running,
        pattern => "sbin/libvirtd",
        subscribe => [
            Package["ebtables"],
            Kern_module["kvm_intel"]
        ],
        require => [
            Package["libvirt-bin"],
            Package["ebtables"],
            Package["gawk"],
            Kern_module["kvm_intel"],
            File["/dev/kvm"]
        ]
    }

    package { "libvirt-bin": ensure => "0.8.3-1ubuntu14~ppalucid2" }
    package { "ebtables": ensure => present }
    package { "gawk": ensure => present }

    # ensure proper permissions on /dev/kvm
    file { "/dev/kvm":
        owner => "root",
        group => "kvm",
        mode => 660
    }

    # require hardware virt
    kern_module { "kvm_intel":
        ensure => present,
    }

    # increase loopback devices
    file { "/etc/modprobe.d/loop.conf":
        owner => "root", mode => 644,
        content => "options loop max_loop=40"
    }

    nova_conf_pointer{'compute': name => 'compute'}

    loopback{loop0: num => 0}
    loopback{loop1: num => 1}
    loopback{loop2: num => 2}
    loopback{loop3: num => 3}
    loopback{loop4: num => 4}
    loopback{loop5: num => 5}
    loopback{loop6: num => 6}
    loopback{loop7: num => 7}
    loopback{loop8: num => 8}
    loopback{loop9: num => 9}
    loopback{loop10: num => 10}
    loopback{loop11: num => 11}
    loopback{loop12: num => 12}
    loopback{loop13: num => 13}
    loopback{loop14: num => 14}
    loopback{loop15: num => 15}
    loopback{loop16: num => 16}
    loopback{loop17: num => 17}
    loopback{loop18: num => 18}
    loopback{loop19: num => 19}
    loopback{loop20: num => 20}
    loopback{loop21: num => 21}
    loopback{loop22: num => 22}
    loopback{loop23: num => 23}
    loopback{loop24: num => 24}
    loopback{loop25: num => 25}
    loopback{loop26: num => 26}
    loopback{loop27: num => 27}
    loopback{loop28: num => 28}
    loopback{loop29: num => 29}
    loopback{loop30: num => 30}
    loopback{loop31: num => 31}
    loopback{loop32: num => 32}
    loopback{loop33: num => 33}
    loopback{loop34: num => 34}
    loopback{loop35: num => 35}
    loopback{loop36: num => 36}
    loopback{loop37: num => 37}
    loopback{loop38: num => 38}
    loopback{loop39: num => 39}

    package { "python-libvirt": ensure => "0.8.3-1ubuntu14~ppalucid2" }

    package { "nova-compute":
        ensure => "$version",
        require => Package["python-libvirt"]
    }

    #file { "/usr/share/nova/libvirt.qemu.xml.template":
    # owner => "nova", mode => 400,
    # source => "puppet://${puppet_server}/files/production/libvirt.qemu.xml.template",
    #}

    # fix runlevels: using enable => true adds it as 20, which is too early
    exec { "update-rc.d -f nova-compute remove":
        path => "/usr/bin:/usr/sbin:/bin",
        onlyif => "test -f /etc/rc2.d/S??nova-compute"
    }
    service { "nova-compute":
        ensure => running,
        hasstatus => true,
        subscribe => [
            Package["nova-compute"],
            File["/etc/nova/nova.conf"],
            File["/etc/nova/nova-compute.conf"],
            #File["/usr/share/nova/libvirt.qemu.xml.template"],
            Service["libvirt-bin"],
            Kern_module["kvm_intel"]
        ]
    }
}

define nvolume($version) {

    package { "nova-volume": ensure => $version, require => Exec["update-apt"] }

    nova_conf_pointer{'volume': name => 'volume'}

    # fix runlevels: using enable => true adds it as 20, which is too early
    exec { "update-rc.d -f nova-volume remove":
        path => "/usr/bin:/usr/sbin:/bin",
        onlyif => "test -f /etc/rc2.d/S??nova-volume"
    }

    file { "/etc/default/iscsitarget":
        owner => "root", mode => 644,
        content => "ISCSITARGET_ENABLE=true"
    }

    package { "iscsitarget": ensure => present }

    file { "/dev/iscsi": ensure => directory } # FIXME(vish): owner / mode?
    file { "/usr/sbin/nova-iscsi-dev.sh":
        owner => "root", mode => 755,
        source => "puppet://${puppet_server}/files/production/nova-iscsi-dev.sh"
    }
    file { "/etc/udev/rules.d/55-openiscsi.rules":
        owner => "root", mode => 644,
        content => 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/usr/sbin/nova-iscsi-dev.sh %b",SYMLINK+="iscsi/%c%n"'
    }

    service { "iscsitarget":
        ensure => running,
        enable => true,
        hasstatus => true,
        require => [
            File["/etc/default/iscsitarget"],
            Package["iscsitarget"]
        ]
    }

    service { "nova-volume":
        ensure => running,
        hasstatus => true,
        subscribe => [
            Package["nova-volume"],
            File["/etc/nova/nova.conf"],
            File["/etc/nova/nova-volume.conf"]
        ]
    }
}

class novaspool {
    # This isn't in release yet
    #cron { logspool:
    # command => "/usr/bin/nova-logspool /var/log/nova.log /var/lib/nova/spool",
    # user => "nova"
    #}
    #cron { spoolsentry:
    # command => "/usr/bin/nova-spoolsentry ${sentry_url} ${sentry_key} /var/lib/nova/spool",
    # user => "nova"
    #}
}
@@ -1,7 +0,0 @@
class swift {
    package { "memcached": ensure => present }
    service { "memcached": require => Package['memcached'] }

    package { "swift-proxy": ensure => present }
}

@@ -1,120 +0,0 @@
# site.pp

import "templates"
import "classes/*"

node novabase inherits default {
    # $puppet_server = "192.168.0.10"
    $cluster_name = "openstack001"
    $ganglia_udp_send_channel = "openstack001.example.com"
    $syslog = "192.168.0.10"

    # THIS STUFF ISN'T IN RELEASE YET
    #$sentry_url = "http://192.168.0.19/sentry/store/"
    #$sentry_key = "TODO:SENTRYPASS"

    $local_network = "192.168.0.0/16"
    $vpn_ip = "192.168.0.2"
    $public_interface = "eth0"
    include novanode
    # include nova-common
    include opsmetrics

    # non-nova stuff such as nova-dash inherit from novanode
    # novaspool needs a better home
    # include novaspool
}

# Builder
node "nova000.example.com" inherits novabase {
    $syslog = "server"
    include ntp
    include syslog-server
}

# Non-Nova nodes

node
    "blog.example.com",
    "wiki.example.com"
inherits novabase {
    include ganglia-python
    include ganglia-apache
    include ganglia-mysql
}


node "nova001.example.com"
inherits novabase {
    include novabase

    nova_iptables { nova:
        services => [
            "ganglia",
            "mysql",
            "rabbitmq",
            "ldap",
            "api",
            "objectstore",
            "nrpe",
        ],
        ip => "192.168.0.10",
    }

    nobjectstore { nova: version => "0.9.0" }
    nscheduler { nova: version => "0.9.0" }
    napi { nova:
        version => "0.9.0",
        api_servers => 10,
        api_base_port => 8000
    }
}

node "nova002.example.com"
inherits novabase {
    include novaconf

    nova_iptables { nova:
        services => [
            "ganglia",
            "dnsmasq",
            "nrpe"
        ],
        ip => "192.168.4.2",
        private_range => "192.168.0.0/16",
    }

    nnetwork { nova: version => "0.9.0" }
}

node
    "nova003.example.com",
    "nova004.example.com",
    "nova005.example.com",
    "nova006.example.com",
    "nova007.example.com",
    "nova008.example.com",
    "nova009.example.com",
    "nova010.example.com",
    "nova011.example.com",
    "nova012.example.com",
    "nova013.example.com",
    "nova014.example.com",
    "nova015.example.com",
    "nova016.example.com",
    "nova017.example.com",
    "nova018.example.com",
    "nova019.example.com",
inherits novabase {
    include novaconf
    ncompute { nova: version => "0.9.0" }
    nvolume { nova: version => "0.9.0" }
}

#node
# "nova020.example.com"
# "nova021.example.com"
#inherits novanode {
# include novaconf
#ncompute { nova: version => "0.9.0" }
#}
@@ -1,21 +0,0 @@
# templates.pp

import "classes/*"

class baseclass {
    # include dns-client # FIXME: missing resolv.conf.erb??
    include issue
}

node default {
    $nova_site = "undef"
    $nova_ns1 = "undef"
    $nova_ns2 = "undef"
    # include baseclass
}

# novanode handles the system-level requirements for Nova/Swift nodes
class novanode {
    include baseclass
    include lvmconf
}
@@ -1,11 +0,0 @@
[main]
logdir=/var/log/puppet
vardir=/var/lib/puppet
ssldir=/var/lib/puppet/ssl
rundir=/var/run/puppet
factpath=$vardir/lib/facter
pluginsync=false

[puppetmasterd]
templatedir=/var/lib/nova/contrib/puppet/templates
autosign=true
@@ -1,39 +0,0 @@
# this config needs haproxy-1.1.28 or haproxy-1.2.1

global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    #log loghost local0 info
    maxconn 4096
    #chroot /usr/share/haproxy
    stats socket /var/run/haproxy.sock
    user haproxy
    group haproxy
    daemon
    #debug
    #quiet

defaults
    log global
    mode http
    option httplog
    option dontlognull
    retries 3
    option redispatch
    stats enable
    stats uri /haproxy
    maxconn 2000
    contimeout 5000
    clitimeout 50000
    srvtimeout 50000


listen nova-api 0.0.0.0:8773
    option httpchk GET / HTTP/1.0\r\nHost:\ example.com
    option forwardfor
    reqidel ^X-Forwarded-For:.*
    balance roundrobin
<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset -%>
    server api_<%= port %> 127.0.0.1:<%= port %> maxconn 1 check
<% end -%>
    option httpclose # disable keep-alive
@@ -1,138 +0,0 @@
###############################################################################
## Monit control file
###############################################################################
##
## Comments begin with a '#' and extend through the end of the line. Keywords
## are case insensitive. All path's MUST BE FULLY QUALIFIED, starting with '/'.
##
## Below you will find examples of some frequently used statements. For
## information about the control file, a complete list of statements and
## options please have a look in the monit manual.
##
##
###############################################################################
## Global section
###############################################################################
##
## Start monit in the background (run as a daemon):
#
set daemon 60 # check services at 1-minute intervals
    with start delay 30 # optional: delay the first check by half a minute
    # (by default check immediately after monit start)


## Set syslog logging with the 'daemon' facility. If the FACILITY option is
## omitted, monit will use 'user' facility by default. If you want to log to
## a stand alone log file instead, specify the path to a log file
#
set logfile syslog facility log_daemon
#
#
### Set the location of monit id file which saves the unique id specific for
### given monit. The id is generated and stored on first monit start.
### By default the file is placed in $HOME/.monit.id.
#
# set idfile /var/.monit.id
#
### Set the location of monit state file which saves the monitoring state
### on each cycle. By default the file is placed in $HOME/.monit.state. If
### state file is stored on persistent filesystem, monit will recover the
### monitoring state across reboots. If it is on temporary filesystem, the
### state will be lost on reboot.
#
# set statefile /var/.monit.state
#
## Set the list of mail servers for alert delivery. Multiple servers may be
## specified using comma separator. By default monit uses port 25 - this
## is possible to override with the PORT option.
#
# set mailserver mail.bar.baz, # primary mailserver
# backup.bar.baz port 10025, # backup mailserver on port 10025
# localhost # fallback relay
#
#
## By default monit will drop alert events if no mail servers are available.
## If you want to keep the alerts for a later delivery retry, you can use the
## EVENTQUEUE statement. The base directory where undelivered alerts will be
## stored is specified by the BASEDIR option. You can limit the maximal queue
## size using the SLOTS option (if omitted, the queue is limited by space
## available in the back end filesystem).
#
# set eventqueue
# basedir /var/monit # set the base directory where events will be stored
# slots 100 # optionaly limit the queue size
#
#
## Send status and events to M/Monit (Monit central management: for more
## informations about M/Monit see http://www.tildeslash.com/mmonit).
#
# set mmonit http://monit:monit@192.168.1.10:8080/collector
#
#
## Monit by default uses the following alert mail format:
##
## --8<--
## From: monit@$HOST # sender
## Subject: monit alert -- $EVENT $SERVICE # subject
##
## $EVENT Service $SERVICE #
## #
## Date: $DATE #
## Action: $ACTION #
## Host: $HOST # body
## Description: $DESCRIPTION #
## #
## Your faithful employee, #
## monit #
## --8<--
##
## You can override this message format or parts of it, such as subject
## or sender using the MAIL-FORMAT statement. Macros such as $DATE, etc.
## are expanded at runtime. For example, to override the sender:
#
# set mail-format { from: monit@foo.bar }
#
#
## You can set alert recipients here whom will receive alerts if/when a
## service defined in this file has errors. Alerts may be restricted on
## events by using a filter as in the second example below.
#
# set alert sysadm@foo.bar # receive all alerts
# set alert manager@foo.bar only on { timeout } # receive just service-
# # timeout alert
#
#
## Monit has an embedded web server which can be used to view status of
## services monitored, the current configuration, actual services parameters
## and manage services from a web interface.
#
set httpd port 2812 and
    use address localhost # only accept connection from localhost
    allow localhost # allow localhost to connect to the server and
    # allow admin:monit # require user 'admin' with password 'monit'
    # allow @monit # allow users of group 'monit' to connect (rw)
    # allow @users readonly # allow users of group 'users' to connect readonly
#
#
###############################################################################
## Services
###############################################################################

<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset %>

check process nova_api_<%= port %> with pidfile /var/run/nova/nova-api-<%= port %>.pid
    group nova_api
    start program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock start"
        as uid nova
    stop program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock stop"
        as uid nova
    if failed port <%= port %> protocol http
        with timeout 15 seconds
        for 4 cycles
        then restart
    if totalmem > 300 Mb then restart
    if cpu is greater than 60% for 2 cycles then alert
    if cpu > 80% for 3 cycles then restart
    if 3 restarts within 5 cycles then timeout

<% end %>
@@ -1,10 +0,0 @@
<% services.each do |service| -%>
<%= service.upcase %>=1
<% end -%>
<% if ip && ip != "" %>IP="<%=ip%>"<% end %>
<% if private_range && private_range != "" %>PRIVATE_RANGE="<%=private_range%>"<% end %>
<% if mgmt_ip && mgmt_ip != "" %>MGMT_IP="<%=mgmt_ip%>"<% end %>
<% if dmz_ip && dmz_ip != "" %>DMZ_IP="<%=dmz_ip%>"<% end %>

# warning: this file is auto-generated by puppet

@@ -1,55 +0,0 @@
# global
--dmz_net=192.168.0.0
--dmz_mask=255.255.0.0
--dmz_cidr=192.168.0.0/16
--ldap_user_dn=cn=Administrators,dc=example,dc=com
--ldap_user_unit=Users
--ldap_user_subtree=ou=Users,dc=example,dc=com
--ldap_project_subtree=ou=Groups,dc=example,dc=com
--role_project_subtree=ou=Groups,dc=example,dc=com
--ldap_cloudadmin=cn=NovaAdmins,ou=Groups,dc=example,dc=com
--ldap_itsec=cn=NovaSecurity,ou=Groups,dc=example,dc=com
--ldap_sysadmin=cn=Administrators,ou=Groups,dc=example,dc=com
--ldap_netadmin=cn=Administrators,ou=Groups,dc=example,dc=com
--ldap_developer=cn=developers,ou=Groups,dc=example,dc=com
--verbose
--daemonize
--syslog
--networks_path=/var/lib/nova/networks
--instances_path=/var/lib/nova/instances
--buckets_path=/var/lib/nova/objectstore/buckets
--images_path=/var/lib/nova/objectstore/images
--scheduler_driver=nova.scheduler.simple.SimpleScheduler
--libvirt_xml_template=/usr/share/nova/libvirt.qemu.xml.template
--credentials_template=/usr/share/nova/novarc.template
--boot_script_template=/usr/share/nova/bootscript.template
--vpn_client_template=/usr/share/nova/client.ovpn.template
--max_cores=40
--max_gigabytes=2000
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--vpn_start=11000
--volume_group=vgdata
--volume_manager=nova.volume.manager.ISCSIManager
--volume_driver=nova.volume.driver.ISCSIDriver
--default_kernel=aki-DEFAULT
--default_ramdisk=ari-DEFAULT
--dhcpbridge=/usr/bin/nova-dhcpbridge
--vpn_image_id=ami-cloudpipe
--dhcpbridge_flagfile=/etc/nova/nova.conf
--credential_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=NOVA/CN=%s-%s
--auth_driver=nova.auth.ldapdriver.LdapDriver
--quota_cores=17
--quota_floating_ips=5
--quota_instances=6
--quota_volumes=10
--quota_gigabytes=100
--use_nova_chains=True
--input_chain=services
--use_project_ca=True
--fixed_ip_disassociate_timeout=300
--api_max_requests=1
--api_listen_ip=127.0.0.1
--user_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=%s-%s-%s
--project_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-ca-%s-%s
--vpn_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-vpn-%s-%s
@@ -1,21 +0,0 @@
--fixed_range=192.168.0.0/16
--iscsi_ip_prefix=192.168.4
--floating_range=10.0.0.0/24
--rabbit_host=192.168.0.10
--s3_host=192.168.0.10
--cc_host=192.168.0.10
--cc_dmz=192.168.24.10
--s3_dmz=192.168.24.10
--ec2_url=http://192.168.0.1:8773/services/Cloud
--vpn_ip=192.168.0.2
--ldap_url=ldap://192.168.0.10
--sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova
--other_sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova
--routing_source_ip=192.168.0.2
--bridge_dev=eth1
--public_interface=eth0
--vlan_start=3100
--num_networks=700
--rabbit_userid=TODO:RABBIT
--rabbit_password=TODO:CHANGEME
--ldap_password=TODO:CHANGEME
@@ -1,6 +1,3 @@
-[DEFAULT]
-verbose = 1
-
 #######
 # EC2 #
 #######
@@ -30,5 +30,3 @@
 .. moduleauthor:: Manish Singh <yosh@gimp.org>
 .. moduleauthor:: Andy Smith <andy@anarkystic.com>
 """
-
-from exception import *
@@ -20,7 +20,6 @@ Starting point for routing EC2 requests.
 
 """
 
-import datetime
 import webob
 import webob.dec
 import webob.exc
@@ -56,23 +55,20 @@ class RequestLogging(wsgi.Middleware):
 
     @webob.dec.wsgify
     def __call__(self, req):
+        start = utils.utcnow()
         rv = req.get_response(self.application)
-        self.log_request_completion(rv, req)
+        self.log_request_completion(rv, req, start)
         return rv
 
-    def log_request_completion(self, response, request):
+    def log_request_completion(self, response, request, start):
         controller = request.environ.get('ec2.controller', None)
         if controller:
             controller = controller.__class__.__name__
         action = request.environ.get('ec2.action', None)
         ctxt = request.environ.get('ec2.context', None)
-        seconds = 'X'
-        microseconds = 'X'
-        if ctxt:
-            delta = datetime.datetime.utcnow() - \
-                    ctxt.timestamp
-            seconds = delta.seconds
-            microseconds = delta.microseconds
+        delta = utils.utcnow() - start
+        seconds = delta.seconds
+        microseconds = delta.microseconds
         LOG.info(
             "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
             seconds,
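Note: the hunk above swaps per-request timing from the context timestamp to a start time captured in __call__. A minimal standalone sketch of the same measurement (names here are illustrative, not part of the patch):

import datetime

def elapsed(start):
    # Same arithmetic as log_request_completion: split the timedelta into
    # whole seconds plus leftover microseconds for the "%s.%ss" log format.
    delta = datetime.datetime.utcnow() - start
    return delta.seconds, delta.microseconds

start = datetime.datetime.utcnow()
# ... handle the request ...
seconds, microseconds = elapsed(start)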
@@ -294,7 +290,7 @@ class Authorizer(wsgi.Middleware):
             return True
         if 'none' in roles:
             return False
-        return any(context.project.has_role(context.user.id, role)
+        return any(context.project.has_role(context.user_id, role)
                    for role in roles)
 
 
@@ -20,6 +20,7 @@
 APIRequest class
 """
 
+import datetime
 import re
 # TODO(termie): replace minidom with etree
 from xml.dom import minidom
@@ -45,6 +46,11 @@ def _underscore_to_xmlcase(str):
     return res[:1].lower() + res[1:]
 
 
+def _database_to_isoformat(datetimeobj):
+    """Return a xs:dateTime parsable string from datatime"""
+    return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
 def _try_convert(value):
     """Return a non-string if possible"""
     if value == 'None':
@@ -171,6 +177,9 @@ class APIRequest(object):
             self._render_dict(xml, data_el, data.__dict__)
         elif isinstance(data, bool):
             data_el.appendChild(xml.createTextNode(str(data).lower()))
+        elif isinstance(data, datetime.datetime):
+            data_el.appendChild(
+                xml.createTextNode(_database_to_isoformat(data)))
         elif data != None:
             data_el.appendChild(xml.createTextNode(str(data)))
 
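Note: a quick check of the xs:dateTime formatting added above, assuming a naive UTC datetime as returned by the database:

import datetime

def _database_to_isoformat(datetimeobj):
    # Render a datetime as an xs:dateTime style string (UTC, 'Z' suffix).
    return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")

print _database_to_isoformat(datetime.datetime(2011, 2, 18, 20, 4, 5))
# -> 2011-02-18T20:04:05Z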
@@ -198,8 +198,9 @@ class CloudController(object):
         return self._describe_availability_zones(context, **kwargs)
 
     def _describe_availability_zones(self, context, **kwargs):
-        enabled_services = db.service_get_all(context)
-        disabled_services = db.service_get_all(context, True)
+        ctxt = context.elevated()
+        enabled_services = db.service_get_all(ctxt)
+        disabled_services = db.service_get_all(ctxt, True)
         available_zones = []
         for zone in [service.availability_zone for service
                      in enabled_services]:
@@ -282,7 +283,7 @@ class CloudController(object):
                     'description': 'fixme'}]}
 
     def describe_key_pairs(self, context, key_name=None, **kwargs):
-        key_pairs = db.key_pair_get_all_by_user(context, context.user.id)
+        key_pairs = db.key_pair_get_all_by_user(context, context.user_id)
         if not key_name is None:
             key_pairs = [x for x in key_pairs if x['name'] in key_name]
 
@@ -290,7 +291,7 @@ class CloudController(object):
         for key_pair in key_pairs:
             # filter out the vpn keys
             suffix = FLAGS.vpn_key_suffix
-            if context.user.is_admin() or \
+            if context.is_admin or \
                not key_pair['name'].endswith(suffix):
                 result.append({
                     'keyName': key_pair['name'],
@@ -301,7 +302,7 @@ class CloudController(object):
 
     def create_key_pair(self, context, key_name, **kwargs):
         LOG.audit(_("Create key pair %s"), key_name, context=context)
-        data = _gen_key(context, context.user.id, key_name)
+        data = _gen_key(context, context.user_id, key_name)
         return {'keyName': key_name,
                 'keyFingerprint': data['fingerprint'],
                 'keyMaterial': data['private_key']}
@@ -310,7 +311,7 @@ class CloudController(object):
     def delete_key_pair(self, context, key_name, **kwargs):
         LOG.audit(_("Delete key pair %s"), key_name, context=context)
         try:
-            db.key_pair_destroy(context, context.user.id, key_name)
+            db.key_pair_destroy(context, context.user_id, key_name)
         except exception.NotFound:
             # aws returns true even if the key doesn't exist
             pass
@@ -318,14 +319,19 @@ class CloudController(object):
 
     def describe_security_groups(self, context, group_name=None, **kwargs):
         self.compute_api.ensure_default_security_group(context)
-        if context.user.is_admin():
+        if group_name:
+            groups = []
+            for name in group_name:
+                group = db.security_group_get_by_name(context,
+                                                      context.project_id,
+                                                      name)
+                groups.append(group)
+        elif context.is_admin:
             groups = db.security_group_get_all(context)
         else:
             groups = db.security_group_get_by_project(context,
                                                       context.project_id)
         groups = [self._format_security_group(context, g) for g in groups]
-        if not group_name is None:
-            groups = [g for g in groups if g.name in group_name]
 
         return {'securityGroupInfo':
                 list(sorted(groups,
@@ -494,7 +500,7 @@ class CloudController(object):
         if db.security_group_exists(context, context.project_id, group_name):
             raise exception.ApiError(_('group %s already exists') % group_name)
 
-        group = {'user_id': context.user.id,
+        group = {'user_id': context.user_id,
                  'project_id': context.project_id,
                  'name': group_name,
                  'description': group_description}
@@ -529,8 +535,9 @@ class CloudController(object):
 
     def get_ajax_console(self, context, instance_id, **kwargs):
         ec2_id = instance_id[0]
-        internal_id = ec2_id_to_id(ec2_id)
-        return self.compute_api.get_ajax_console(context, internal_id)
+        instance_id = ec2_id_to_id(ec2_id)
+        return self.compute_api.get_ajax_console(context,
+                                                 instance_id=instance_id)
 
     def describe_volumes(self, context, volume_id=None, **kwargs):
         if volume_id:
@@ -669,12 +676,13 @@ class CloudController(object):
             instances = []
             for ec2_id in instance_id:
                 internal_id = ec2_id_to_id(ec2_id)
-                instance = self.compute_api.get(context, internal_id)
+                instance = self.compute_api.get(context,
+                                                instance_id=internal_id)
                 instances.append(instance)
         else:
             instances = self.compute_api.get_all(context, **kwargs)
         for instance in instances:
-            if not context.user.is_admin():
+            if not context.is_admin:
                 if instance['image_id'] == FLAGS.vpn_image_id:
                     continue
             i = {}
@@ -702,7 +710,7 @@ class CloudController(object):
             i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
             i['keyName'] = instance['key_name']
 
-            if context.user.is_admin():
+            if context.is_admin:
                 i['keyName'] = '%s (%s, %s)' % (i['keyName'],
                     instance['project_id'],
                     instance['host'])
@@ -736,7 +744,7 @@ class CloudController(object):
 
     def format_addresses(self, context):
         addresses = []
-        if context.user.is_admin():
+        if context.is_admin:
             iterator = db.floating_ip_get_all(context)
         else:
             iterator = db.floating_ip_get_all_by_project(context,
@@ -750,7 +758,7 @@ class CloudController(object):
                 ec2_id = id_to_ec2_id(instance_id)
             address_rv = {'public_ip': address,
                           'instance_id': ec2_id}
-            if context.user.is_admin():
+            if context.is_admin:
                 details = "%s (%s)" % (address_rv['instance_id'],
                                        floating_ip_ref['project_id'])
                 address_rv['instance_id'] = details
@@ -19,7 +19,6 @@ import datetime
 import hashlib
 import json
 import time
-import logging
 
 import webob.exc
 import webob.dec
@@ -15,7 +15,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import logging
 import time
 
 from webob import exc
@@ -15,8 +15,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import logging
-
 from webob import exc
 
 from nova import compute
@@ -33,7 +33,6 @@ import nova.api.openstack
 
 
 LOG = logging.getLogger('server')
-LOG.setLevel(logging.DEBUG)
 
 
 FLAGS = flags.FLAGS
@@ -65,23 +64,21 @@ def _translate_detail_keys(inst):
     inst_dict['addresses'] = dict(public=[], private=[])
 
     # grab single private fixed ip
-    try:
-        private_ip = inst['fixed_ip']['address']
-        if private_ip:
-            inst_dict['addresses']['private'].append(private_ip)
-    except KeyError:
-        LOG.debug(_("Failed to read private ip"))
+    private_ips = utils.get_from_path(inst, 'fixed_ip/address')
+    inst_dict['addresses']['private'] = private_ips
 
     # grab all public floating ips
-    try:
-        for floating in inst['fixed_ip']['floating_ips']:
-            inst_dict['addresses']['public'].append(floating['address'])
-    except KeyError:
-        LOG.debug(_("Failed to read public ip(s)"))
+    public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+    inst_dict['addresses']['public'] = public_ips
 
-    inst_dict['metadata'] = {}
     inst_dict['hostId'] = ''
 
+    # Return the metadata as a dictionary
+    metadata = {}
+    for item in inst['metadata']:
+        metadata[item['key']] = item['value']
+    inst_dict['metadata'] = metadata
+
     return dict(server=inst_dict)
 
 
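Note: the rewrite above leans on utils.get_from_path to walk 'fixed_ip/floating_ips/address' style paths. A simplified re-implementation of the idea (this is a sketch of the intended behaviour, not the actual nova.utils code):

def get_from_path_sketch(items, path):
    # Walk a '/'-separated path over dicts/lists, collecting leaf values and
    # skipping missing keys, so callers get [] instead of a KeyError.
    if not isinstance(items, list):
        items = [items]
    first, _sep, rest = path.partition('/')
    results = []
    for item in items:
        if item is None:
            continue
        value = item.get(first) if hasattr(item, 'get') else None
        if value is None:
            continue
        if rest:
            results.extend(get_from_path_sketch(value, rest))
        elif isinstance(value, list):
            results.extend(value)
        else:
            results.append(value)
    return results

# e.g. get_from_path_sketch(inst, 'fixed_ip/address') -> ['10.0.0.3'] or []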
@@ -153,9 +150,10 @@ class Controller(wsgi.Controller):
             try:
                 return image['properties'][param]
             except KeyError:
-                raise exception.NotFound(
+                LOG.debug(
                     _("%(param)s property not found for image %(_image_id)s") %
                     locals())
+                return None
 
         image = self._image_service.show(req.environ['nova.context'], image_id)
         return lookup(image, 'kernel_id'), lookup(image, 'ramdisk_id')
@@ -166,14 +164,29 @@
         if not env:
             return faults.Fault(exc.HTTPUnprocessableEntity())
 
-        key_pair = auth_manager.AuthManager.get_key_pairs(
-            req.environ['nova.context'])[0]
+        context = req.environ['nova.context']
+        key_pairs = auth_manager.AuthManager.get_key_pairs(context)
+        if not key_pairs:
+            raise exception.NotFound(_("No keypairs defined"))
+        key_pair = key_pairs[0]
+
         image_id = common.get_image_id_from_image_hash(self._image_service,
-            req.environ['nova.context'], env['server']['imageId'])
+            context, env['server']['imageId'])
         kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
             req, image_id)
+
+        # Metadata is a list, not a Dictionary, because we allow duplicate keys
+        # (even though JSON can't encode this)
+        # In future, we may not allow duplicate keys.
+        # However, the CloudServers API is not definitive on this front,
+        # and we want to be compatible.
+        metadata = []
+        if env['server'].get('metadata'):
+            for k, v in env['server']['metadata'].items():
+                metadata.append({'key': k, 'value': v})
+
         instances = self.compute_api.create(
-            req.environ['nova.context'],
+            context,
             instance_types.get_by_flavor_id(env['server']['flavorId']),
             image_id,
             kernel_id=kernel_id,
@@ -182,6 +195,7 @@ class Controller(wsgi.Controller):
             display_description=env['server']['name'],
             key_name=key_pair['name'],
             key_data=key_pair['public_key'],
+            metadata=metadata,
             onset_files=env.get('onset_files', []))
         return _translate_keys(instances[0])
 
@@ -15,8 +15,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import logging
-
 from webob import exc
 
 from nova import wsgi
@@ -14,7 +14,6 @@
 # under the License.
 
 import common
-import logging
 
 from nova import flags
 from nova import wsgi
@@ -85,7 +85,7 @@ class API(base.Base):
                min_count=1, max_count=1,
                display_name='', display_description='',
                key_name=None, key_data=None, security_group='default',
-               availability_zone=None, user_data=None,
+               availability_zone=None, user_data=None, metadata=[],
                onset_files=None):
         """Create the number of instances requested if quota and
         other arguments check out ok.
@@ -100,27 +100,48 @@ class API(base.Base):
                     "run %s more instances of this type.") %
                     num_instances, "InstanceLimitExceeded")
 
-        is_vpn = image_id == FLAGS.vpn_image_id
-        if not is_vpn:
-            image = self.image_service.show(context, image_id)
-            if kernel_id is None:
-                kernel_id = image.get('kernel_id', None)
-            if ramdisk_id is None:
-                ramdisk_id = image.get('ramdisk_id', None)
-            # FIXME(sirp): is there a way we can remove null_kernel?
-            # No kernel and ramdisk for raw images
-            if kernel_id == str(FLAGS.null_kernel):
-                kernel_id = None
-                ramdisk_id = None
-                LOG.debug(_("Creating a raw instance"))
-            # Make sure we have access to kernel and ramdisk (if not raw)
-            logging.debug("Using Kernel=%s, Ramdisk=%s" %
-                          (kernel_id, ramdisk_id))
-            if kernel_id:
-                self.image_service.show(context, kernel_id)
-            if ramdisk_id:
-                self.image_service.show(context, ramdisk_id)
+        num_metadata = len(metadata)
+        quota_metadata = quota.allowed_metadata_items(context, num_metadata)
+        if quota_metadata < num_metadata:
+            pid = context.project_id
+            msg = (_("Quota exceeeded for %(pid)s,"
+                     " tried to set %(num_metadata)s metadata properties")
+                   % locals())
+            LOG.warn(msg)
+            raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
+        # Because metadata is stored in the DB, we hard-code the size limits
+        # In future, we may support more variable length strings, so we act
+        # as if this is quota-controlled for forwards compatibility
+        for metadata_item in metadata:
+            k = metadata_item['key']
+            v = metadata_item['value']
+            if len(k) > 255 or len(v) > 255:
+                pid = context.project_id
+                msg = (_("Quota exceeeded for %(pid)s,"
+                         " metadata property key or value too long")
+                       % locals())
+                LOG.warn(msg)
+                raise quota.QuotaError(msg, "MetadataLimitExceeded")
 
+        image = self.image_service.show(context, image_id)
+        if kernel_id is None:
+            kernel_id = image.get('kernel_id', None)
+        if ramdisk_id is None:
+            ramdisk_id = image.get('ramdisk_id', None)
+        # FIXME(sirp): is there a way we can remove null_kernel?
+        # No kernel and ramdisk for raw images
+        if kernel_id == str(FLAGS.null_kernel):
+            kernel_id = None
+            ramdisk_id = None
+            LOG.debug(_("Creating a raw instance"))
+        # Make sure we have access to kernel and ramdisk (if not raw)
+        logging.debug("Using Kernel=%s, Ramdisk=%s" %
+                      (kernel_id, ramdisk_id))
+        if kernel_id:
+            self.image_service.show(context, kernel_id)
+        if ramdisk_id:
+            self.image_service.show(context, ramdisk_id)
 
         if security_group is None:
             security_group = ['default']
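Note: a condensed sketch of the two metadata checks introduced above (count against the quota, then a 255-character cap on keys and values). The helper name and the use of ValueError are illustrative only:

def check_metadata_sketch(metadata, quota_allowed):
    # Reject requests that exceed the per-instance item quota.
    if quota_allowed < len(metadata):
        raise ValueError("MetadataLimitExceeded: too many items")
    # Keys and values land in 255-char DB columns, so cap their length.
    for item in metadata:
        if len(item['key']) > 255 or len(item['value']) > 255:
            raise ValueError("MetadataLimitExceeded: key or value too long")

check_metadata_sketch([{'key': 'role', 'value': 'webhead'}], quota_allowed=128)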
@@ -158,6 +179,7 @@ class API(base.Base):
             'key_name': key_name,
             'key_data': key_data,
             'locked': False,
+            'metadata': metadata,
             'availability_zone': availability_zone}
         elevated = context.elevated()
         instances = []
@@ -451,7 +473,7 @@ class API(base.Base):
                  {'method': 'authorize_ajax_console',
                   'args': {'token': output['token'], 'host': output['host'],
                            'port': output['port']}})
-        return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
+        return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url,
                                         output['token'])}
 
     def get_console_output(self, context, instance_id):
@@ -20,11 +20,11 @@ Console Proxy Service
 """
 
 import functools
-import logging
 import socket
 
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import manager
 from nova import rpc
 from nova import utils
@@ -20,7 +20,6 @@ XVP (Xenserver VNC Proxy) driver.
 """
 
 import fcntl
-import logging
 import os
 import signal
 import subprocess
@@ -31,6 +30,7 @@ from nova import context
 from nova import db
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import utils
 
 flags.DEFINE_string('console_xvp_conf_template',
@@ -85,8 +85,8 @@ def service_get(context, service_id):
 
 
 def service_get_all(context, disabled=False):
-    """Get all service."""
-    return IMPL.service_get_all(context, None, disabled)
+    """Get all services."""
+    return IMPL.service_get_all(context, disabled)
 
 
 def service_get_all_by_topic(context, topic):
@@ -136,15 +136,12 @@ def service_get(context, service_id, session=None):
 
 
 @require_admin_context
-def service_get_all(context, session=None, disabled=False):
-    if not session:
-        session = get_session()
-
-    result = session.query(models.Service).\
+def service_get_all(context, disabled=False):
+    session = get_session()
+    return session.query(models.Service).\
                    filter_by(deleted=can_read_deleted(context)).\
                    filter_by(disabled=disabled).\
                    all()
-    return result
 
 
 @require_admin_context
@@ -715,6 +712,7 @@ def instance_get(context, instance_id, session=None):
                          options(joinedload_all('security_groups.rules')).\
                          options(joinedload('volumes')).\
                          options(joinedload_all('fixed_ip.network')).\
+                         options(joinedload('metadata')).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=can_read_deleted(context)).\
                          first()
@@ -723,6 +721,7 @@ def instance_get(context, instance_id, session=None):
                          options(joinedload_all('fixed_ip.floating_ips')).\
                          options(joinedload_all('security_groups.rules')).\
                          options(joinedload('volumes')).\
+                         options(joinedload('metadata')).\
                          filter_by(project_id=context.project_id).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=False).\
@@ -1046,7 +1045,8 @@ def network_create_safe(context, values):
 
 @require_admin_context
 def network_disassociate(context, network_id):
-    network_update(context, network_id, {'project_id': None})
+    network_update(context, network_id, {'project_id': None,
+                                         'host': None})
 
 
 @require_admin_context
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+quotas = Table('quotas', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+
+#
+# New Tables
+#
+
+instance_metadata_table = Table('instance_metadata', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('instance_id',
+               Integer(),
+               ForeignKey('instances.id'),
+               nullable=False),
+        Column('key',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('value',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)))
+
+
+#
+# New columns
+#
+quota_metadata_items = Column('metadata_items', Integer())
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+    for table in (instance_metadata_table, ):
+        try:
+            table.create()
+        except Exception:
+            logging.info(repr(table))
+            logging.exception('Exception while creating table')
+            raise
+
+    quotas.create_column(quota_metadata_items)
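Note: the new migration above only defines upgrade(). If a matching downgrade() were wanted, a sketch under the sqlalchemy-migrate API (assumed here, not part of this commit) would simply mirror it:

def downgrade(migrate_engine):
    # Reverse of upgrade(): drop the added quota column and the new table.
    meta.bind = migrate_engine
    quotas.drop_column(quota_metadata_items)
    instance_metadata_table.drop()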
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+volumes = Table('volumes', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+
+#
+# New Tables
+#
+# None
+
+#
+# Tables to alter
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+
+volumes_provider_location = Column('provider_location',
+                                   String(length=256,
+                                          convert_unicode=False,
+                                          assert_unicode=None,
+                                          unicode_error=None,
+                                          _warn_on_bytestring=False))
+
+volumes_provider_auth = Column('provider_auth',
+                               String(length=256,
+                                      convert_unicode=False,
+                                      assert_unicode=None,
+                                      unicode_error=None,
+                                      _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+
+    # Add columns to existing tables
+    volumes.create_column(volumes_provider_location)
+    volumes.create_column(volumes_provider_auth)
@@ -243,6 +243,9 @@ class Volume(BASE, NovaBase):
     display_name = Column(String(255))
     display_description = Column(String(255))
 
+    provider_location = Column(String(255))
+    provider_auth = Column(String(255))
+
 
 class Quota(BASE, NovaBase):
     """Represents quota overrides for a project."""
@@ -256,6 +259,7 @@ class Quota(BASE, NovaBase):
     volumes = Column(Integer)
     gigabytes = Column(Integer)
     floating_ips = Column(Integer)
+    metadata_items = Column(Integer)
 
 
 class ExportDevice(BASE, NovaBase):
@@ -536,6 +540,20 @@ class Console(BASE, NovaBase):
     pool = relationship(ConsolePool, backref=backref('consoles'))
 
 
+class InstanceMetadata(BASE, NovaBase):
+    """Represents a metadata key/value pair for an instance"""
+    __tablename__ = 'instance_metadata'
+    id = Column(Integer, primary_key=True)
+    key = Column(String(255))
+    value = Column(String(255))
+    instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+    instance = relationship(Instance, backref="metadata",
+                            foreign_keys=instance_id,
+                            primaryjoin='and_('
+                                'InstanceMetadata.instance_id == Instance.id,'
+                                'InstanceMetadata.deleted == False)')
+
+
 class Zone(BASE, NovaBase):
     """Represents a child zone of this zone."""
     __tablename__ = 'zones'
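Note: with the InstanceMetadata model above, an instance's metadata comes back as a list of key/value rows via the 'metadata' backref; collapsing it to a dict, as the OpenStack API layer in this same diff does, looks like:

def metadata_to_dict(instance):
    # instance['metadata'] is a list of InstanceMetadata rows; duplicate keys
    # collapse to the last value seen.
    metadata = {}
    for item in instance['metadata']:
        metadata[item['key']] = item['value']
    return metadata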
@@ -557,7 +575,8 @@ def register_models():
               Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
               Network, SecurityGroup, SecurityGroupIngressRule,
               SecurityGroupInstanceAssociation, AuthToken, User,
-              Project, Certificate, ConsolePool, Console, Zone)
+              Project, Certificate, ConsolePool, Console, Zone,
+              InstanceMetadata)
     engine = create_engine(FLAGS.sql_connection, echo=False)
     for model in models:
         model.metadata.create_all(engine)
@@ -160,9 +160,45 @@ class StrWrapper(object):
         raise KeyError(name)
 
 
-FLAGS = FlagValues()
-gflags.FLAGS = FLAGS
-gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
+# Copied from gflags with small mods to get the naming correct.
+# Originally gflags checks for the first module that is not gflags that is
+# in the call chain, we want to check for the first module that is not gflags
+# and not this module.
+def _GetCallingModule():
+    """Returns the name of the module that's calling into this module.
+
+    We generally use this function to get the name of the module calling a
+    DEFINE_foo... function.
+    """
+    # Walk down the stack to find the first globals dict that's not ours.
+    for depth in range(1, sys.getrecursionlimit()):
+        if not sys._getframe(depth).f_globals is globals():
+            module_name = __GetModuleName(sys._getframe(depth).f_globals)
+            if module_name == 'gflags':
+                continue
+            if module_name is not None:
+                return module_name
+    raise AssertionError("No module was found")
+
+
+# Copied from gflags because it is a private function
+def __GetModuleName(globals_dict):
+    """Given a globals dict, returns the name of the module that defines it.
+
+    Args:
+        globals_dict: A dictionary that should correspond to an environment
+                      providing the values of the globals.
+
+    Returns:
+        A string (the name of the module) or None (if the module could not
+        be identified.
+    """
+    for name, module in sys.modules.iteritems():
+        if getattr(module, '__dict__', None) is globals_dict:
+            if name == '__main__':
+                return sys.argv[0]
+            return name
+    return None
 
 
 def _wrapper(func):
@@ -173,6 +209,11 @@ def _wrapper(func):
     return _wrapped
 
 
+FLAGS = FlagValues()
+gflags.FLAGS = FLAGS
+gflags._GetCallingModule = _GetCallingModule
+
+
 DEFINE = _wrapper(gflags.DEFINE)
 DEFINE_string = _wrapper(gflags.DEFINE_string)
 DEFINE_integer = _wrapper(gflags.DEFINE_integer)
@@ -185,8 +226,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
 DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
 DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
 DEFINE_flag = _wrapper(gflags.DEFINE_flag)
-
-
 HelpFlag = gflags.HelpFlag
 HelpshortFlag = gflags.HelpshortFlag
 HelpXMLFlag = gflags.HelpXMLFlag
134  nova/log.py
@@ -65,6 +65,7 @@ flags.DEFINE_string('logging_exception_prefix',
 flags.DEFINE_list('default_log_levels',
                   ['amqplib=WARN',
                    'sqlalchemy=WARN',
+                   'boto=WARN',
                    'eventlet.wsgi.server=WARN'],
                   'list of logger=LEVEL pairs')
 
@@ -94,7 +95,7 @@ critical = logging.critical
 log = logging.log
 # handlers
 StreamHandler = logging.StreamHandler
-RotatingFileHandler = logging.handlers.RotatingFileHandler
+WatchedFileHandler = logging.handlers.WatchedFileHandler
 # logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler.
 SysLogHandler = logging.handlers.SysLogHandler
 
@@ -117,7 +118,7 @@ def _get_binary_name():
     return os.path.basename(inspect.stack()[-1][1])
 
 
-def get_log_file_path(binary=None):
+def _get_log_file_path(binary=None):
     if FLAGS.logfile:
         return FLAGS.logfile
     if FLAGS.logdir:
@@ -125,25 +126,6 @@ def get_log_file_path(binary=None):
         return '%s.log' % (os.path.join(FLAGS.logdir, binary),)
 
 
-def basicConfig():
-    logging.basicConfig()
-    for handler in logging.root.handlers:
-        handler.setFormatter(_formatter)
-    if FLAGS.verbose:
-        logging.root.setLevel(logging.DEBUG)
-    else:
-        logging.root.setLevel(logging.INFO)
-    if FLAGS.use_syslog:
-        syslog = SysLogHandler(address='/dev/log')
-        syslog.setFormatter(_formatter)
-        logging.root.addHandler(syslog)
-    logpath = get_log_file_path()
-    if logpath:
-        logfile = RotatingFileHandler(logpath)
-        logfile.setFormatter(_formatter)
-        logging.root.addHandler(logfile)
-
-
 class NovaLogger(logging.Logger):
     """
     NovaLogger manages request context and formatting.
@@ -151,23 +133,19 @@ class NovaLogger(logging.Logger):
     This becomes the class that is instanciated by logging.getLogger.
     """
     def __init__(self, name, level=NOTSET):
-        level_name = self._get_level_from_flags(name, FLAGS)
-        level = globals()[level_name]
         logging.Logger.__init__(self, name, level)
+        self.setup_from_flags()
 
-    def _get_level_from_flags(self, name, FLAGS):
-        # if exactly "nova", or a child logger, honor the verbose flag
-        if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
-            return 'DEBUG'
+    def setup_from_flags(self):
+        """Setup logger from flags"""
+        level = NOTSET
         for pair in FLAGS.default_log_levels:
-            logger, _sep, level = pair.partition('=')
+            logger, _sep, level_name = pair.partition('=')
             # NOTE(todd): if we set a.b, we want a.b.c to have the same level
             # (but not a.bc, so we check the dot)
-            if name == logger:
-                return level
-            if name.startswith(logger) and name[len(logger)] == '.':
-                return level
-        return 'INFO'
+            if self.name == logger or self.name.startswith("%s." % logger):
+                level = globals()[level_name]
+        self.setLevel(level)
 
     def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
         """Extract context from any log call"""
@@ -176,12 +154,12 @@ class NovaLogger(logging.Logger):
         if context:
             extra.update(_dictify_context(context))
         extra.update({"nova_version": version.version_string_with_vcs()})
-        logging.Logger._log(self, level, msg, args, exc_info, extra)
+        return logging.Logger._log(self, level, msg, args, exc_info, extra)
 
     def addHandler(self, handler):
         """Each handler gets our custom formatter"""
         handler.setFormatter(_formatter)
-        logging.Logger.addHandler(self, handler)
+        return logging.Logger.addHandler(self, handler)
 
     def audit(self, msg, *args, **kwargs):
         """Shortcut for our AUDIT level"""
@@ -208,23 +186,6 @@ class NovaLogger(logging.Logger):
         self.error(message, **kwargs)
 
 
-def handle_exception(type, value, tb):
-    logging.root.critical(str(value), exc_info=(type, value, tb))
-
-
-sys.excepthook = handle_exception
-logging.setLoggerClass(NovaLogger)
-
-
-class NovaRootLogger(NovaLogger):
-    pass
-
-if not isinstance(logging.root, NovaRootLogger):
-    logging.root = NovaRootLogger("nova.root", WARNING)
-    NovaLogger.root = logging.root
-    NovaLogger.manager.root = logging.root
-
-
 class NovaFormatter(logging.Formatter):
     """
     A nova.context.RequestContext aware formatter configured through flags.
@@ -271,8 +232,73 @@ class NovaFormatter(logging.Formatter):
 _formatter = NovaFormatter()
 
 
+class NovaRootLogger(NovaLogger):
+    def __init__(self, name, level=NOTSET):
+        self.logpath = None
+        self.filelog = None
+        self.streamlog = StreamHandler()
+        self.syslog = None
+        NovaLogger.__init__(self, name, level)
+
+    def setup_from_flags(self):
+        """Setup logger from flags"""
+        global _filelog
+        if FLAGS.use_syslog:
+            self.syslog = SysLogHandler(address='/dev/log')
+            self.addHandler(self.syslog)
+        elif self.syslog:
+            self.removeHandler(self.syslog)
+        logpath = _get_log_file_path()
+        if logpath:
+            self.removeHandler(self.streamlog)
+            if logpath != self.logpath:
+                self.removeHandler(self.filelog)
+                self.filelog = WatchedFileHandler(logpath)
+                self.addHandler(self.filelog)
+                self.logpath = logpath
+        else:
+            self.removeHandler(self.filelog)
+            self.addHandler(self.streamlog)
+        if FLAGS.verbose:
+            self.setLevel(DEBUG)
+        else:
+            self.setLevel(INFO)
+
+
+def handle_exception(type, value, tb):
+    logging.root.critical(str(value), exc_info=(type, value, tb))
+
+
+def reset():
+    """Resets logging handlers. Should be called if FLAGS changes."""
+    for logger in NovaLogger.manager.loggerDict.itervalues():
+        if isinstance(logger, NovaLogger):
+            logger.setup_from_flags()
+
+
+def setup():
+    """Setup nova logging."""
+    if not isinstance(logging.root, NovaRootLogger):
+        logging._acquireLock()
+        for handler in logging.root.handlers:
+            logging.root.removeHandler(handler)
+        logging.root = NovaRootLogger("nova")
+        NovaLogger.root = logging.root
+        NovaLogger.manager.root = logging.root
+        for logger in NovaLogger.manager.loggerDict.itervalues():
+            logger.root = logging.root
+            if isinstance(logger, logging.Logger):
+                NovaLogger.manager._fixupParents(logger)
+        NovaLogger.manager.loggerDict["nova"] = logging.root
+        logging._releaseLock()
+        sys.excepthook = handle_exception
+        reset()
+
+
+root = logging.root
+logging.setLoggerClass(NovaLogger)
+
+
 def audit(msg, *args, **kwargs):
     """Shortcut for logging to root log with sevrity 'AUDIT'."""
-    if len(logging.root.handlers) == 0:
-        basicConfig()
     logging.root.log(AUDIT, msg, *args, **kwargs)
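Note: a minimal sketch of how a nova binary would be expected to use the new module-level helpers above (flag parsing shown generically; the entry-point details and the logger name are illustrative, not part of the patch):

import sys

from nova import flags
from nova import log as logging

if __name__ == '__main__':
    flags.FLAGS(sys.argv)    # parse flags first so setup() sees them
    logging.setup()          # installs NovaRootLogger and applies flag levels
    LOG = logging.getLogger('nova.example')
    LOG.debug('logging configured from flags')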
@@ -322,6 +322,16 @@ class FlatManager(NetworkManager):
     """
     timeout_fixed_ips = False
 
+    def init_host(self):
+        """Do any initialization that needs to be run if this is a
+           standalone service.
+        """
+        #Fix for bug 723298 - do not call init_host on superclass
+        #Following code has been copied for NetworkManager.init_host
+        ctxt = context.get_admin_context()
+        for network in self.db.host_get_networks(ctxt, self.host):
+            self._on_set_network_host(ctxt, network['id'])
+
     def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
         """Gets a fixed ip from the pool."""
         # TODO(vish): when this is called by compute, we can associate compute
@@ -359,6 +369,7 @@ class FlatManager(NetworkManager):
             project_net = IPy.IP(cidr)
             net = {}
             net['bridge'] = FLAGS.flat_network_bridge
+            net['dns'] = FLAGS.flat_network_dns
             net['cidr'] = cidr
             net['netmask'] = str(project_net.netmask())
             net['gateway'] = str(project_net[1])
@@ -406,6 +417,22 @@ class FlatManager(NetworkManager):
         net['dns'] = FLAGS.flat_network_dns
         self.db.network_update(context, network_id, net)
 
+    def allocate_floating_ip(self, context, project_id):
+        #Fix for bug 723298
+        raise NotImplementedError()
+
+    def associate_floating_ip(self, context, floating_address, fixed_address):
+        #Fix for bug 723298
+        raise NotImplementedError()
+
+    def disassociate_floating_ip(self, context, floating_address):
+        #Fix for bug 723298
+        raise NotImplementedError()
+
+    def deallocate_floating_ip(self, context, floating_address):
+        #Fix for bug 723298
+        raise NotImplementedError()
+
 
 class FlatDHCPManager(FlatManager):
     """Flat networking with dhcp.
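Note: with the stubs above, floating-IP calls on FlatManager now fail fast. An illustrative caller-side sketch (constructor arguments and the context variable are omitted/assumed, not from the patch):

manager = FlatManager()
try:
    manager.allocate_floating_ip(ctxt, 'myproject')
except NotImplementedError:
    # Flat networking has no floating IP pool; a manager that supports
    # floating IPs would be needed instead.
    pass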
@@ -107,7 +107,7 @@ class Bucket(object):
 
     def is_authorized(self, context):
         try:
-            return context.user.is_admin() or \
+            return context.is_admin or \
                    self.owner_id == context.project_id
         except Exception, e:
             return False
@@ -69,7 +69,7 @@ class Image(object):
         # but only modified by admin or owner.
         try:
             return (self.metadata['isPublic'] and readonly) or \
-                   context.user.is_admin() or \
+                   context.is_admin or \
                    self.metadata['imageOwnerId'] == context.project_id
         except:
             return False
@@ -35,6 +35,8 @@ flags.DEFINE_integer('quota_gigabytes', 1000,
                      'number of volume gigabytes allowed per project')
 flags.DEFINE_integer('quota_floating_ips', 10,
                      'number of floating ips allowed per project')
+flags.DEFINE_integer('quota_metadata_items', 128,
+                     'number of metadata items allowed per instance')
 
 
 def get_quota(context, project_id):
@@ -42,7 +44,8 @@ def get_quota(context, project_id):
             'cores': FLAGS.quota_cores,
             'volumes': FLAGS.quota_volumes,
             'gigabytes': FLAGS.quota_gigabytes,
-            'floating_ips': FLAGS.quota_floating_ips}
+            'floating_ips': FLAGS.quota_floating_ips,
+            'metadata_items': FLAGS.quota_metadata_items}
     try:
         quota = db.quota_get(context, project_id)
         for key in rval.keys():
@@ -94,6 +97,15 @@ def allowed_floating_ips(context, num_floating_ips):
     return min(num_floating_ips, allowed_floating_ips)
 
 
+def allowed_metadata_items(context, num_metadata_items):
+    """Check quota; return min(num_metadata_items,allowed_metadata_items)"""
+    project_id = context.project_id
+    context = context.elevated()
+    quota = get_quota(context, project_id)
+    num_allowed_metadata_items = quota['metadata_items']
+    return min(num_metadata_items, num_allowed_metadata_items)
+
+
 class QuotaError(exception.ApiError):
     """Quota Exceeeded"""
     pass
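Note: allowed_metadata_items follows the same pattern as the other quota helpers; typical use from the compute layer, mirroring the compute/api.py hunk earlier in this diff (the error message string here is shortened for illustration):

num_metadata = len(metadata)
quota_metadata = quota.allowed_metadata_items(context, num_metadata)
if quota_metadata < num_metadata:
    raise quota.QuotaError("too many metadata items", "MetadataLimitExceeded")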
@@ -45,19 +45,10 @@ FLAGS = flags.FLAGS
 flags.DEFINE_integer('report_interval', 10,
                      'seconds between nodes reporting state to datastore',
                      lower_bound=1)
 
 flags.DEFINE_integer('periodic_interval', 60,
                      'seconds between running periodic tasks',
                      lower_bound=1)
 
-flags.DEFINE_string('pidfile', None,
-                    'pidfile to use for this service')
-
-
-flags.DEFINE_flag(flags.HelpFlag())
-flags.DEFINE_flag(flags.HelpshortFlag())
-flags.DEFINE_flag(flags.HelpXMLFlag())
-
-
 class Service(object):
     """Base class for workers that run on hosts."""
@@ -68,6 +59,8 @@ class Service(object):
|
|||||||
self.binary = binary
|
self.binary = binary
|
||||||
self.topic = topic
|
self.topic = topic
|
||||||
self.manager_class_name = manager
|
self.manager_class_name = manager
|
||||||
|
manager_class = utils.import_class(self.manager_class_name)
|
||||||
|
self.manager = manager_class(host=self.host, *args, **kwargs)
|
||||||
self.report_interval = report_interval
|
self.report_interval = report_interval
|
||||||
self.periodic_interval = periodic_interval
|
self.periodic_interval = periodic_interval
|
||||||
super(Service, self).__init__(*args, **kwargs)
|
super(Service, self).__init__(*args, **kwargs)
|
||||||
@@ -75,9 +68,9 @@ class Service(object):
|
|||||||
self.timers = []
|
self.timers = []
|
||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
manager_class = utils.import_class(self.manager_class_name)
|
vcs_string = version.version_string_with_vcs()
|
||||||
self.manager = manager_class(host=self.host, *self.saved_args,
|
logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"),
|
||||||
**self.saved_kwargs)
|
{'topic': self.topic, 'vcs_string': vcs_string})
|
||||||
self.manager.init_host()
|
self.manager.init_host()
|
||||||
self.model_disconnected = False
|
self.model_disconnected = False
|
||||||
ctxt = context.get_admin_context()
|
ctxt = context.get_admin_context()
|
||||||
@@ -157,9 +150,6 @@ class Service(object):
|
|||||||
report_interval = FLAGS.report_interval
|
report_interval = FLAGS.report_interval
|
||||||
if not periodic_interval:
|
if not periodic_interval:
|
||||||
periodic_interval = FLAGS.periodic_interval
|
periodic_interval = FLAGS.periodic_interval
|
||||||
vcs_string = version.version_string_with_vcs()
|
|
||||||
logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)")
|
|
||||||
% locals())
|
|
||||||
service_obj = cls(host, binary, topic, manager,
|
service_obj = cls(host, binary, topic, manager,
|
||||||
report_interval, periodic_interval)
|
report_interval, periodic_interval)
|
||||||
|
|
||||||
@@ -181,6 +171,13 @@ class Service(object):
|
|||||||
pass
|
pass
|
||||||
self.timers = []
|
self.timers = []
|
||||||
|
|
||||||
|
def wait(self):
|
||||||
|
for x in self.timers:
|
||||||
|
try:
|
||||||
|
x.wait()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
def periodic_tasks(self):
|
def periodic_tasks(self):
|
||||||
"""Tasks to be run at a periodic interval"""
|
"""Tasks to be run at a periodic interval"""
|
||||||
self.manager.periodic_tasks(context.get_admin_context())
|
self.manager.periodic_tasks(context.get_admin_context())
|
||||||
@@ -214,11 +211,19 @@ class Service(object):
|
|||||||
|
|
||||||
|
|
||||||
def serve(*services):
|
def serve(*services):
|
||||||
FLAGS(sys.argv)
|
try:
|
||||||
logging.basicConfig()
|
if not services:
|
||||||
|
services = [Service.create()]
|
||||||
if not services:
|
except Exception:
|
||||||
services = [Service.create()]
|
logging.exception('in Service.create()')
|
||||||
|
raise
|
||||||
|
finally:
|
||||||
|
# After we've loaded up all our dynamic bits, check
|
||||||
|
# whether we should print help
|
||||||
|
flags.DEFINE_flag(flags.HelpFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpshortFlag())
|
||||||
|
flags.DEFINE_flag(flags.HelpXMLFlag())
|
||||||
|
FLAGS.ParseNewFlags()
|
||||||
|
|
||||||
name = '_'.join(x.binary for x in services)
|
name = '_'.join(x.binary for x in services)
|
||||||
logging.debug(_("Serving %s"), name)
|
logging.debug(_("Serving %s"), name)
|
||||||
|
|||||||
55 nova/test.py
@@ -23,6 +23,7 @@ and some black magic for inline callbacks.
 """

 import datetime
+import uuid
 import unittest

 import mox
@@ -32,9 +33,10 @@ from nova import context
 from nova import db
 from nova import fakerabbit
 from nova import flags
+from nova import log as logging
 from nova import rpc
+from nova import service
 from nova.network import manager as network_manager
-from nova.tests import fake_flags


 FLAGS = flags.FLAGS
@@ -80,6 +82,7 @@ class TestCase(unittest.TestCase):
         self.stubs = stubout.StubOutForTesting()
         self.flag_overrides = {}
         self.injected = []
+        self._services = []
         self._monkey_patch_attach()
         self._original_flags = FLAGS.FlagValuesDict()

@@ -91,25 +94,42 @@ class TestCase(unittest.TestCase):
             self.stubs.UnsetAll()
             self.stubs.SmartUnsetAll()
             self.mox.VerifyAll()
-            # NOTE(vish): Clean up any ips associated during the test.
-            ctxt = context.get_admin_context()
-            db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
-                                                    self.start)
-            db.network_disassociate_all(ctxt)
+            super(TestCase, self).tearDown()
+        finally:
+            try:
+                # Clean up any ips associated during the test.
+                ctxt = context.get_admin_context()
+                db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
+                                                        self.start)
+                db.network_disassociate_all(ctxt)

+                db.security_group_destroy_all(ctxt)
+            except Exception:
+                pass

+            # Clean out fake_rabbit's queue if we used it
+            if FLAGS.fake_rabbit:
+                fakerabbit.reset_all()

+            # Reset any overriden flags
+            self.reset_flags()

+            # Reset our monkey-patches
             rpc.Consumer.attach_to_eventlet = self.originalAttach

+            # Stop any timers
             for x in self.injected:
                 try:
                     x.stop()
                 except AssertionError:
                     pass

-            if FLAGS.fake_rabbit:
-                fakerabbit.reset_all()
-            db.security_group_destroy_all(ctxt)
-            super(TestCase, self).tearDown()
-        finally:
-            self.reset_flags()
+            # Kill any services
+            for x in self._services:
+                try:
+                    x.kill()
+                except Exception:
+                    pass

     def flags(self, **kw):
         """Override flag variables for a test"""
@@ -127,6 +147,15 @@ class TestCase(unittest.TestCase):
         for k, v in self._original_flags.iteritems():
             setattr(FLAGS, k, v)

+    def start_service(self, name, host=None, **kwargs):
+        host = host and host or uuid.uuid4().hex
+        kwargs.setdefault('host', host)
+        kwargs.setdefault('binary', 'nova-%s' % name)
+        svc = service.Service.create(**kwargs)
+        svc.start()
+        self._services.append(svc)
+        return svc
+
     def _monkey_patch_attach(self):
         self.originalAttach = rpc.Consumer.attach_to_eventlet

@@ -28,6 +28,7 @@ import nova.api.openstack
 from nova.api.openstack import servers
 import nova.db.api
 from nova.db.sqlalchemy.models import Instance
+from nova.db.sqlalchemy.models import InstanceMetadata
 import nova.rpc
 from nova.tests.api.openstack import fakes

@@ -64,6 +65,9 @@ def instance_address(context, instance_id):


 def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
+    metadata = []
+    metadata.append(InstanceMetadata(key='seq', value=id))
+
     if public_addresses == None:
         public_addresses = list()

@@ -84,7 +88,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
         "vcpus": 0,
         "local_gb": 0,
         "hostname": "",
-        "host": "",
+        "host": None,
         "instance_type": "",
         "user_data": "",
         "reservation_id": "",
@@ -95,7 +99,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
         "availability_zone": "",
         "display_name": "server%s" % id,
         "display_description": "",
-        "locked": False}
+        "locked": False,
+        "metadata": metadata}

     instance["fixed_ip"] = {
         "address": private_address,
@@ -214,7 +219,8 @@ class ServersTest(unittest.TestCase):
             "get_image_id_from_image_hash", image_id_from_hash)

         body = dict(server=dict(
-            name='server_test', imageId=2, flavorId=2, metadata={},
+            name='server_test', imageId=2, flavorId=2,
+            metadata={'hello': 'world', 'open': 'stack'},
             personality={}))
         req = webob.Request.blank('/v1.0/servers')
         req.method = 'POST'
@@ -291,6 +297,7 @@ class ServersTest(unittest.TestCase):
             self.assertEqual(s['id'], i)
             self.assertEqual(s['name'], 'server%d' % i)
             self.assertEqual(s['imageId'], 10)
+            self.assertEqual(s['metadata']['seq'], i)
             i += 1

     def test_server_pause(self):
@@ -57,8 +57,7 @@ def zone_get_all(context):
         dict(id=1, api_url='http://foo.com', username='bob',
              password='xxx'),
         dict(id=2, api_url='http://blah.com', username='alice',
-             password='qwerty')
-    ]
+             password='qwerty')]


 class ZonesTest(unittest.TestCase):
@@ -39,5 +39,5 @@ FLAGS.num_shelves = 2
 FLAGS.blades_per_shelf = 4
 FLAGS.iscsi_num_targets = 8
 FLAGS.verbose = True
-FLAGS.sql_connection = 'sqlite:///nova.sqlite'
+FLAGS.sql_connection = 'sqlite:///tests.sqlite'
 FLAGS.use_ipv6 = True
@@ -20,6 +20,7 @@

 import boto
 from boto.ec2 import regioninfo
+import datetime
 import httplib
 import random
 import StringIO
@@ -127,6 +128,28 @@ class ApiEc2TestCase(test.TestCase):
         self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
         return self.http

+    def test_return_valid_isoformat(self):
+        """
+        Ensure that the ec2 api returns datetime in xs:dateTime
+        (which apparently isn't datetime.isoformat())
+        NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
+        """
+        conv = apirequest._database_to_isoformat
+        # sqlite database representation with microseconds
+        time_to_convert = datetime.datetime.strptime(
+            "2011-02-21 20:14:10.634276",
+            "%Y-%m-%d %H:%M:%S.%f")
+        self.assertEqual(
+            conv(time_to_convert),
+            '2011-02-21T20:14:10Z')
+        # mysqlite database representation
+        time_to_convert = datetime.datetime.strptime(
+            "2011-02-21 19:56:18",
+            "%Y-%m-%d %H:%M:%S")
+        self.assertEqual(
+            conv(time_to_convert),
+            '2011-02-21T19:56:18Z')
+
     def test_xmlns_version_matches_request_version(self):
         self.expect_http(api_version='2010-10-30')
         self.mox.ReplayAll()
@@ -327,15 +327,6 @@ class AuthManagerTestCase(object):
 class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
     auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'

-    def __init__(self, *args, **kwargs):
-        AuthManagerTestCase.__init__(self)
-        test.TestCase.__init__(self, *args, **kwargs)
-        import nova.auth.fakeldap as fakeldap
-        if FLAGS.flush_db:
-            LOG.info("Flushing datastore")
-            r = fakeldap.Store.instance()
-            r.flushdb()
-

 class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase):
     auth_driver = 'nova.auth.dbdriver.DbDriver'
@@ -65,18 +65,21 @@ class CloudTestCase(test.TestCase):
         self.cloud = cloud.CloudController()

         # set up services
-        self.compute = service.Service.create(binary='nova-compute')
-        self.compute.start()
-        self.network = service.Service.create(binary='nova-network')
-        self.network.start()
+        self.compute = self.start_service('compute')
+        self.scheduter = self.start_service('scheduler')
+        self.network = self.start_service('network')

         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('admin', 'admin', 'admin', True)
         self.project = self.manager.create_project('proj', 'admin', 'proj')
         self.context = context.RequestContext(user=self.user,
                                               project=self.project)
+        host = self.network.get_network_host(self.context.elevated())

     def tearDown(self):
+        network_ref = db.project_get_network(self.context,
+                                             self.project.id)
+        db.network_disassociate(self.context, network_ref['id'])
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
         self.compute.kill()
@@ -102,7 +105,7 @@ class CloudTestCase(test.TestCase):
         address = "10.10.10.10"
         db.floating_ip_create(self.context,
                               {'address': address,
-                               'host': FLAGS.host})
+                               'host': self.network.host})
         self.cloud.allocate_address(self.context)
         self.cloud.describe_addresses(self.context)
         self.cloud.release_address(self.context,
@@ -115,9 +118,9 @@ class CloudTestCase(test.TestCase):
         address = "10.10.10.10"
         db.floating_ip_create(self.context,
                               {'address': address,
-                               'host': FLAGS.host})
+                               'host': self.network.host})
         self.cloud.allocate_address(self.context)
-        inst = db.instance_create(self.context, {'host': FLAGS.host})
+        inst = db.instance_create(self.context, {'host': self.compute.host})
         fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
         ec2_id = cloud.id_to_ec2_id(inst['id'])
         self.cloud.associate_address(self.context,
@@ -133,6 +136,22 @@ class CloudTestCase(test.TestCase):
         db.instance_destroy(self.context, inst['id'])
         db.floating_ip_destroy(self.context, address)

+    def test_describe_security_groups(self):
+        """Makes sure describe_security_groups works and filters results."""
+        sec = db.security_group_create(self.context,
+                                       {'project_id': self.context.project_id,
+                                        'name': 'test'})
+        result = self.cloud.describe_security_groups(self.context)
+        # NOTE(vish): should have the default group as well
+        self.assertEqual(len(result['securityGroupInfo']), 2)
+        result = self.cloud.describe_security_groups(self.context,
+                                                     group_name=[sec['name']])
+        self.assertEqual(len(result['securityGroupInfo']), 1)
+        self.assertEqual(
+            result['securityGroupInfo'][0]['groupName'],
+            sec['name'])
+        db.security_group_destroy(self.context, sec['id'])
+
     def test_describe_volumes(self):
         """Makes sure describe_volumes works and filters results."""
         vol1 = db.volume_create(self.context, {})
@@ -203,27 +222,32 @@ class CloudTestCase(test.TestCase):
                   'instance_type': instance_type,
                   'max_count': max_count}
         rv = self.cloud.run_instances(self.context, **kwargs)
+        greenthread.sleep(0.3)
         instance_id = rv['instancesSet'][0]['instanceId']
         output = self.cloud.get_console_output(context=self.context,
                                                instance_id=[instance_id])
         self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
         # TODO(soren): We need this until we can stop polling in the rpc code
         # for unit tests.
         greenthread.sleep(0.3)
         rv = self.cloud.terminate_instances(self.context, [instance_id])
+        greenthread.sleep(0.3)

     def test_ajax_console(self):
+        image_id = FLAGS.default_image
         kwargs = {'image_id': image_id}
-        rv = yield self.cloud.run_instances(self.context, **kwargs)
+        rv = self.cloud.run_instances(self.context, **kwargs)
         instance_id = rv['instancesSet'][0]['instanceId']
-        output = yield self.cloud.get_console_output(context=self.context,
-                                                     instance_id=[instance_id])
-        self.assertEquals(b64decode(output['output']),
-                          'http://fakeajaxconsole.com/?token=FAKETOKEN')
+        greenthread.sleep(0.3)
+        output = self.cloud.get_ajax_console(context=self.context,
+                                             instance_id=[instance_id])
+        self.assertEquals(output['url'],
+                          '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url)
         # TODO(soren): We need this until we can stop polling in the rpc code
         # for unit tests.
         greenthread.sleep(0.3)
-        rv = yield self.cloud.terminate_instances(self.context, [instance_id])
+        rv = self.cloud.terminate_instances(self.context, [instance_id])
+        greenthread.sleep(0.3)

     def test_key_generation(self):
         result = self._create_key('test')
@@ -286,70 +310,6 @@ class CloudTestCase(test.TestCase):
             LOG.debug(_("Terminating instance %s"), instance_id)
             rv = self.compute.terminate_instance(instance_id)

-    def test_describe_instances(self):
-        """Makes sure describe_instances works."""
-        instance1 = db.instance_create(self.context, {'host': 'host2'})
-        comp1 = db.service_create(self.context, {'host': 'host2',
-                                                 'availability_zone': 'zone1',
-                                                 'topic': "compute"})
-        result = self.cloud.describe_instances(self.context)
-        self.assertEqual(result['reservationSet'][0]
-                         ['instancesSet'][0]
-                         ['placement']['availabilityZone'], 'zone1')
-        db.instance_destroy(self.context, instance1['id'])
-        db.service_destroy(self.context, comp1['id'])
-
-    def test_instance_update_state(self):
-        # TODO(termie): what is this code even testing?
-        def instance(num):
-            return {
-                'reservation_id': 'r-1',
-                'instance_id': 'i-%s' % num,
-                'image_id': 'ami-%s' % num,
-                'private_dns_name': '10.0.0.%s' % num,
-                'dns_name': '10.0.0%s' % num,
-                'ami_launch_index': str(num),
-                'instance_type': 'fake',
-                'availability_zone': 'fake',
-                'key_name': None,
-                'kernel_id': 'fake',
-                'ramdisk_id': 'fake',
-                'groups': ['default'],
-                'product_codes': None,
-                'state': 0x01,
-                'user_data': ''}
-        rv = self.cloud._format_describe_instances(self.context)
-        logging.error(str(rv))
-        self.assertEqual(len(rv['reservationSet']), 0)
-
-        # simulate launch of 5 instances
-        # self.cloud.instances['pending'] = {}
-        #for i in xrange(5):
-        #    inst = instance(i)
-        #    self.cloud.instances['pending'][inst['instance_id']] = inst
-
-        #rv = self.cloud._format_instances(self.admin)
-        #self.assert_(len(rv['reservationSet']) == 1)
-        #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
-        # report 4 nodes each having 1 of the instances
-        #for i in xrange(4):
-        #    self.cloud.update_state('instances',
-        #                            {('node-%s' % i): {('i-%s' % i):
-        #                                               instance(i)}})
-
-        # one instance should be pending still
-        #self.assert_(len(self.cloud.instances['pending'].keys()) == 1)
-
-        # check that the reservations collapse
-        #rv = self.cloud._format_instances(self.admin)
-        #self.assert_(len(rv['reservationSet']) == 1)
-        #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
-
-        # check that we can get metadata for each instance
-        #for i in xrange(4):
-        #    data = self.cloud.get_metadata(instance(i)['private_dns_name'])
-        #    self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i)
-
     @staticmethod
     def _fake_set_image_description(ctxt, image_id, description):
         from nova.objectstore import handler
@@ -21,7 +21,6 @@ Tests For Console proxy.
 """

 import datetime
-import logging

 from nova import context
 from nova import db
@@ -38,7 +37,6 @@ FLAGS = flags.FLAGS
 class ConsoleTestCase(test.TestCase):
     """Test case for console proxy"""
     def setUp(self):
-        logging.getLogger().setLevel(logging.DEBUG)
         super(ConsoleTestCase, self).setUp()
         self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
                    stub_compute=True)
@@ -19,7 +19,6 @@
 """Tests for Direct API."""

 import json
-import logging

 import webob

@@ -15,7 +15,6 @@
 # under the License.

 import glob
-import logging
 import os
 import re
 import sys
@@ -1,9 +1,12 @@
 import cStringIO

 from nova import context
+from nova import flags
 from nova import log
 from nova import test

+FLAGS = flags.FLAGS
+

 def _fake_context():
     return context.RequestContext(1, 1)
@@ -14,15 +17,11 @@ class RootLoggerTestCase(test.TestCase):
         super(RootLoggerTestCase, self).setUp()
         self.log = log.logging.root

-    def tearDown(self):
-        super(RootLoggerTestCase, self).tearDown()
-        log.NovaLogger.manager.loggerDict = {}
-
     def test_is_nova_instance(self):
         self.assert_(isinstance(self.log, log.NovaLogger))

-    def test_name_is_nova_root(self):
-        self.assertEqual("nova.root", self.log.name)
+    def test_name_is_nova(self):
+        self.assertEqual("nova", self.log.name)

     def test_handlers_have_nova_formatter(self):
         formatters = []
@@ -45,25 +44,36 @@ class RootLoggerTestCase(test.TestCase):
         log.audit("foo", context=_fake_context())
         self.assert_(True) # didn't raise exception

+    def test_will_be_verbose_if_verbose_flag_set(self):
+        self.flags(verbose=True)
+        log.reset()
+        self.assertEqual(log.DEBUG, self.log.level)
+
+    def test_will_not_be_verbose_if_verbose_flag_not_set(self):
+        self.flags(verbose=False)
+        log.reset()
+        self.assertEqual(log.INFO, self.log.level)
+

 class LogHandlerTestCase(test.TestCase):
     def test_log_path_logdir(self):
-        self.flags(logdir='/some/path')
-        self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+        self.flags(logdir='/some/path', logfile=None)
+        self.assertEquals(log._get_log_file_path(binary='foo-bar'),
                           '/some/path/foo-bar.log')

     def test_log_path_logfile(self):
         self.flags(logfile='/some/path/foo-bar.log')
-        self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+        self.assertEquals(log._get_log_file_path(binary='foo-bar'),
                           '/some/path/foo-bar.log')

     def test_log_path_none(self):
-        self.assertTrue(log.get_log_file_path(binary='foo-bar') is None)
+        self.flags(logdir=None, logfile=None)
+        self.assertTrue(log._get_log_file_path(binary='foo-bar') is None)

     def test_log_path_logfile_overrides_logdir(self):
         self.flags(logdir='/some/other/path',
                    logfile='/some/path/foo-bar.log')
-        self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+        self.assertEquals(log._get_log_file_path(binary='foo-bar'),
                           '/some/path/foo-bar.log')


@@ -76,13 +86,15 @@ class NovaFormatterTestCase(test.TestCase):
                    logging_debug_format_suffix="--DBG")
         self.log = log.logging.root
         self.stream = cStringIO.StringIO()
-        handler = log.StreamHandler(self.stream)
-        self.log.addHandler(handler)
+        self.handler = log.StreamHandler(self.stream)
+        self.log.addHandler(self.handler)
+        self.level = self.log.level
         self.log.setLevel(log.DEBUG)

     def tearDown(self):
+        self.log.setLevel(self.level)
+        self.log.removeHandler(self.handler)
         super(NovaFormatterTestCase, self).tearDown()
-        log.NovaLogger.manager.loggerDict = {}

     def test_uncontextualized_log(self):
         self.log.info("foo")
@@ -102,30 +114,15 @@ class NovaFormatterTestCase(test.TestCase):
 class NovaLoggerTestCase(test.TestCase):
     def setUp(self):
         super(NovaLoggerTestCase, self).setUp()
-        self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
+        levels = FLAGS.default_log_levels
+        levels.append("nova-test=AUDIT")
+        self.flags(default_log_levels=levels,
+                   verbose=True)
         self.log = log.getLogger('nova-test')

-    def tearDown(self):
-        super(NovaLoggerTestCase, self).tearDown()
-        log.NovaLogger.manager.loggerDict = {}
-
     def test_has_level_from_flags(self):
         self.assertEqual(log.AUDIT, self.log.level)

     def test_child_log_has_level_of_parent_flag(self):
         l = log.getLogger('nova-test.foo')
         self.assertEqual(log.AUDIT, l.level)
-
-
-class VerboseLoggerTestCase(test.TestCase):
-    def setUp(self):
-        super(VerboseLoggerTestCase, self).setUp()
-        self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)
-        self.log = log.getLogger('nova.test')
-
-    def tearDown(self):
-        super(VerboseLoggerTestCase, self).tearDown()
-        log.NovaLogger.manager.loggerDict = {}
-
-    def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
-        self.assertEqual(log.DEBUG, self.log.level)
@@ -46,6 +46,8 @@ class ProjectTestCase(test.TestCase):

             missing = set()
             for contributor in contributors:
+                if contributor == 'nova-core':
+                    continue
                 if not contributor in authors_file:
                     missing.add(contributor)

@@ -117,6 +117,9 @@ class NetworkTestCase(test.TestCase):
             utils.to_global_ipv6(
                 network_ref['cidr_v6'],
                 instance_ref['mac_address']))
+        self._deallocate_address(0, address)
+        db.instance_destroy(context.get_admin_context(),
+                            instance_ref['id'])

     def test_public_network_association(self):
         """Makes sure that we can allocaate a public ip"""
@@ -16,6 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova import compute
 from nova import context
 from nova import db
 from nova import flags
@@ -87,6 +88,18 @@ class QuotaTestCase(test.TestCase):
         num_instances = quota.allowed_instances(self.context, 100,
             instance_types.INSTANCE_TYPES['m1.small'])
         self.assertEqual(num_instances, 10)
+
+        # metadata_items
+        too_many_items = FLAGS.quota_metadata_items + 1000
+        num_metadata_items = quota.allowed_metadata_items(self.context,
+                                                          too_many_items)
+        self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
+        db.quota_update(self.context, self.project.id, {'metadata_items': 5})
+        num_metadata_items = quota.allowed_metadata_items(self.context,
+                                                          too_many_items)
+        self.assertEqual(num_metadata_items, 5)
+
+        # Cleanup
         db.quota_destroy(self.context, self.project.id)

     def test_too_many_instances(self):
@@ -151,3 +164,15 @@ class QuotaTestCase(test.TestCase):
         self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
                           self.context)
         db.floating_ip_destroy(context.get_admin_context(), address)
+
+    def test_too_many_metadata_items(self):
+        metadata = {}
+        for i in range(FLAGS.quota_metadata_items + 1):
+            metadata['key%s' % i] = 'value%s' % i
+        self.assertRaises(quota.QuotaError, compute.API().create,
+                          self.context,
+                          min_count=1,
+                          max_count=1,
+                          instance_type='m1.small',
+                          image_id='fake',
+                          metadata=metadata)
@@ -150,6 +150,7 @@ class SimpleDriverTestCase(test.TestCase):
     def tearDown(self):
         self.manager.delete_user(self.user)
         self.manager.delete_project(self.project)
+        super(SimpleDriverTestCase, self).tearDown()

     def _create_instance(self, **kwargs):
         """Create a test instance"""
@@ -176,18 +177,8 @@ class SimpleDriverTestCase(test.TestCase):

     def test_doesnt_report_disabled_hosts_as_up(self):
        """Ensures driver doesn't find hosts before they are enabled"""
-        # NOTE(vish): constructing service without create method
-        # because we are going to use it without queue
-        compute1 = service.Service('host1',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute1.start()
-        compute2 = service.Service('host2',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute2.start()
+        compute1 = self.start_service('compute', host='host1')
+        compute2 = self.start_service('compute', host='host2')
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': True})
@@ -199,18 +190,8 @@ class SimpleDriverTestCase(test.TestCase):

     def test_reports_enabled_hosts_as_up(self):
        """Ensures driver can find the hosts that are up"""
-        # NOTE(vish): constructing service without create method
-        # because we are going to use it without queue
-        compute1 = service.Service('host1',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute1.start()
-        compute2 = service.Service('host2',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute2.start()
+        compute1 = self.start_service('compute', host='host1')
+        compute2 = self.start_service('compute', host='host2')
        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
        self.assertEqual(2, len(hosts))
        compute1.kill()
@@ -218,16 +199,8 @@ class SimpleDriverTestCase(test.TestCase):

     def test_least_busy_host_gets_instance(self):
        """Ensures the host with less cores gets the next one"""
-        compute1 = service.Service('host1',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute1.start()
-        compute2 = service.Service('host2',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute2.start()
+        compute1 = self.start_service('compute', host='host1')
+        compute2 = self.start_service('compute', host='host2')
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance()
@@ -241,16 +214,8 @@ class SimpleDriverTestCase(test.TestCase):

     def test_specific_host_gets_instance(self):
        """Ensures if you set availability_zone it launches on that zone"""
-        compute1 = service.Service('host1',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute1.start()
-        compute2 = service.Service('host2',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute2.start()
+        compute1 = self.start_service('compute', host='host1')
+        compute2 = self.start_service('compute', host='host2')
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance(availability_zone='nova:host1')
@@ -263,11 +228,7 @@ class SimpleDriverTestCase(test.TestCase):
        compute2.kill()

     def test_wont_sechedule_if_specified_host_is_down(self):
-        compute1 = service.Service('host1',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute1.start()
+        compute1 = self.start_service('compute', host='host1')
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        now = datetime.datetime.utcnow()
        delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
@@ -282,11 +243,7 @@ class SimpleDriverTestCase(test.TestCase):
        compute1.kill()

     def test_will_schedule_on_disabled_host_if_specified(self):
-        compute1 = service.Service('host1',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute1.start()
+        compute1 = self.start_service('compute', host='host1')
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': True})
        instance_id2 = self._create_instance(availability_zone='nova:host1')
@@ -298,16 +255,8 @@ class SimpleDriverTestCase(test.TestCase):

     def test_too_many_cores(self):
        """Ensures we don't go over max cores"""
-        compute1 = service.Service('host1',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute1.start()
-        compute2 = service.Service('host2',
-                                   'nova-compute',
-                                   'compute',
-                                   FLAGS.compute_manager)
-        compute2.start()
+        compute1 = self.start_service('compute', host='host1')
+        compute2 = self.start_service('compute', host='host2')
        instance_ids1 = []
        instance_ids2 = []
        for index in xrange(FLAGS.max_cores):
@@ -322,6 +271,7 @@ class SimpleDriverTestCase(test.TestCase):
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          instance_id)
+        db.instance_destroy(self.context, instance_id)
        for instance_id in instance_ids1:
            compute1.terminate_instance(self.context, instance_id)
        for instance_id in instance_ids2:
@@ -331,16 +281,8 @@ class SimpleDriverTestCase(test.TestCase):

     def test_least_busy_host_gets_volume(self):
        """Ensures the host with less gigabytes gets the next one"""
-        volume1 = service.Service('host1',
-                                  'nova-volume',
-                                  'volume',
-                                  FLAGS.volume_manager)
-        volume1.start()
-        volume2 = service.Service('host2',
-                                  'nova-volume',
-                                  'volume',
-                                  FLAGS.volume_manager)
-        volume2.start()
+        volume1 = self.start_service('volume', host='host1')
+        volume2 = self.start_service('volume', host='host2')
        volume_id1 = self._create_volume()
        volume1.create_volume(self.context, volume_id1)
        volume_id2 = self._create_volume()
@@ -354,16 +296,8 @@ class SimpleDriverTestCase(test.TestCase):

     def test_too_many_gigabytes(self):
        """Ensures we don't go over max gigabytes"""
-        volume1 = service.Service('host1',
-                                  'nova-volume',
-                                  'volume',
-                                  FLAGS.volume_manager)
-        volume1.start()
-        volume2 = service.Service('host2',
-                                  'nova-volume',
-                                  'volume',
-                                  FLAGS.volume_manager)
-        volume2.start()
+        volume1 = self.start_service('volume', host='host1')
+        volume2 = self.start_service('volume', host='host2')
        volume_ids1 = []
        volume_ids2 = []
        for index in xrange(FLAGS.max_gigabytes):
@@ -50,13 +50,6 @@ class ExtendedService(service.Service):
 class ServiceManagerTestCase(test.TestCase):
     """Test cases for Services"""

-    def test_attribute_error_for_no_manager(self):
-        serv = service.Service('test',
-                               'test',
-                               'test',
-                               'nova.tests.test_service.FakeManager')
-        self.assertRaises(AttributeError, getattr, serv, 'test_method')
-
     def test_message_gets_to_manager(self):
         serv = service.Service('test',
                                'test',
@@ -1,4 +1,3 @@
-#!/bin/bash
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

 # Copyright 2010 United States Government as represented by the
@@ -17,19 +16,25 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-# This gets zipped and run on the cloudpipe-managed OpenVPN server
-NAME=$1
-SUBJ=$2
+"""Tests for the testing base code."""

-mkdir -p projects/$NAME
-cd projects/$NAME
+from nova import rpc
+from nova import test

-# generate a server priv key
-openssl genrsa -out server.key 2048

-# generate a server CSR
-openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ"
+class IsolationTestCase(test.TestCase):
+    """Ensure that things are cleaned up after failed tests.

-if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then
-    sudo chown -R nova:nogroup .
-fi
+    These tests don't really do much here, but if isolation fails a bunch
+    of other tests should fail.
+
+    """
+    def test_service_isolation(self):
+        self.start_service('compute')
+
+    def test_rpc_consumer_isolation(self):
+        connection = rpc.Connection.instance(new=True)
+        consumer = rpc.TopicConsumer(connection, topic='compute')
+        consumer.register_callback(
+                lambda x, y: self.fail('I should never be called'))
+        consumer.attach_to_eventlet()
174 nova/tests/test_utils.py Normal file
@@ -0,0 +1,174 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova import utils
+from nova import exception
+
+
+class GetFromPathTestCase(test.TestCase):
+    def test_tolerates_nones(self):
+        f = utils.get_from_path
+
+        input = []
+        self.assertEquals([], f(input, "a"))
+        self.assertEquals([], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [None]
+        self.assertEquals([], f(input, "a"))
+        self.assertEquals([], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': None}]
+        self.assertEquals([], f(input, "a"))
+        self.assertEquals([], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': {'b': None}}]
+        self.assertEquals([{'b': None}], f(input, "a"))
+        self.assertEquals([], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': {'b': {'c': None}}}]
+        self.assertEquals([{'b': {'c': None}}], f(input, "a"))
+        self.assertEquals([{'c': None}], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': {'b': {'c': None}}}, {'a': None}]
+        self.assertEquals([{'b': {'c': None}}], f(input, "a"))
+        self.assertEquals([{'c': None}], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
+        self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a"))
+        self.assertEquals([{'c': None}], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+    def test_does_select(self):
+        f = utils.get_from_path
+
+        input = [{'a': 'a_1'}]
+        self.assertEquals(['a_1'], f(input, "a"))
+        self.assertEquals([], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': {'b': 'b_1'}}]
+        self.assertEquals([{'b': 'b_1'}], f(input, "a"))
+        self.assertEquals(['b_1'], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': {'b': {'c': 'c_1'}}}]
+        self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
+        self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+        self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+        input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
+        self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
+        self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+        self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+        input = [{'a': {'b': {'c': 'c_1'}}},
+                 {'a': {'b': None}}]
+        self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
+        self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+        self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+        input = [{'a': {'b': {'c': 'c_1'}}},
+                 {'a': {'b': {'c': 'c_2'}}}]
+        self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
+                          f(input, "a"))
+        self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
+        self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c"))
+
+        self.assertEquals([], f(input, "a/b/c/d"))
+        self.assertEquals([], f(input, "c/a/b/d"))
+        self.assertEquals([], f(input, "i/r/t"))
+
+    def test_flattens_lists(self):
+        f = utils.get_from_path
+
+        input = [{'a': [1, 2, 3]}]
+        self.assertEquals([1, 2, 3], f(input, "a"))
+        self.assertEquals([], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': {'b': [1, 2, 3]}}]
+        self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
+        self.assertEquals([1, 2, 3], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
+        self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
+        self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = [{'a': [1, 2, {'b': 'b_1'}]}]
+        self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
+        self.assertEquals(['b_1'], f(input, "a/b"))
+
+    def test_bad_xpath(self):
+        f = utils.get_from_path
+
+        self.assertRaises(exception.Error, f, [], None)
+        self.assertRaises(exception.Error, f, [], "")
+        self.assertRaises(exception.Error, f, [], "/")
+        self.assertRaises(exception.Error, f, [], "/a")
+        self.assertRaises(exception.Error, f, [], "/a/")
+        self.assertRaises(exception.Error, f, [], "//")
+        self.assertRaises(exception.Error, f, [], "//a")
+        self.assertRaises(exception.Error, f, [], "a//a")
+        self.assertRaises(exception.Error, f, [], "a//a/")
+        self.assertRaises(exception.Error, f, [], "a/a/")
+
+    def test_real_failure1(self):
+        # Real world failure case...
+        # We weren't coping when the input was a Dictionary instead of a List
+        # This led to test_accepts_dictionaries
+        f = utils.get_from_path
+
+        inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
+                             'address': '192.168.0.3'},
+                'hostname': ''}
+
+        private_ips = f(inst, 'fixed_ip/address')
+        public_ips = f(inst, 'fixed_ip/floating_ips/address')
+        self.assertEquals(['192.168.0.3'], private_ips)
+        self.assertEquals(['1.2.3.4'], public_ips)
+
+    def test_accepts_dictionaries(self):
+        f = utils.get_from_path
+
+        input = {'a': [1, 2, 3]}
+        self.assertEquals([1, 2, 3], f(input, "a"))
+        self.assertEquals([], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = {'a': {'b': [1, 2, 3]}}
+        self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
+        self.assertEquals([1, 2, 3], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
+        self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+        self.assertEquals([], f(input, "a/b/c"))
+
+        input = {'a': [1, 2, {'b': 'b_1'}]}
+        self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
+        self.assertEquals(['b_1'], f(input, "a/b"))
@@ -204,6 +204,7 @@ class LibvirtConnTestCase(test.TestCase):
             conn = libvirt_conn.LibvirtConnection(True)
             uri = conn.get_uri()
             self.assertEquals(uri, testuri)
+        db.instance_destroy(user_context, instance_ref['id'])
 
     def tearDown(self):
         super(LibvirtConnTestCase, self).tearDown()
@@ -365,6 +366,7 @@ class IptablesFirewallTestCase(test.TestCase):
                         '--dports 80:81 -j ACCEPT' % security_group_chain \
                         in self.out_rules,
                         "TCP port 80/81 acceptance rule wasn't added")
+        db.instance_destroy(admin_ctxt, instance_ref['id'])
 
 
 class NWFilterTestCase(test.TestCase):
@@ -514,3 +516,4 @@ class NWFilterTestCase(test.TestCase):
         self.fw.apply_instance_filter(instance)
         _ensure_all_called()
         self.teardown_security_group()
+        db.instance_destroy(admin_ctxt, instance_ref['id'])
@@ -148,6 +148,7 @@ def WrapTwistedOptions(wrapped):
             options.insert(0, '')
 
         args = FLAGS(options)
+        logging.setup()
         argv = args[1:]
         # ignore subcommands
 
@@ -258,7 +259,6 @@ def serve(filename):
         print 'usage: %s [options] [start|stop|restart]' % argv[0]
         sys.exit(1)
 
-    logging.basicConfig()
    logging.debug(_("Full set of FLAGS:"))
    for flag in FLAGS:
        logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
@@ -2,6 +2,7 @@
 
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -31,6 +32,7 @@ import string
 import struct
 import sys
 import time
+import types
 from xml.sax import saxutils
 import re
 import netaddr
@@ -55,7 +57,7 @@ def import_class(import_str):
         __import__(mod_str)
         return getattr(sys.modules[mod_str], class_str)
     except (ImportError, ValueError, AttributeError), exc:
-        logging.debug(_('Inner Exception: %s'), exc)
+        LOG.debug(_('Inner Exception: %s'), exc)
         raise exception.NotFound(_('Class %s cannot be found') % class_str)
 
 
@@ -499,3 +501,52 @@ def ensure_b64_encoding(val):
         return val
     except TypeError:
         return base64.b64encode(val)
+
+
+def get_from_path(items, path):
+    """ Returns a list of items matching the specified path. Takes an
+    XPath-like expression e.g. prop1/prop2/prop3, and for each item in items,
+    looks up items[prop1][prop2][prop3]. Like XPath, if any of the
+    intermediate results are lists it will treat each list item individually.
+    A 'None' in items or any child expressions will be ignored, this function
+    will not throw because of None (anywhere) in items. The returned list
+    will contain no None values."""
+
+    if path is None:
+        raise exception.Error("Invalid mini_xpath")
+
+    (first_token, sep, remainder) = path.partition("/")
+
+    if first_token == "":
+        raise exception.Error("Invalid mini_xpath")
+
+    results = []
+
+    if items is None:
+        return results
+
+    if not isinstance(items, types.ListType):
+        # Wrap single objects in a list
+        items = [items]
+
+    for item in items:
+        if item is None:
+            continue
+        get_method = getattr(item, "get", None)
+        if get_method is None:
+            continue
+        child = get_method(first_token)
+        if child is None:
+            continue
+        if isinstance(child, types.ListType):
+            # Flatten intermediate lists
+            for x in child:
+                results.append(x)
+        else:
+            results.append(child)
+
+    if not sep:
+        # No more tokens
+        return results
+    else:
+        return get_from_path(results, remainder)
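Reviewer note, not part of the commit: a quick illustration of what the new utils.get_from_path() helper returns. The data below is invented; the behaviour shown is the one exercised by the tests above (intermediate lists are flattened and None values are skipped rather than raising).

    # Illustrative usage of nova.utils.get_from_path (hypothetical data).
    from nova import utils

    instances = [{'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'},
                                                {'address': '5.6.7.8'}]}},
                 {'fixed_ip': None}]  # ignored, never raises

    # Walks fixed_ip/floating_ips/address for every item and flattens lists.
    print(utils.get_from_path(instances, 'fixed_ip/floating_ips/address'))
    # => ['1.2.3.4', '5.6.7.8']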
@@ -38,6 +38,8 @@ flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
                      'minimum size in bytes of root partition')
 flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
                      'block_size to use for dd')
+flags.DEFINE_integer('timeout_nbd', 10,
+                     'time to wait for a NBD device coming up')
 
 
 def extend(image, size):
@@ -117,7 +119,7 @@ def _link_device(image, nbd):
         utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
         # NOTE(vish): this forks into another process, so give it a chance
         #             to set up before continuuing
-        for i in xrange(10):
+        for i in xrange(FLAGS.timeout_nbd):
             if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
                 return device
             time.sleep(1)
@@ -319,7 +319,9 @@ class FakeConnection(object):
         return 'FAKE CONSOLE OUTPUT'
 
     def get_ajax_console(self, instance):
-        return 'http://fakeajaxconsole.com/?token=FAKETOKEN'
+        return {'token': 'FAKETOKEN',
+                'host': 'fakeajaxconsole.com',
+                'port': 6969}
 
     def get_console_pool_info(self, console_type):
         return {'address': '127.0.0.1',
@@ -49,7 +49,7 @@ class API(base.Base):
 
         options = {
             'size': size,
-            'user_id': context.user.id,
+            'user_id': context.user_id,
             'project_id': context.project_id,
             'availability_zone': FLAGS.storage_availability_zone,
             'status': "creating",
@@ -85,7 +85,7 @@ class API(base.Base):
         return self.db.volume_get(context, volume_id)
 
     def get_all(self, context):
-        if context.user.is_admin():
+        if context.is_admin:
             return self.db.volume_get_all(context)
         return self.db.volume_get_all_by_project(context, context.project_id)
 
@@ -21,6 +21,7 @@ Drivers for volumes.
 """
 
 import time
+import os
 
 from nova import exception
 from nova import flags
@@ -36,6 +37,8 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
                     'Which device to export the volumes on')
 flags.DEFINE_string('num_shell_tries', 3,
                     'number of times to attempt to run flakey shell commands')
+flags.DEFINE_string('num_iscsi_scan_tries', 3,
+                    'number of times to rescan iSCSI target to find volume')
 flags.DEFINE_integer('num_shelves',
                      100,
                      'Number of vblade shelves')
@@ -88,7 +91,8 @@ class VolumeDriver(object):
                                   % FLAGS.volume_group)
 
     def create_volume(self, volume):
-        """Creates a logical volume."""
+        """Creates a logical volume. Can optionally return a Dictionary of
+        changes to the volume object to be persisted."""
         if int(volume['size']) == 0:
             sizestr = '100M'
         else:
@@ -123,7 +127,8 @@ class VolumeDriver(object):
         raise NotImplementedError()
 
     def create_export(self, context, volume):
-        """Exports the volume."""
+        """Exports the volume. Can optionally return a Dictionary of changes
+        to the volume object to be persisted."""
         raise NotImplementedError()
 
     def remove_export(self, context, volume):
@@ -222,7 +227,18 @@ class FakeAOEDriver(AOEDriver):
 
 
 class ISCSIDriver(VolumeDriver):
-    """Executes commands relating to ISCSI volumes."""
+    """Executes commands relating to ISCSI volumes.
+
+    We make use of model provider properties as follows:
+
+    :provider_location:    if present, contains the iSCSI target information
+                           in the same format as an ietadm discovery
+                           i.e. '<ip>:<port>,<portal> <target IQN>'
+
+    :provider_auth:    if present, contains a space-separated triple:
+                       '<auth method> <auth username> <auth password>'.
+                       `CHAP` is the only auth_method in use at the moment.
+    """
 
     def ensure_export(self, context, volume):
         """Synchronously recreates an export for a logical volume."""
@@ -294,40 +310,149 @@ class ISCSIDriver(VolumeDriver):
             self._execute("sudo ietadm --op delete --tid=%s" %
                           iscsi_target)
 
-    def _get_name_and_portal(self, volume):
-        """Gets iscsi name and portal from volume name and host."""
+    def _do_iscsi_discovery(self, volume):
+        #TODO(justinsb): Deprecate discovery and use stored info
+        #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
+        LOG.warn(_("ISCSI provider_location not stored, using discovery"))
+
         volume_name = volume['name']
-        host = volume['host']
+
         (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
-                                    "sendtargets -p %s" % host)
+                                    "sendtargets -p %s" % (volume['host']))
         for target in out.splitlines():
             if FLAGS.iscsi_ip_prefix in target and volume_name in target:
-                (location, _sep, iscsi_name) = target.partition(" ")
-                break
-        iscsi_portal = location.split(",")[0]
-        return (iscsi_name, iscsi_portal)
+                return target
+        return None
+
+    def _get_iscsi_properties(self, volume):
+        """Gets iscsi configuration
+
+        We ideally get saved information in the volume entity, but fall back
+        to discovery if need be. Discovery may be completely removed in future
+        The properties are:
+
+        :target_discovered:    boolean indicating whether discovery was used
+
+        :target_iqn:    the IQN of the iSCSI target
+
+        :target_portal:    the portal of the iSCSI target
+
+        :auth_method:, :auth_username:, :auth_password:
+
+            the authentication details. Right now, either auth_method is not
+            present meaning no authentication, or auth_method == `CHAP`
+            meaning use CHAP with the specified credentials.
+        """
+
+        properties = {}
+
+        location = volume['provider_location']
+
+        if location:
+            # provider_location is the same format as iSCSI discovery output
+            properties['target_discovered'] = False
+        else:
+            location = self._do_iscsi_discovery(volume)
+
+            if not location:
+                raise exception.Error(_("Could not find iSCSI export "
+                                        " for volume %s") %
+                                      (volume['name']))
+
+            LOG.debug(_("ISCSI Discovery: Found %s") % (location))
+            properties['target_discovered'] = True
+
+        (iscsi_target, _sep, iscsi_name) = location.partition(" ")
+
+        iscsi_portal = iscsi_target.split(",")[0]
+
+        properties['target_iqn'] = iscsi_name
+        properties['target_portal'] = iscsi_portal
+
+        auth = volume['provider_auth']
+
+        if auth:
+            (auth_method, auth_username, auth_secret) = auth.split()
+
+            properties['auth_method'] = auth_method
+            properties['auth_username'] = auth_username
+            properties['auth_password'] = auth_secret
+
+        return properties
+
+    def _run_iscsiadm(self, iscsi_properties, iscsi_command):
+        command = ("sudo iscsiadm -m node -T %s -p %s %s" %
+                   (iscsi_properties['target_iqn'],
+                    iscsi_properties['target_portal'],
+                    iscsi_command))
+        (out, err) = self._execute(command)
+        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
+                  (iscsi_command, out, err))
+        return (out, err)
+
+    def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
+        iscsi_command = ("--op update -n %s -v %s" %
+                         (property_key, property_value))
+        return self._run_iscsiadm(iscsi_properties, iscsi_command)
 
     def discover_volume(self, volume):
         """Discover volume on a remote host."""
-        iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
-        self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
-                      (iscsi_name, iscsi_portal))
-        self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
-                      "-n node.startup -v automatic" %
-                      (iscsi_name, iscsi_portal))
-        return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal,
-                                                           iscsi_name)
+        iscsi_properties = self._get_iscsi_properties(volume)
+
+        if not iscsi_properties['target_discovered']:
+            self._run_iscsiadm(iscsi_properties, "--op new")
+
+        if iscsi_properties.get('auth_method'):
+            self._iscsiadm_update(iscsi_properties,
+                                  "node.session.auth.authmethod",
+                                  iscsi_properties['auth_method'])
+            self._iscsiadm_update(iscsi_properties,
+                                  "node.session.auth.username",
+                                  iscsi_properties['auth_username'])
+            self._iscsiadm_update(iscsi_properties,
+                                  "node.session.auth.password",
+                                  iscsi_properties['auth_password'])
+
+        self._run_iscsiadm(iscsi_properties, "--login")
+
+        self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
+
+        mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" %
+                        (iscsi_properties['target_portal'],
+                         iscsi_properties['target_iqn']))
+
+        # The /dev/disk/by-path/... node is not always present immediately
+        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
+        tries = 0
+        while not os.path.exists(mount_device):
+            if tries >= FLAGS.num_iscsi_scan_tries:
+                raise exception.Error(_("iSCSI device not found at %s") %
+                                      (mount_device))
+
+            LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
+                       "Will rescan & retry. Try number: %(tries)s") %
+                     locals())
+
+            # The rescan isn't documented as being necessary(?), but it helps
+            self._run_iscsiadm(iscsi_properties, "--rescan")
+
+            tries = tries + 1
+            if not os.path.exists(mount_device):
+                time.sleep(tries ** 2)
+
+        if tries != 0:
+            LOG.debug(_("Found iSCSI node %(mount_device)s "
+                        "(after %(tries)s rescans)") %
+                      locals())
+
+        return mount_device
 
     def undiscover_volume(self, volume):
         """Undiscover volume on a remote host."""
-        iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
-        self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
-                      "-n node.startup -v manual" %
-                      (iscsi_name, iscsi_portal))
-        self._execute("sudo iscsiadm -m node -T %s -p %s --logout " %
-                      (iscsi_name, iscsi_portal))
-        self._execute("sudo iscsiadm -m node --op delete "
-                      "--targetname %s" % iscsi_name)
+        iscsi_properties = self._get_iscsi_properties(volume)
+        self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
+        self._run_iscsiadm(iscsi_properties, "--logout")
+        self._run_iscsiadm(iscsi_properties, "--op delete")
 
 
 class FakeISCSIDriver(ISCSIDriver):
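Reviewer note, not part of the commit: to make the new provider properties concrete, here is a small standalone sketch (sample values invented) of how a stored provider_location / provider_auth pair maps onto the dict that _get_iscsi_properties() builds.

    # Same parsing steps as ISCSIDriver._get_iscsi_properties(), shown inline
    # with made-up values.
    volume = {
        'name': 'volume-00000001',
        'provider_location':
            '10.0.0.5:3260,1 iqn.2010-10.org.openstack:volume-00000001',
        'provider_auth': 'CHAP chapuser chapsecret',
    }

    (iscsi_target, _sep, iscsi_name) = volume['provider_location'].partition(" ")
    properties = {'target_discovered': False,  # stored info, no discovery
                  'target_iqn': iscsi_name,
                  'target_portal': iscsi_target.split(",")[0]}  # '10.0.0.5:3260'
    (auth_method, auth_username, auth_secret) = volume['provider_auth'].split()
    properties.update({'auth_method': auth_method,  # 'CHAP'
                       'auth_username': auth_username,
                       'auth_password': auth_secret})
    print(properties)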
@@ -107,10 +107,14 @@ class VolumeManager(manager.Manager):
             vol_size = volume_ref['size']
             LOG.debug(_("volume %(vol_name)s: creating lv of"
                         " size %(vol_size)sG") % locals())
-            self.driver.create_volume(volume_ref)
+            model_update = self.driver.create_volume(volume_ref)
+            if model_update:
+                self.db.volume_update(context, volume_ref['id'], model_update)
 
             LOG.debug(_("volume %s: creating export"), volume_ref['name'])
-            self.driver.create_export(context, volume_ref)
+            model_update = self.driver.create_export(context, volume_ref)
+            if model_update:
+                self.db.volume_update(context, volume_ref['id'], model_update)
         except Exception:
             self.db.volume_update(context,
                                   volume_ref['id'], {'status': 'error'})
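Reviewer note, not part of the commit: the manager change above establishes a simple contract: create_volume() and create_export() may return a dict of fields to persist on the volume row, and VolumeManager writes it back via volume_update(). A minimal sketch of a driver relying on that contract (the class name and _allocate_target helper are invented for illustration):

    # Sketch only: demonstrates the optional model-update return value.
    from nova.volume.driver import ISCSIDriver

    class ExampleISCSIDriver(ISCSIDriver):
        def create_export(self, context, volume):
            # _allocate_target() is a hypothetical helper for this sketch.
            target = self._allocate_target(volume)
            # Whatever is returned here ends up in the volume DB record.
            return {'provider_location': '%s %s' % (target['portal'],
                                                    target['iqn']),
                    'provider_auth': 'CHAP %s %s' % (target['user'],
                                                     target['password'])}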
@@ -16,13 +16,16 @@
 # under the License.
 """
 Drivers for san-stored volumes.
+
 The unique thing about a SAN is that we don't expect that we can run the volume
 controller on the SAN hardware. We expect to access it over SSH or some API.
 """
 
 import os
 import paramiko
 
+from xml.etree import ElementTree
+
 from nova import exception
 from nova import flags
 from nova import log as logging
@@ -41,37 +44,19 @@ flags.DEFINE_string('san_password', '',
                     'Password for SAN controller')
 flags.DEFINE_string('san_privatekey', '',
                     'Filename of private key to use for SSH authentication')
+flags.DEFINE_string('san_clustername', '',
+                    'Cluster name to use for creating volumes')
+flags.DEFINE_integer('san_ssh_port', 22,
+                     'SSH port to use with SAN')
 
 
 class SanISCSIDriver(ISCSIDriver):
     """ Base class for SAN-style storage volumes
-        (storage providers we access over SSH)"""
-    #Override because SAN ip != host ip
-    def _get_name_and_portal(self, volume):
-        """Gets iscsi name and portal from volume name and host."""
-        volume_name = volume['name']
-
-        # TODO(justinsb): store in volume, remerge with generic iSCSI code
-        host = FLAGS.san_ip
-
-        (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
-                                    "sendtargets -p %s" % host)
-
-        location = None
-        find_iscsi_name = self._build_iscsi_target_name(volume)
-        for target in out.splitlines():
-            if find_iscsi_name in target:
-                (location, _sep, iscsi_name) = target.partition(" ")
-                break
-        if not location:
-            raise exception.Error(_("Could not find iSCSI export "
-                                    " for volume %s") %
-                                  volume_name)
-
-        iscsi_portal = location.split(",")[0]
-        LOG.debug("iscsi_name=%s, iscsi_portal=%s" %
-                  (iscsi_name, iscsi_portal))
-        return (iscsi_name, iscsi_portal)
+
+    A SAN-style storage value is 'different' because the volume controller
+    probably won't run on it, so we need to access is over SSH or another
+    remote protocol.
+    """
 
     def _build_iscsi_target_name(self, volume):
         return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
@@ -85,6 +70,7 @@ class SanISCSIDriver(ISCSIDriver):
         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         if FLAGS.san_password:
             ssh.connect(FLAGS.san_ip,
+                        port=FLAGS.san_ssh_port,
                         username=FLAGS.san_login,
                         password=FLAGS.san_password)
         elif FLAGS.san_privatekey:
@@ -92,10 +78,11 @@ class SanISCSIDriver(ISCSIDriver):
             # It sucks that paramiko doesn't support DSA keys
             privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
             ssh.connect(FLAGS.san_ip,
+                        port=FLAGS.san_ssh_port,
                         username=FLAGS.san_login,
                         pkey=privatekey)
         else:
-            raise exception.Error("Specify san_password or san_privatekey")
+            raise exception.Error(_("Specify san_password or san_privatekey"))
         return ssh
 
     def _run_ssh(self, command, check_exit_code=True):
@@ -124,10 +111,10 @@ class SanISCSIDriver(ISCSIDriver):
     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met"""
         if not (FLAGS.san_password or FLAGS.san_privatekey):
-            raise exception.Error("Specify san_password or san_privatekey")
+            raise exception.Error(_("Specify san_password or san_privatekey"))
 
         if not (FLAGS.san_ip):
-            raise exception.Error("san_ip must be set")
+            raise exception.Error(_("san_ip must be set"))
 
 
 def _collect_lines(data):
@@ -155,17 +142,27 @@ def _get_prefixed_values(data, prefix):
 
 class SolarisISCSIDriver(SanISCSIDriver):
     """Executes commands relating to Solaris-hosted ISCSI volumes.
+
     Basic setup for a Solaris iSCSI server:
+
     pkg install storage-server SUNWiscsit
+
     svcadm enable stmf
+
     svcadm enable -r svc:/network/iscsi/target:default
+
     pfexec itadm create-tpg e1000g0 ${MYIP}
+
     pfexec itadm create-target -t e1000g0
 
+
     Then grant the user that will be logging on lots of permissions.
     I'm not sure exactly which though:
+
     zfs allow justinsb create,mount,destroy rpool
+
     usermod -P'File System Management' justinsb
+
     usermod -P'Primary Administrator' justinsb
 
     Also make sure you can login using san_login & san_password/san_privatekey
@@ -306,6 +303,17 @@ class SolarisISCSIDriver(SanISCSIDriver):
         self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
                       (target_group_name, luid))
 
+        #TODO(justinsb): Is this always 1? Does it matter?
+        iscsi_portal_interface = '1'
+        iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+
+        db_update = {}
+        db_update['provider_location'] = ("%s %s" %
+                                          (iscsi_portal,
+                                           iscsi_name))
+
+        return db_update
+
     def remove_export(self, context, volume):
         """Removes an export for a logical volume."""
 
@@ -333,3 +341,245 @@ class SolarisISCSIDriver(SanISCSIDriver):
         if self._is_lu_created(volume):
             self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
                           (luid))
+
+
+class HpSanISCSIDriver(SanISCSIDriver):
+    """Executes commands relating to HP/Lefthand SAN ISCSI volumes.
+
+    We use the CLIQ interface, over SSH.
+
+    Rough overview of CLIQ commands used:
+
+    :createVolume:    (creates the volume)
+
+    :getVolumeInfo:    (to discover the IQN etc)
+
+    :getClusterInfo:    (to discover the iSCSI target IP address)
+
+    :assignVolumeChap:    (exports it with CHAP security)
+
+    The 'trick' here is that the HP SAN enforces security by default, so
+    normally a volume mount would need both to configure the SAN in the volume
+    layer and do the mount on the compute layer.  Multi-layer operations are
+    not catered for at the moment in the nova architecture, so instead we
+    share the volume using CHAP at volume creation time.  Then the mount need
+    only use those CHAP credentials, so can take place exclusively in the
+    compute layer.
+    """
+
+    def _cliq_run(self, verb, cliq_args):
+        """Runs a CLIQ command over SSH, without doing any result parsing"""
+        cliq_arg_strings = []
+        for k, v in cliq_args.items():
+            cliq_arg_strings.append(" %s=%s" % (k, v))
+        cmd = verb + ''.join(cliq_arg_strings)
+
+        return self._run_ssh(cmd)
+
+    def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
+        """Runs a CLIQ command over SSH, parsing and checking the output"""
+        cliq_args['output'] = 'XML'
+        (out, _err) = self._cliq_run(verb, cliq_args)
+
+        LOG.debug(_("CLIQ command returned %s"), out)
+
+        result_xml = ElementTree.fromstring(out)
+        if check_cliq_result:
+            response_node = result_xml.find("response")
+            if response_node is None:
+                msg = (_("Malformed response to CLIQ command "
+                         "%(verb)s %(cliq_args)s. Result=%(out)s") %
+                       locals())
+                raise exception.Error(msg)
+
+            result_code = response_node.attrib.get("result")
+
+            if result_code != "0":
+                msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
+                         " Result=%(out)s") %
+                       locals())
+                raise exception.Error(msg)
+
+        return result_xml
+
+    def _cliq_get_cluster_info(self, cluster_name):
+        """Queries for info about the cluster (including IP)"""
+        cliq_args = {}
+        cliq_args['clusterName'] = cluster_name
+        cliq_args['searchDepth'] = '1'
+        cliq_args['verbose'] = '0'
+
+        result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)
+
+        return result_xml
+
+    def _cliq_get_cluster_vip(self, cluster_name):
+        """Gets the IP on which a cluster shares iSCSI volumes"""
+        cluster_xml = self._cliq_get_cluster_info(cluster_name)
+
+        vips = []
+        for vip in cluster_xml.findall("response/cluster/vip"):
+            vips.append(vip.attrib.get('ipAddress'))
+
+        if len(vips) == 1:
+            return vips[0]
+
+        _xml = ElementTree.tostring(cluster_xml)
+        msg = (_("Unexpected number of virtual ips for cluster "
+                 " %(cluster_name)s. Result=%(_xml)s") %
+               locals())
+        raise exception.Error(msg)
+
+    def _cliq_get_volume_info(self, volume_name):
+        """Gets the volume info, including IQN"""
+        cliq_args = {}
+        cliq_args['volumeName'] = volume_name
+        result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)
+
+        # Result looks like this:
+        #<gauche version="1.0">
+        #  <response description="Operation succeeded." name="CliqSuccess"
+        #            processingTime="87" result="0">
+        #    <volume autogrowPages="4" availability="online" blockSize="1024"
+        #       bytesWritten="0" checkSum="false" clusterName="Cluster01"
+        #       created="2011-02-08T19:56:53Z" deleting="false" description=""
+        #       groupName="Group01" initialQuota="536870912" isPrimary="true"
+        #       iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b"
+        #       maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
+        #       minReplication="1" name="vol-b" parity="0" replication="2"
+        #       reserveQuota="536870912" scratchQuota="4194304"
+        #       serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316"
+        #       size="1073741824" stridePages="32" thinProvision="true">
+        #      <status description="OK" value="2"/>
+        #      <permission access="rw"
+        #            authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
+        #            chapName="chapusername" chapRequired="true" id="25369"
+        #            initiatorSecret="" iqn="" iscsiEnabled="true"
+        #            loadBalance="true" targetSecret="supersecret"/>
+        #    </volume>
+        #  </response>
+        #</gauche>
+
+        # Flatten the nodes into a dictionary; use prefixes to avoid collisions
+        volume_attributes = {}
+
+        volume_node = result_xml.find("response/volume")
+        for k, v in volume_node.attrib.items():
+            volume_attributes["volume." + k] = v
+
+        status_node = volume_node.find("status")
+        if not status_node is None:
+            for k, v in status_node.attrib.items():
+                volume_attributes["status." + k] = v
+
+        # We only consider the first permission node
+        permission_node = volume_node.find("permission")
+        if not permission_node is None:
+            for k, v in status_node.attrib.items():
+                volume_attributes["permission." + k] = v
+
+        LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") %
+                  locals())
+        return volume_attributes
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        cliq_args = {}
+        cliq_args['clusterName'] = FLAGS.san_clustername
+        #TODO(justinsb): Should we default to inheriting thinProvision?
+        cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0'
+        cliq_args['volumeName'] = volume['name']
+        if int(volume['size']) == 0:
+            cliq_args['size'] = '100MB'
+        else:
+            cliq_args['size'] = '%sGB' % volume['size']
+
+        self._cliq_run_xml("createVolume", cliq_args)
+
+        volume_info = self._cliq_get_volume_info(volume['name'])
+        cluster_name = volume_info['volume.clusterName']
+        iscsi_iqn = volume_info['volume.iscsiIqn']
+
+        #TODO(justinsb): Is this always 1? Does it matter?
+        cluster_interface = '1'
+
+        cluster_vip = self._cliq_get_cluster_vip(cluster_name)
+        iscsi_portal = cluster_vip + ":3260," + cluster_interface
+
+        model_update = {}
+        model_update['provider_location'] = ("%s %s" %
+                                             (iscsi_portal,
+                                              iscsi_iqn))
+
+        return model_update
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        cliq_args = {}
+        cliq_args['volumeName'] = volume['name']
+        cliq_args['prompt'] = 'false'  # Don't confirm
+
+        self._cliq_run_xml("deleteVolume", cliq_args)
+
+    def local_path(self, volume):
+        # TODO(justinsb): Is this needed here?
+        raise exception.Error(_("local_path not supported"))
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        return self._do_export(context, volume, force_create=False)
+
+    def create_export(self, context, volume):
+        return self._do_export(context, volume, force_create=True)
+
+    def _do_export(self, context, volume, force_create):
+        """Supports ensure_export and create_export"""
+        volume_info = self._cliq_get_volume_info(volume['name'])
+
+        is_shared = 'permission.authGroup' in volume_info
+
+        model_update = {}
+
+        should_export = False
+
+        if force_create or not is_shared:
+            should_export = True
+            # Check that we have a project_id
+            project_id = volume['project_id']
+            if not project_id:
+                project_id = context.project_id
+
+            if project_id:
+                #TODO(justinsb): Use a real per-project password here
+                chap_username = 'proj_' + project_id
+                # HP/Lefthand requires that the password be >= 12 characters
+                chap_password = 'project_secret_' + project_id
+            else:
+                msg = (_("Could not determine project for volume %s, "
+                         "can't export") %
+                       (volume['name']))
+                if force_create:
+                    raise exception.Error(msg)
+                else:
+                    LOG.warn(msg)
+                    should_export = False
+
+        if should_export:
+            cliq_args = {}
+            cliq_args['volumeName'] = volume['name']
+            cliq_args['chapName'] = chap_username
+            cliq_args['targetSecret'] = chap_password
+
+            self._cliq_run_xml("assignVolumeChap", cliq_args)
+
+            model_update['provider_auth'] = ("CHAP %s %s" %
+                                             (chap_username, chap_password))
+
+        return model_update
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+        cliq_args = {}
+        cliq_args['volumeName'] = volume['name']
+
+        self._cliq_run_xml("unassignVolume", cliq_args)
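Reviewer note, not part of the commit: the CLIQ XML handling above flattens element attributes into prefixed keys ('volume.', 'status.', 'permission.'). Below is a trimmed, self-contained sketch of that flattening, using a made-up response in the same shape as the sample quoted inside _cliq_get_volume_info().

    # Illustrative only: flatten a CLIQ-style XML response into prefixed keys.
    from xml.etree import ElementTree

    out = """<gauche version="1.0">
      <response description="Operation succeeded." name="CliqSuccess" result="0">
        <volume name="vol-b" clusterName="Cluster01"
                iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b">
          <status description="OK" value="2"/>
        </volume>
      </response>
    </gauche>"""

    result_xml = ElementTree.fromstring(out)
    volume_attributes = {}
    volume_node = result_xml.find("response/volume")
    for k, v in volume_node.attrib.items():
        volume_attributes["volume." + k] = v
    status_node = volume_node.find("status")
    if status_node is not None:
        for k, v in status_node.attrib.items():
            volume_attributes["status." + k] = v

    print(volume_attributes["volume.iscsiIqn"])     # used for provider_location
    print(volume_attributes["status.description"])  # 'OK'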
@@ -59,7 +59,6 @@ class Server(object):
     """Server class to manage multiple WSGI sockets and applications."""
 
     def __init__(self, threads=1000):
-        logging.basicConfig()
         self.pool = eventlet.GreenPool(threads)
 
     def start(self, application, port, host='0.0.0.0', backlog=128):
@@ -515,10 +514,3 @@ def load_paste_app(filename, appname):
     except LookupError:
         pass
     return app
-
-
-def paste_config_to_flags(config, mixins):
-    for k, v in mixins.iteritems():
-        value = config.get(k, v)
-        converted_value = FLAGS[k].parser.Parse(value)
-        setattr(FLAGS, k, converted_value)
Some files were not shown because too many files have changed in this diff.